deps: upgrade v8 to 3.31.74.1
author Ben Noordhuis <info@bnoordhuis.nl>
Wed, 7 Jan 2015 17:38:38 +0000 (18:38 +0100)
committer Ben Noordhuis <info@bnoordhuis.nl>
Wed, 7 Jan 2015 21:11:18 +0000 (22:11 +0100)
PR-URL: https://github.com/iojs/io.js/pull/243
Reviewed-By: Fedor Indutny <fedor@indutny.com>
Reviewed-By: Trevor Norris <trev.norris@gmail.com>
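Once this upgrade lands, the bundled V8 version can be confirmed at runtime; a minimal sketch, assuming an io.js binary built from this commit:

    // Prints the embedded V8 version string; with this commit it
    // should report "3.31.74.1".
    console.log(process.versions.v8);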
827 files changed:
deps/v8/.DEPS.git [deleted file]
deps/v8/.gitignore
deps/v8/BUILD.gn
deps/v8/ChangeLog
deps/v8/DEPS
deps/v8/Makefile
deps/v8/Makefile.android
deps/v8/OWNERS
deps/v8/PRESUBMIT.py
deps/v8/README.md
deps/v8/build/android.gypi
deps/v8/build/features.gypi
deps/v8/build/standalone.gypi
deps/v8/build/toolchain.gypi
deps/v8/codereview.settings
deps/v8/include/v8.h
deps/v8/include/v8config.h
deps/v8/samples/lineprocessor.cc [deleted file]
deps/v8/samples/process.cc
deps/v8/samples/samples.gyp
deps/v8/samples/shell.cc
deps/v8/src/accessors.cc
deps/v8/src/accessors.h
deps/v8/src/api.cc
deps/v8/src/arguments.h
deps/v8/src/arm/assembler-arm.cc
deps/v8/src/arm/assembler-arm.h
deps/v8/src/arm/builtins-arm.cc
deps/v8/src/arm/code-stubs-arm.cc
deps/v8/src/arm/code-stubs-arm.h
deps/v8/src/arm/codegen-arm.cc
deps/v8/src/arm/constants-arm.h
deps/v8/src/arm/cpu-arm.cc
deps/v8/src/arm/deoptimizer-arm.cc
deps/v8/src/arm/disasm-arm.cc
deps/v8/src/arm/full-codegen-arm.cc
deps/v8/src/arm/lithium-arm.cc
deps/v8/src/arm/lithium-arm.h
deps/v8/src/arm/lithium-codegen-arm.cc
deps/v8/src/arm/macro-assembler-arm.cc
deps/v8/src/arm/macro-assembler-arm.h
deps/v8/src/arm/simulator-arm.cc
deps/v8/src/arm64/assembler-arm64-inl.h
deps/v8/src/arm64/assembler-arm64.cc
deps/v8/src/arm64/builtins-arm64.cc
deps/v8/src/arm64/code-stubs-arm64.cc
deps/v8/src/arm64/code-stubs-arm64.h
deps/v8/src/arm64/cpu-arm64.cc
deps/v8/src/arm64/deoptimizer-arm64.cc
deps/v8/src/arm64/full-codegen-arm64.cc
deps/v8/src/arm64/lithium-arm64.cc
deps/v8/src/arm64/lithium-arm64.h
deps/v8/src/arm64/lithium-codegen-arm64.cc
deps/v8/src/arm64/macro-assembler-arm64-inl.h
deps/v8/src/arm64/macro-assembler-arm64.cc
deps/v8/src/arm64/macro-assembler-arm64.h
deps/v8/src/arm64/simulator-arm64.cc
deps/v8/src/array-iterator.js
deps/v8/src/array.js
deps/v8/src/ast-numbering.cc
deps/v8/src/ast-this-access-visitor.cc [new file with mode: 0644]
deps/v8/src/ast-this-access-visitor.h [new file with mode: 0644]
deps/v8/src/ast-value-factory.cc
deps/v8/src/ast-value-factory.h
deps/v8/src/ast.cc
deps/v8/src/ast.h
deps/v8/src/base/cpu.cc
deps/v8/src/base/cpu.h
deps/v8/src/base/iterator.h [new file with mode: 0644]
deps/v8/src/base/macros.h
deps/v8/src/base/platform/platform-linux.cc
deps/v8/src/base/platform/platform-posix.cc
deps/v8/src/base/platform/platform-win32.cc
deps/v8/src/base/platform/time.cc
deps/v8/src/base/sys-info.cc
deps/v8/src/bootstrapper.cc
deps/v8/src/bootstrapper.h
deps/v8/src/builtins.cc
deps/v8/src/checks.h
deps/v8/src/code-stubs-hydrogen.cc
deps/v8/src/code-stubs.cc
deps/v8/src/code-stubs.h
deps/v8/src/collection-iterator.js
deps/v8/src/collection.js
deps/v8/src/compiler.cc
deps/v8/src/compiler.h
deps/v8/src/compiler/access-builder.cc
deps/v8/src/compiler/access-builder.h
deps/v8/src/compiler/arm/code-generator-arm.cc
deps/v8/src/compiler/arm/instruction-codes-arm.h
deps/v8/src/compiler/arm/instruction-selector-arm.cc
deps/v8/src/compiler/arm/linkage-arm.cc
deps/v8/src/compiler/arm64/code-generator-arm64.cc
deps/v8/src/compiler/arm64/instruction-codes-arm64.h
deps/v8/src/compiler/arm64/instruction-selector-arm64.cc
deps/v8/src/compiler/arm64/linkage-arm64.cc
deps/v8/src/compiler/ast-graph-builder.cc
deps/v8/src/compiler/ast-graph-builder.h
deps/v8/src/compiler/ast-loop-assignment-analyzer.h
deps/v8/src/compiler/basic-block-instrumentor.cc
deps/v8/src/compiler/change-lowering.cc
deps/v8/src/compiler/change-lowering.h
deps/v8/src/compiler/code-generator-impl.h
deps/v8/src/compiler/code-generator.cc
deps/v8/src/compiler/code-generator.h
deps/v8/src/compiler/common-node-cache.cc [new file with mode: 0644]
deps/v8/src/compiler/common-node-cache.h
deps/v8/src/compiler/common-operator-reducer.cc [new file with mode: 0644]
deps/v8/src/compiler/common-operator-reducer.h [new file with mode: 0644]
deps/v8/src/compiler/common-operator.cc
deps/v8/src/compiler/common-operator.h
deps/v8/src/compiler/control-builders.cc
deps/v8/src/compiler/control-builders.h
deps/v8/src/compiler/control-equivalence.h [new file with mode: 0644]
deps/v8/src/compiler/control-reducer.cc
deps/v8/src/compiler/frame.h
deps/v8/src/compiler/gap-resolver.h
deps/v8/src/compiler/generic-algorithm-inl.h [deleted file]
deps/v8/src/compiler/generic-algorithm.h
deps/v8/src/compiler/generic-graph.h [deleted file]
deps/v8/src/compiler/generic-node-inl.h [deleted file]
deps/v8/src/compiler/generic-node.h [deleted file]
deps/v8/src/compiler/graph-builder.cc
deps/v8/src/compiler/graph-builder.h
deps/v8/src/compiler/graph-inl.h
deps/v8/src/compiler/graph-reducer.cc
deps/v8/src/compiler/graph-reducer.h
deps/v8/src/compiler/graph-replay.cc
deps/v8/src/compiler/graph-replay.h
deps/v8/src/compiler/graph-visualizer.cc
deps/v8/src/compiler/graph-visualizer.h
deps/v8/src/compiler/graph.cc
deps/v8/src/compiler/graph.h
deps/v8/src/compiler/ia32/code-generator-ia32.cc
deps/v8/src/compiler/ia32/instruction-codes-ia32.h
deps/v8/src/compiler/ia32/instruction-selector-ia32.cc
deps/v8/src/compiler/ia32/linkage-ia32.cc
deps/v8/src/compiler/instruction-codes.h
deps/v8/src/compiler/instruction-selector-impl.h
deps/v8/src/compiler/instruction-selector.cc
deps/v8/src/compiler/instruction-selector.h
deps/v8/src/compiler/instruction.cc
deps/v8/src/compiler/instruction.h
deps/v8/src/compiler/js-builtin-reducer.cc
deps/v8/src/compiler/js-builtin-reducer.h
deps/v8/src/compiler/js-context-specialization.cc
deps/v8/src/compiler/js-context-specialization.h
deps/v8/src/compiler/js-generic-lowering.cc
deps/v8/src/compiler/js-generic-lowering.h
deps/v8/src/compiler/js-graph.cc
deps/v8/src/compiler/js-graph.h
deps/v8/src/compiler/js-inlining.cc
deps/v8/src/compiler/js-intrinsic-builder.cc
deps/v8/src/compiler/js-operator.cc
deps/v8/src/compiler/js-operator.h
deps/v8/src/compiler/js-typed-lowering.cc
deps/v8/src/compiler/js-typed-lowering.h
deps/v8/src/compiler/jump-threading.cc [new file with mode: 0644]
deps/v8/src/compiler/jump-threading.h [new file with mode: 0644]
deps/v8/src/compiler/linkage-impl.h
deps/v8/src/compiler/linkage.cc
deps/v8/src/compiler/linkage.h
deps/v8/src/compiler/load-elimination.cc [new file with mode: 0644]
deps/v8/src/compiler/load-elimination.h [new file with mode: 0644]
deps/v8/src/compiler/loop-analysis.cc [new file with mode: 0644]
deps/v8/src/compiler/loop-analysis.h [new file with mode: 0644]
deps/v8/src/compiler/machine-operator-reducer.cc
deps/v8/src/compiler/machine-operator-reducer.h
deps/v8/src/compiler/machine-operator.cc
deps/v8/src/compiler/machine-operator.h
deps/v8/src/compiler/mips/code-generator-mips.cc
deps/v8/src/compiler/mips/instruction-codes-mips.h
deps/v8/src/compiler/mips/instruction-selector-mips.cc
deps/v8/src/compiler/mips/linkage-mips.cc
deps/v8/src/compiler/mips64/OWNERS [new file with mode: 0644]
deps/v8/src/compiler/mips64/code-generator-mips64.cc [new file with mode: 0644]
deps/v8/src/compiler/mips64/instruction-codes-mips64.h [new file with mode: 0644]
deps/v8/src/compiler/mips64/instruction-selector-mips64.cc [new file with mode: 0644]
deps/v8/src/compiler/mips64/linkage-mips64.cc [new file with mode: 0644]
deps/v8/src/compiler/move-optimizer.cc [new file with mode: 0644]
deps/v8/src/compiler/move-optimizer.h [new file with mode: 0644]
deps/v8/src/compiler/node-cache.cc
deps/v8/src/compiler/node-cache.h
deps/v8/src/compiler/node-matchers.h
deps/v8/src/compiler/node-properties-inl.h
deps/v8/src/compiler/node-properties.h
deps/v8/src/compiler/node.cc
deps/v8/src/compiler/node.h
deps/v8/src/compiler/opcodes.cc [new file with mode: 0644]
deps/v8/src/compiler/opcodes.h
deps/v8/src/compiler/operator-properties.cc [moved from deps/v8/src/compiler/operator-properties-inl.h with 71% similarity]
deps/v8/src/compiler/operator-properties.h
deps/v8/src/compiler/operator.h
deps/v8/src/compiler/phi-reducer.h [deleted file]
deps/v8/src/compiler/pipeline.cc
deps/v8/src/compiler/pipeline.h
deps/v8/src/compiler/raw-machine-assembler.cc
deps/v8/src/compiler/raw-machine-assembler.h
deps/v8/src/compiler/register-allocator-verifier.cc [new file with mode: 0644]
deps/v8/src/compiler/register-allocator-verifier.h [new file with mode: 0644]
deps/v8/src/compiler/register-allocator.cc
deps/v8/src/compiler/register-allocator.h
deps/v8/src/compiler/representation-change.h
deps/v8/src/compiler/schedule.cc
deps/v8/src/compiler/schedule.h
deps/v8/src/compiler/scheduler.cc
deps/v8/src/compiler/scheduler.h
deps/v8/src/compiler/select-lowering.cc
deps/v8/src/compiler/select-lowering.h
deps/v8/src/compiler/simplified-lowering.cc
deps/v8/src/compiler/simplified-lowering.h
deps/v8/src/compiler/simplified-operator-reducer.cc
deps/v8/src/compiler/simplified-operator-reducer.h
deps/v8/src/compiler/simplified-operator.cc
deps/v8/src/compiler/simplified-operator.h
deps/v8/src/compiler/source-position.cc
deps/v8/src/compiler/typer.cc
deps/v8/src/compiler/typer.h
deps/v8/src/compiler/value-numbering-reducer.cc
deps/v8/src/compiler/value-numbering-reducer.h
deps/v8/src/compiler/verifier.cc
deps/v8/src/compiler/x64/code-generator-x64.cc
deps/v8/src/compiler/x64/instruction-codes-x64.h
deps/v8/src/compiler/x64/instruction-selector-x64.cc
deps/v8/src/compiler/x64/linkage-x64.cc
deps/v8/src/contexts.cc
deps/v8/src/contexts.h
deps/v8/src/counters.h
deps/v8/src/cpu-profiler.cc
deps/v8/src/d8.cc
deps/v8/src/d8.gyp
deps/v8/src/d8.h
deps/v8/src/debug-debugger.js
deps/v8/src/debug.cc
deps/v8/src/debug.h
deps/v8/src/elements.cc
deps/v8/src/elements.h
deps/v8/src/execution.cc
deps/v8/src/execution.h
deps/v8/src/factory.cc
deps/v8/src/factory.h
deps/v8/src/field-index.h
deps/v8/src/flag-definitions.h
deps/v8/src/flags.cc
deps/v8/src/flags.h
deps/v8/src/full-codegen.cc
deps/v8/src/full-codegen.h
deps/v8/src/generator.js
deps/v8/src/global-handles.cc
deps/v8/src/global-handles.h
deps/v8/src/globals.h
deps/v8/src/harmony-array-includes.js [new file with mode: 0644]
deps/v8/src/harmony-array.js
deps/v8/src/harmony-classes.js
deps/v8/src/harmony-regexp.js [new file with mode: 0644]
deps/v8/src/harmony-string.js
deps/v8/src/harmony-templates.js [new file with mode: 0644]
deps/v8/src/harmony-tostring.js
deps/v8/src/harmony-typedarray.js
deps/v8/src/heap-snapshot-generator.cc
deps/v8/src/heap-snapshot-generator.h
deps/v8/src/heap/gc-idle-time-handler.cc
deps/v8/src/heap/gc-idle-time-handler.h
deps/v8/src/heap/gc-tracer.cc
deps/v8/src/heap/gc-tracer.h
deps/v8/src/heap/heap-inl.h
deps/v8/src/heap/heap.cc
deps/v8/src/heap/heap.h
deps/v8/src/heap/incremental-marking-inl.h
deps/v8/src/heap/incremental-marking.cc
deps/v8/src/heap/incremental-marking.h
deps/v8/src/heap/mark-compact.cc
deps/v8/src/heap/mark-compact.h
deps/v8/src/heap/objects-visiting-inl.h
deps/v8/src/heap/objects-visiting.cc
deps/v8/src/heap/objects-visiting.h
deps/v8/src/heap/spaces.cc
deps/v8/src/heap/spaces.h
deps/v8/src/heap/store-buffer.cc
deps/v8/src/hydrogen-instructions.cc
deps/v8/src/hydrogen-instructions.h
deps/v8/src/hydrogen.cc
deps/v8/src/hydrogen.h
deps/v8/src/i18n.cc
deps/v8/src/ia32/assembler-ia32.cc
deps/v8/src/ia32/assembler-ia32.h
deps/v8/src/ia32/builtins-ia32.cc
deps/v8/src/ia32/code-stubs-ia32.cc
deps/v8/src/ia32/code-stubs-ia32.h
deps/v8/src/ia32/deoptimizer-ia32.cc
deps/v8/src/ia32/disasm-ia32.cc
deps/v8/src/ia32/full-codegen-ia32.cc
deps/v8/src/ia32/lithium-codegen-ia32.cc
deps/v8/src/ia32/lithium-ia32.cc
deps/v8/src/ia32/lithium-ia32.h
deps/v8/src/ia32/macro-assembler-ia32.cc
deps/v8/src/ia32/macro-assembler-ia32.h
deps/v8/src/ic/access-compiler.h
deps/v8/src/ic/arm/handler-compiler-arm.cc
deps/v8/src/ic/arm/ic-arm.cc
deps/v8/src/ic/arm/ic-compiler-arm.cc
deps/v8/src/ic/arm/stub-cache-arm.cc
deps/v8/src/ic/arm64/handler-compiler-arm64.cc
deps/v8/src/ic/arm64/ic-arm64.cc
deps/v8/src/ic/arm64/ic-compiler-arm64.cc
deps/v8/src/ic/arm64/stub-cache-arm64.cc
deps/v8/src/ic/handler-compiler.cc
deps/v8/src/ic/handler-compiler.h
deps/v8/src/ic/ia32/handler-compiler-ia32.cc
deps/v8/src/ic/ia32/ic-compiler-ia32.cc
deps/v8/src/ic/ia32/ic-ia32.cc
deps/v8/src/ic/ia32/stub-cache-ia32.cc
deps/v8/src/ic/ic-compiler.cc
deps/v8/src/ic/ic-inl.h
deps/v8/src/ic/ic-state.cc
deps/v8/src/ic/ic-state.h
deps/v8/src/ic/ic.cc
deps/v8/src/ic/ic.h
deps/v8/src/ic/mips/handler-compiler-mips.cc
deps/v8/src/ic/mips/ic-compiler-mips.cc
deps/v8/src/ic/mips/ic-mips.cc
deps/v8/src/ic/mips/stub-cache-mips.cc
deps/v8/src/ic/mips64/handler-compiler-mips64.cc
deps/v8/src/ic/mips64/ic-compiler-mips64.cc
deps/v8/src/ic/mips64/ic-mips64.cc
deps/v8/src/ic/mips64/stub-cache-mips64.cc
deps/v8/src/ic/ppc/access-compiler-ppc.cc [new file with mode: 0644]
deps/v8/src/ic/ppc/handler-compiler-ppc.cc [new file with mode: 0644]
deps/v8/src/ic/ppc/ic-compiler-ppc.cc [new file with mode: 0644]
deps/v8/src/ic/ppc/ic-ppc.cc [new file with mode: 0644]
deps/v8/src/ic/ppc/stub-cache-ppc.cc [new file with mode: 0644]
deps/v8/src/ic/stub-cache.h
deps/v8/src/ic/x64/handler-compiler-x64.cc
deps/v8/src/ic/x64/ic-compiler-x64.cc
deps/v8/src/ic/x64/ic-x64.cc
deps/v8/src/ic/x64/stub-cache-x64.cc
deps/v8/src/ic/x87/handler-compiler-x87.cc
deps/v8/src/ic/x87/ic-compiler-x87.cc
deps/v8/src/ic/x87/ic-x87.cc
deps/v8/src/ic/x87/stub-cache-x87.cc
deps/v8/src/interface.h
deps/v8/src/isolate.cc
deps/v8/src/isolate.h
deps/v8/src/json-parser.h
deps/v8/src/json-stringifier.h
deps/v8/src/jsregexp.cc
deps/v8/src/jsregexp.h
deps/v8/src/layout-descriptor-inl.h [new file with mode: 0644]
deps/v8/src/layout-descriptor.cc [new file with mode: 0644]
deps/v8/src/layout-descriptor.h [new file with mode: 0644]
deps/v8/src/libplatform/default-platform.h
deps/v8/src/libplatform/worker-thread.h
deps/v8/src/liveedit-debugger.js
deps/v8/src/liveedit.cc
deps/v8/src/liveedit.h
deps/v8/src/log-utils.cc
deps/v8/src/log.cc
deps/v8/src/lookup-inl.h
deps/v8/src/lookup.cc
deps/v8/src/lookup.h
deps/v8/src/macros.py
deps/v8/src/math.js
deps/v8/src/messages.js
deps/v8/src/mips/builtins-mips.cc
deps/v8/src/mips/code-stubs-mips.cc
deps/v8/src/mips/code-stubs-mips.h
deps/v8/src/mips/codegen-mips.cc
deps/v8/src/mips/constants-mips.h
deps/v8/src/mips/deoptimizer-mips.cc
deps/v8/src/mips/full-codegen-mips.cc
deps/v8/src/mips/lithium-codegen-mips.cc
deps/v8/src/mips/lithium-mips.cc
deps/v8/src/mips/lithium-mips.h
deps/v8/src/mips/macro-assembler-mips.cc
deps/v8/src/mips/macro-assembler-mips.h
deps/v8/src/mips64/assembler-mips64.cc
deps/v8/src/mips64/assembler-mips64.h
deps/v8/src/mips64/builtins-mips64.cc
deps/v8/src/mips64/code-stubs-mips64.cc
deps/v8/src/mips64/code-stubs-mips64.h
deps/v8/src/mips64/codegen-mips64.cc
deps/v8/src/mips64/constants-mips64.cc
deps/v8/src/mips64/constants-mips64.h
deps/v8/src/mips64/deoptimizer-mips64.cc
deps/v8/src/mips64/disasm-mips64.cc
deps/v8/src/mips64/full-codegen-mips64.cc
deps/v8/src/mips64/lithium-codegen-mips64.cc
deps/v8/src/mips64/lithium-mips64.cc
deps/v8/src/mips64/lithium-mips64.h
deps/v8/src/mips64/macro-assembler-mips64.cc
deps/v8/src/mips64/macro-assembler-mips64.h
deps/v8/src/mips64/simulator-mips64.cc
deps/v8/src/mirror-debugger.js
deps/v8/src/mksnapshot.cc
deps/v8/src/natives-external.cc
deps/v8/src/natives.h
deps/v8/src/objects-debug.cc
deps/v8/src/objects-inl.h
deps/v8/src/objects-printer.cc
deps/v8/src/objects.cc
deps/v8/src/objects.h
deps/v8/src/optimizing-compiler-thread.cc
deps/v8/src/optimizing-compiler-thread.h
deps/v8/src/ostreams.cc
deps/v8/src/ostreams.h
deps/v8/src/parser.cc
deps/v8/src/parser.h
deps/v8/src/ppc/assembler-ppc-inl.h [new file with mode: 0644]
deps/v8/src/ppc/assembler-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/assembler-ppc.h [new file with mode: 0644]
deps/v8/src/ppc/builtins-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/code-stubs-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/code-stubs-ppc.h [new file with mode: 0644]
deps/v8/src/ppc/codegen-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/codegen-ppc.h [new file with mode: 0644]
deps/v8/src/ppc/constants-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/constants-ppc.h [new file with mode: 0644]
deps/v8/src/ppc/cpu-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/debug-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/deoptimizer-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/disasm-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/frames-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/frames-ppc.h [new file with mode: 0644]
deps/v8/src/ppc/full-codegen-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/interface-descriptors-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/lithium-codegen-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/lithium-codegen-ppc.h [new file with mode: 0644]
deps/v8/src/ppc/lithium-gap-resolver-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/lithium-gap-resolver-ppc.h [new file with mode: 0644]
deps/v8/src/ppc/lithium-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/lithium-ppc.h [new file with mode: 0644]
deps/v8/src/ppc/macro-assembler-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/macro-assembler-ppc.h [new file with mode: 0644]
deps/v8/src/ppc/regexp-macro-assembler-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/regexp-macro-assembler-ppc.h [new file with mode: 0644]
deps/v8/src/ppc/simulator-ppc.cc [new file with mode: 0644]
deps/v8/src/ppc/simulator-ppc.h [new file with mode: 0644]
deps/v8/src/preparser.cc
deps/v8/src/preparser.h
deps/v8/src/prettyprinter.h
deps/v8/src/promise.js
deps/v8/src/property-details.h
deps/v8/src/property.cc
deps/v8/src/property.h
deps/v8/src/regexp.js
deps/v8/src/rewriter.cc
deps/v8/src/runtime-profiler.cc
deps/v8/src/runtime.js
deps/v8/src/runtime/runtime-array.cc
deps/v8/src/runtime/runtime-classes.cc
deps/v8/src/runtime/runtime-collections.cc
deps/v8/src/runtime/runtime-compiler.cc
deps/v8/src/runtime/runtime-debug.cc
deps/v8/src/runtime/runtime-generator.cc
deps/v8/src/runtime/runtime-literals.cc
deps/v8/src/runtime/runtime-object.cc
deps/v8/src/runtime/runtime-observe.cc
deps/v8/src/runtime/runtime-regexp.cc
deps/v8/src/runtime/runtime-scopes.cc
deps/v8/src/runtime/runtime-strings.cc
deps/v8/src/runtime/runtime-test.cc
deps/v8/src/runtime/runtime-uri.cc
deps/v8/src/runtime/runtime.cc
deps/v8/src/runtime/runtime.h
deps/v8/src/scanner-character-streams.h
deps/v8/src/scanner.cc
deps/v8/src/scanner.h
deps/v8/src/scopeinfo.cc
deps/v8/src/scopes.cc
deps/v8/src/scopes.h
deps/v8/src/serialize.cc
deps/v8/src/serialize.h
deps/v8/src/snapshot-common.cc
deps/v8/src/snapshot-empty.cc
deps/v8/src/snapshot-external.cc
deps/v8/src/snapshot-source-sink.cc
deps/v8/src/snapshot-source-sink.h
deps/v8/src/snapshot.h
deps/v8/src/string-builder.cc [new file with mode: 0644]
deps/v8/src/string-builder.h [moved from deps/v8/src/runtime/string-builder.h with 62% similarity]
deps/v8/src/string-iterator.js
deps/v8/src/string-stream.cc
deps/v8/src/string-stream.h
deps/v8/src/symbol.js
deps/v8/src/third_party/fdlibm/fdlibm.cc
deps/v8/src/third_party/fdlibm/fdlibm.h
deps/v8/src/third_party/fdlibm/fdlibm.js
deps/v8/src/token.h
deps/v8/src/transitions-inl.h
deps/v8/src/transitions.cc
deps/v8/src/transitions.h
deps/v8/src/type-feedback-vector.cc
deps/v8/src/type-feedback-vector.h
deps/v8/src/type-info.cc
deps/v8/src/type-info.h
deps/v8/src/types.cc
deps/v8/src/types.h
deps/v8/src/typing.cc
deps/v8/src/utils.h
deps/v8/src/v8.cc
deps/v8/src/v8.h
deps/v8/src/v8natives.js
deps/v8/src/v8threads.cc
deps/v8/src/variables.cc
deps/v8/src/variables.h
deps/v8/src/version.cc
deps/v8/src/version.h
deps/v8/src/weak-collection.js
deps/v8/src/x64/assembler-x64-inl.h
deps/v8/src/x64/assembler-x64.cc
deps/v8/src/x64/assembler-x64.h
deps/v8/src/x64/builtins-x64.cc
deps/v8/src/x64/code-stubs-x64.cc
deps/v8/src/x64/code-stubs-x64.h
deps/v8/src/x64/deoptimizer-x64.cc
deps/v8/src/x64/disasm-x64.cc
deps/v8/src/x64/full-codegen-x64.cc
deps/v8/src/x64/lithium-codegen-x64.cc
deps/v8/src/x64/lithium-x64.cc
deps/v8/src/x64/lithium-x64.h
deps/v8/src/x64/macro-assembler-x64.cc
deps/v8/src/x64/macro-assembler-x64.h
deps/v8/src/x87/builtins-x87.cc
deps/v8/src/x87/code-stubs-x87.cc
deps/v8/src/x87/code-stubs-x87.h
deps/v8/src/x87/deoptimizer-x87.cc
deps/v8/src/x87/full-codegen-x87.cc
deps/v8/src/x87/lithium-codegen-x87.cc
deps/v8/src/x87/lithium-x87.cc
deps/v8/src/x87/lithium-x87.h
deps/v8/src/x87/macro-assembler-x87.cc
deps/v8/src/x87/macro-assembler-x87.h
deps/v8/src/zone-allocator.h
deps/v8/src/zone-containers.h
deps/v8/src/zone-inl.h
deps/v8/test/cctest/cctest.gyp
deps/v8/test/cctest/cctest.status
deps/v8/test/cctest/compiler/call-tester.h
deps/v8/test/cctest/compiler/codegen-tester.cc
deps/v8/test/cctest/compiler/codegen-tester.h
deps/v8/test/cctest/compiler/function-tester.h
deps/v8/test/cctest/compiler/graph-builder-tester.cc
deps/v8/test/cctest/compiler/graph-builder-tester.h
deps/v8/test/cctest/compiler/simplified-graph-builder.cc
deps/v8/test/cctest/compiler/simplified-graph-builder.h
deps/v8/test/cctest/compiler/test-basic-block-profiler.cc
deps/v8/test/cctest/compiler/test-branch-combine.cc
deps/v8/test/cctest/compiler/test-changes-lowering.cc
deps/v8/test/cctest/compiler/test-codegen-deopt.cc
deps/v8/test/cctest/compiler/test-control-reducer.cc
deps/v8/test/cctest/compiler/test-graph-reducer.cc
deps/v8/test/cctest/compiler/test-graph-visualizer.cc
deps/v8/test/cctest/compiler/test-instruction.cc
deps/v8/test/cctest/compiler/test-js-constant-cache.cc
deps/v8/test/cctest/compiler/test-js-context-specialization.cc
deps/v8/test/cctest/compiler/test-js-typed-lowering.cc
deps/v8/test/cctest/compiler/test-jump-threading.cc [new file with mode: 0644]
deps/v8/test/cctest/compiler/test-linkage.cc
deps/v8/test/cctest/compiler/test-loop-analysis.cc [new file with mode: 0644]
deps/v8/test/cctest/compiler/test-machine-operator-reducer.cc
deps/v8/test/cctest/compiler/test-node-algorithm.cc
deps/v8/test/cctest/compiler/test-node-cache.cc
deps/v8/test/cctest/compiler/test-node.cc
deps/v8/test/cctest/compiler/test-phi-reducer.cc [deleted file]
deps/v8/test/cctest/compiler/test-run-inlining.cc
deps/v8/test/cctest/compiler/test-run-machops.cc
deps/v8/test/cctest/compiler/test-schedule.cc
deps/v8/test/cctest/compiler/test-scheduler.cc
deps/v8/test/cctest/compiler/test-simplified-lowering.cc
deps/v8/test/cctest/compiler/test-typer.cc
deps/v8/test/cctest/test-accessors.cc
deps/v8/test/cctest/test-api.cc
deps/v8/test/cctest/test-assembler-arm.cc
deps/v8/test/cctest/test-assembler-ia32.cc
deps/v8/test/cctest/test-assembler-x64.cc
deps/v8/test/cctest/test-ast.cc
deps/v8/test/cctest/test-compiler.cc
deps/v8/test/cctest/test-debug.cc
deps/v8/test/cctest/test-decls.cc
deps/v8/test/cctest/test-disasm-arm.cc
deps/v8/test/cctest/test-disasm-ia32.cc
deps/v8/test/cctest/test-disasm-x64.cc
deps/v8/test/cctest/test-feedback-vector.cc
deps/v8/test/cctest/test-heap-profiler.cc
deps/v8/test/cctest/test-heap.cc
deps/v8/test/cctest/test-log.cc
deps/v8/test/cctest/test-mark-compact.cc
deps/v8/test/cctest/test-object-observe.cc
deps/v8/test/cctest/test-parsing.cc
deps/v8/test/cctest/test-serialize.cc
deps/v8/test/cctest/test-spaces.cc
deps/v8/test/cctest/test-strings.cc
deps/v8/test/cctest/test-transitions.cc
deps/v8/test/cctest/test-types.cc
deps/v8/test/cctest/test-unboxed-doubles.cc [new file with mode: 0644]
deps/v8/test/cctest/test-weakmaps.cc
deps/v8/test/cctest/types-fuzz.h
deps/v8/test/js-perf-test/Classes/Classes.json [deleted file]
deps/v8/test/js-perf-test/Classes/default-constructor.js [new file with mode: 0644]
deps/v8/test/js-perf-test/Classes/run.js
deps/v8/test/js-perf-test/Collections/Collections.json [deleted file]
deps/v8/test/js-perf-test/Iterators/Iterators.json [deleted file]
deps/v8/test/js-perf-test/JSTests.json [new file with mode: 0644]
deps/v8/test/js-perf-test/Strings/Strings.json [deleted file]
deps/v8/test/js-perf-test/Strings/harmony-string.js
deps/v8/test/js-perf-test/Templates/run.js [new file with mode: 0644]
deps/v8/test/js-perf-test/Templates/templates.js [new file with mode: 0644]
deps/v8/test/message/single-function-literal.js
deps/v8/test/message/super-constructor-extra-statement.js [new file with mode: 0644]
deps/v8/test/message/super-constructor-extra-statement.out [new file with mode: 0644]
deps/v8/test/message/super-constructor.js [new file with mode: 0644]
deps/v8/test/message/super-constructor.out [new file with mode: 0644]
deps/v8/test/message/testcfg.py
deps/v8/test/mjsunit/array-methods-read-only-length.js [moved from deps/v8/test/mjsunit/array-push-unshift-read-only-length.js with 53% similarity]
deps/v8/test/mjsunit/array-shift4.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/embenchen/box2d.js
deps/v8/test/mjsunit/asm/embenchen/copy.js
deps/v8/test/mjsunit/asm/embenchen/corrections.js
deps/v8/test/mjsunit/asm/embenchen/fannkuch.js
deps/v8/test/mjsunit/asm/embenchen/fasta.js
deps/v8/test/mjsunit/asm/embenchen/lua_binarytrees.js
deps/v8/test/mjsunit/asm/embenchen/memops.js
deps/v8/test/mjsunit/asm/embenchen/primes.js
deps/v8/test/mjsunit/asm/embenchen/zlib.js
deps/v8/test/mjsunit/asm/float32array-negative-offset.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/float64array-negative-offset.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/float64mul.js
deps/v8/test/mjsunit/asm/if-tonumber.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/int16array-negative-offset.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/int32array-negative-offset.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/int32mod-constant.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/int32mod.js
deps/v8/test/mjsunit/asm/int8array-negative-offset.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/sign-extend.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/uint32mod-constant.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/uint32mod.js
deps/v8/test/mjsunit/asm/word32ror.js [new file with mode: 0644]
deps/v8/test/mjsunit/asm/zero-extend.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/division-by-constant.js
deps/v8/test/mjsunit/compiler/literals.js
deps/v8/test/mjsunit/compiler/regress-3786.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/regress-439743.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/regress-443744.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/regress-444508.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/regress-444695.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/regress-445267.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/regress-445732.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/regress-445858.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/regress-445859.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/regress-int32array-outofbounds-nan.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/regress-uint8-deopt.js [new file with mode: 0644]
deps/v8/test/mjsunit/compiler/truncating-store.js [new file with mode: 0644]
deps/v8/test/mjsunit/debug-clearbreakpointgroup.js
deps/v8/test/mjsunit/debug-compile-event.js
deps/v8/test/mjsunit/debug-evaluate-locals-optimized-double.js
deps/v8/test/mjsunit/debug-evaluate-locals-optimized.js
deps/v8/test/mjsunit/debug-evaluate-with-context.js
deps/v8/test/mjsunit/debug-function-scopes.js
deps/v8/test/mjsunit/debug-scopes.js
deps/v8/test/mjsunit/debug-script.js
deps/v8/test/mjsunit/debug-step.js
deps/v8/test/mjsunit/debug-stepin-foreach.js [new file with mode: 0644]
deps/v8/test/mjsunit/deserialize-optimize-inner.js [new file with mode: 0644]
deps/v8/test/mjsunit/es6/collections.js
deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js [new file with mode: 0644]
deps/v8/test/mjsunit/es6/debug-stepnext-for.js [new file with mode: 0644]
deps/v8/test/mjsunit/es6/generators-debug-scopes.js
deps/v8/test/mjsunit/es6/generators-iteration.js
deps/v8/test/mjsunit/es6/generators-mirror.js
deps/v8/test/mjsunit/es6/generators-objects.js
deps/v8/test/mjsunit/es6/generators-states.js [new file with mode: 0644]
deps/v8/test/mjsunit/es6/math-log2-log10.js
deps/v8/test/mjsunit/es6/mirror-collections.js
deps/v8/test/mjsunit/es6/mirror-iterators.js
deps/v8/test/mjsunit/es6/unscopables.js
deps/v8/test/mjsunit/es7/regress/regress-443982.js [new file with mode: 0644]
deps/v8/test/mjsunit/function-length-accessor.js
deps/v8/test/mjsunit/harmony/array-concat.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/array-from.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/array-includes-to-object-sloppy.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/array-includes-to-object-strict.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/array-includes.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/block-conflicts.js
deps/v8/test/mjsunit/harmony/block-const-assign.js
deps/v8/test/mjsunit/harmony/block-non-strict-errors.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/classes.js
deps/v8/test/mjsunit/harmony/debug-blockscopes.js
deps/v8/test/mjsunit/harmony/debug-evaluate-blockscopes.js
deps/v8/test/mjsunit/harmony/debug-function-scopes.js
deps/v8/test/mjsunit/harmony/debug-step-into-class-extends.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/debug-step-into-constructor.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/disable-harmony-string.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/module-linking.js
deps/v8/test/mjsunit/harmony/object-literals-super.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/proxies-with-unscopables.js
deps/v8/test/mjsunit/harmony/regexp-flags.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/regress/regress-2243.js
deps/v8/test/mjsunit/harmony/regress/regress-2858.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/regress/regress-3683.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/regress/regress-3741.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/regress/regress-3750.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/string-contains.js [deleted file]
deps/v8/test/mjsunit/harmony/string-includes.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/string-raw.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/super.js
deps/v8/test/mjsunit/harmony/templates.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/typedarrays-of.js [new file with mode: 0644]
deps/v8/test/mjsunit/harmony/unicode-escapes.js [new file with mode: 0644]
deps/v8/test/mjsunit/keyed-load-with-string-key.js [new file with mode: 0644]
deps/v8/test/mjsunit/mirror-object.js
deps/v8/test/mjsunit/mjsunit.js
deps/v8/test/mjsunit/mjsunit.status
deps/v8/test/mjsunit/mod-range.js [new file with mode: 0644]
deps/v8/test/mjsunit/object-freeze-global.js [new file with mode: 0644]
deps/v8/test/mjsunit/object-freeze.js
deps/v8/test/mjsunit/object-prevent-extensions.js
deps/v8/test/mjsunit/object-seal-global.js [new file with mode: 0644]
deps/v8/test/mjsunit/object-seal.js
deps/v8/test/mjsunit/opt-elements-kind.js
deps/v8/test/mjsunit/regress-ntl.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-136048.js
deps/v8/test/mjsunit/regress/regress-1757.js
deps/v8/test/mjsunit/regress/regress-2506.js
deps/v8/test/mjsunit/regress/regress-3229.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-3687.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-3709.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-3717.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-3756.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-410030.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-435073.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-435477.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-436893.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-436896.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-437765.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-441099.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-crbug-109362.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-crbug-137689.js
deps/v8/test/mjsunit/regress/regress-crbug-320922.js
deps/v8/test/mjsunit/regress/regress-crbug-431602.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-crbug-432493.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-crbug-433332.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-crbug-433766.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-crbug-435825.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-crbug-436820.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-lea-matching.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-parse-object-literal.js
deps/v8/test/mjsunit/regress/regress-splice-large-index.js [moved from deps/v8/test/mjsunit/bugs/bug-2615.js with 100% similarity]
deps/v8/test/mjsunit/regress/regress-unsigned-mul-add.js [new file with mode: 0644]
deps/v8/test/mjsunit/regress/regress-weakening-multiplication.js [new file with mode: 0644]
deps/v8/test/mjsunit/runtime-gen/loadfromsuper.js [deleted file]
deps/v8/test/mjsunit/strict-mode.js
deps/v8/test/mjsunit/string-slices.js
deps/v8/test/mozilla/mozilla.status
deps/v8/test/preparser/strict-const.js
deps/v8/test/preparser/strict-function-statement.pyt
deps/v8/test/preparser/testcfg.py
deps/v8/test/test262-es6/test262-es6.status
deps/v8/test/test262/test262.status
deps/v8/test/unittests/base/iterator-unittest.cc [new file with mode: 0644]
deps/v8/test/unittests/base/platform/platform-unittest.cc
deps/v8/test/unittests/compiler/arm/instruction-selector-arm-unittest.cc
deps/v8/test/unittests/compiler/arm64/instruction-selector-arm64-unittest.cc
deps/v8/test/unittests/compiler/change-lowering-unittest.cc
deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc [new file with mode: 0644]
deps/v8/test/unittests/compiler/common-operator-unittest.cc
deps/v8/test/unittests/compiler/control-equivalence-unittest.cc [new file with mode: 0644]
deps/v8/test/unittests/compiler/graph-reducer-unittest.cc
deps/v8/test/unittests/compiler/graph-unittest.cc
deps/v8/test/unittests/compiler/graph-unittest.h
deps/v8/test/unittests/compiler/ia32/instruction-selector-ia32-unittest.cc
deps/v8/test/unittests/compiler/instruction-selector-unittest.h
deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc [new file with mode: 0644]
deps/v8/test/unittests/compiler/instruction-sequence-unittest.h [new file with mode: 0644]
deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
deps/v8/test/unittests/compiler/js-operator-unittest.cc
deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
deps/v8/test/unittests/compiler/load-elimination-unittest.cc [new file with mode: 0644]
deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
deps/v8/test/unittests/compiler/machine-operator-unittest.cc
deps/v8/test/unittests/compiler/mips64/OWNERS [new file with mode: 0644]
deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc [new file with mode: 0644]
deps/v8/test/unittests/compiler/move-optimizer-unittest.cc [new file with mode: 0644]
deps/v8/test/unittests/compiler/node-matchers-unittest.cc [new file with mode: 0644]
deps/v8/test/unittests/compiler/node-test-utils.cc
deps/v8/test/unittests/compiler/node-test-utils.h
deps/v8/test/unittests/compiler/register-allocator-unittest.cc
deps/v8/test/unittests/compiler/select-lowering-unittest.cc
deps/v8/test/unittests/compiler/simplified-operator-reducer-unittest.cc
deps/v8/test/unittests/compiler/simplified-operator-unittest.cc
deps/v8/test/unittests/compiler/x64/instruction-selector-x64-unittest.cc
deps/v8/test/unittests/heap/gc-idle-time-handler-unittest.cc
deps/v8/test/unittests/test-utils.cc
deps/v8/test/unittests/test-utils.h
deps/v8/test/unittests/unittests.gyp
deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames-expected.txt
deps/v8/test/webkit/fast/js/Object-getOwnPropertyNames.js
deps/v8/test/webkit/fast/js/basic-strict-mode-expected.txt
deps/v8/test/webkit/fast/regex/toString-expected.txt
deps/v8/test/webkit/testcfg.py
deps/v8/test/webkit/webkit.status
deps/v8/tools/android-sync.sh
deps/v8/tools/find-commit-for-patch.py [new file with mode: 0755]
deps/v8/tools/find_depot_tools.py [new file with mode: 0644]
deps/v8/tools/gen-postmortem-metadata.py
deps/v8/tools/gyp/v8.gyp
deps/v8/tools/js2c.py
deps/v8/tools/presubmit.py
deps/v8/tools/push-to-trunk/auto_push.py
deps/v8/tools/push-to-trunk/bump_up_version.py
deps/v8/tools/push-to-trunk/check_clusterfuzz.py [new file with mode: 0755]
deps/v8/tools/push-to-trunk/common_includes.py
deps/v8/tools/push-to-trunk/git_recipes.py
deps/v8/tools/push-to-trunk/merge_to_branch.py
deps/v8/tools/push-to-trunk/push_to_trunk.py
deps/v8/tools/push-to-trunk/releases.py
deps/v8/tools/push-to-trunk/test_scripts.py
deps/v8/tools/run-deopt-fuzzer.py
deps/v8/tools/run-tests.py
deps/v8/tools/run_perf.py
deps/v8/tools/testrunner/local/execution.py
deps/v8/tools/testrunner/local/progress.py
deps/v8/tools/testrunner/local/testsuite.py
deps/v8/tools/testrunner/objects/testcase.py
deps/v8/tools/trace-maps-processor.py [new file with mode: 0755]
deps/v8/tools/try_perf.py [new file with mode: 0755]
deps/v8/tools/unittests/run_perf_test.py
deps/v8/tools/whitespace.txt

diff --git a/deps/v8/.DEPS.git b/deps/v8/.DEPS.git
deleted file mode 100644
index 8f9da45..0000000
--- a/deps/v8/.DEPS.git
+++ /dev/null
@@ -1,113 +0,0 @@
-# DO NOT EDIT EXCEPT FOR LOCAL TESTING.
-# THIS IS A GENERATED FILE.
-# ALL MANUAL CHANGES WILL BE OVERWRITTEN.
-# SEE http://code.google.com/p/chromium/wiki/UsingGit
-# FOR HOW TO ROLL DEPS
-vars = {
-    'webkit_url':
-         'https://chromium.googlesource.com/chromium/blink.git',
-    'git_url':
-         'https://chromium.googlesource.com'
-}
-
-deps = {
-    'v8/build/gyp':
-        Var('git_url') + '/external/gyp.git@a3e2a5caf24a1e0a45401e09ad131210bf16b852',
-    'v8/buildtools':
-        Var('git_url') + '/chromium/buildtools.git@fb782d4369d5ae04f17a2fceef7de5a63e50f07b',
-    'v8/testing/gmock':
-        Var('git_url') + '/external/googlemock.git@896ba0e03f520fb9b6ed582bde2bd00847e3c3f2',
-    'v8/testing/gtest':
-        Var('git_url') + '/external/googletest.git@4650552ff637bb44ecf7784060091cbed3252211',
-    'v8/third_party/icu':
-        Var('git_url') + '/chromium/deps/icu52.git@26d8859357ac0bfb86b939bf21c087b8eae22494',
-}
-
-deps_os = {
-    'android':
-    {
-        'v8/third_party/android_tools':
-            Var('git_url') + '/android_tools.git@31869996507de16812bb53a3d0aaa15cd6194c16',
-    },
-    'win':
-    {
-        'v8/third_party/cygwin':
-            Var('git_url') + '/chromium/deps/cygwin.git@06a117a90c15174436bfa20ceebbfdf43b7eb820',
-        'v8/third_party/python_26':
-            Var('git_url') + '/chromium/deps/python_26.git@67d19f904470effe3122d27101cc5a8195abd157',
-    },
-}
-
-include_rules = [
-    '+include',
-    '+unicode',
-    '+third_party/fdlibm'
-]
-
-skip_child_includes = [
-    'build',
-    'third_party'
-]
-
-hooks = [
-    {
-    'action':
-         [
-    'download_from_google_storage',
-    '--no_resume',
-    '--platform=win32',
-    '--no_auth',
-    '--bucket',
-    'chromium-clang-format',
-    '-s',
-    'v8/buildtools/win/clang-format.exe.sha1'
-],
-    'pattern':
-         '.',
-    'name':
-         'clang_format_win'
-},
-    {
-    'action':
-         [
-    'download_from_google_storage',
-    '--no_resume',
-    '--platform=darwin',
-    '--no_auth',
-    '--bucket',
-    'chromium-clang-format',
-    '-s',
-    'v8/buildtools/mac/clang-format.sha1'
-],
-    'pattern':
-         '.',
-    'name':
-         'clang_format_mac'
-},
-    {
-    'action':
-         [
-    'download_from_google_storage',
-    '--no_resume',
-    '--platform=linux*',
-    '--no_auth',
-    '--bucket',
-    'chromium-clang-format',
-    '-s',
-    'v8/buildtools/linux64/clang-format.sha1'
-],
-    'pattern':
-         '.',
-    'name':
-         'clang_format_linux'
-},
-    {
-    'action':
-         [
-    'python',
-    'v8/build/gyp_v8'
-],
-    'pattern':
-         '.'
-}
-]
diff --git a/deps/v8/.gitignore b/deps/v8/.gitignore
index d0407f3..f720bee 100644
--- a/deps/v8/.gitignore
+++ b/deps/v8/.gitignore
@@ -66,8 +66,11 @@ shell_g
 /test/test262-es6/tc39-test262-*
 /testing/gmock
 /testing/gtest
+/third_party
 /third_party/icu
 /third_party/llvm
+/third_party/llvm-build
+/tools/clang
 /tools/jsfunfuzz
 /tools/jsfunfuzz.zip
 /tools/oom_dump/oom_dump
diff --git a/deps/v8/BUILD.gn b/deps/v8/BUILD.gn
index 1758ee9..6534eea 100644
--- a/deps/v8/BUILD.gn
+++ b/deps/v8/BUILD.gn
@@ -2,9 +2,12 @@
 # Use of this source code is governed by a BSD-style license that can be
 # found in the LICENSE file.
 
+# Because standalone V8 builds are not supported, assume this is part of a
+# Chromium build.
+import("//build/module_args/v8.gni")
+
 # TODO(jochen): These will need to be user-settable to support standalone V8
 # builds.
-v8_compress_startup_data = "off"
 v8_deprecation_warnings = false
 v8_enable_disassembler = false
 v8_enable_gdbjit = false
@@ -15,7 +18,6 @@ v8_interpreted_regexp = false
 v8_object_print = false
 v8_postmortem_support = false
 v8_use_snapshot = true
-v8_use_external_startup_data = false
 v8_enable_extra_checks = is_debug
 v8_target_arch = cpu_arch
 v8_random_seed = "314159265"
@@ -95,11 +97,6 @@ config("features") {
       "V8_I18N_SUPPORT",
     ]
   }
-  if (v8_compress_startup_data == "bz2") {
-    defines += [
-      "COMPRESS_STARTUP_DATA_BZ2",
-    ]
-  }
   if (v8_enable_extra_checks == true) {
     defines += [
       "ENABLE_EXTRA_CHECKS",
@@ -216,7 +213,6 @@ action("js2c") {
   args = [
     rebase_path("$target_gen_dir/libraries.cc", root_build_dir),
     "CORE",
-    v8_compress_startup_data
   ] + rebase_path(sources, root_build_dir)
 
   if (v8_use_external_startup_data) {
@@ -243,9 +239,12 @@ action("js2c_experimental") {
     "src/generator.js",
     "src/harmony-string.js",
     "src/harmony-array.js",
+    "src/harmony-array-includes.js",
     "src/harmony-typedarray.js",
     "src/harmony-classes.js",
-    "src/harmony-tostring.js"
+    "src/harmony-tostring.js",
+    "src/harmony-templates.js",
+    "src/harmony-regexp.js"
   ]
 
   outputs = [
@@ -255,7 +254,6 @@ action("js2c_experimental") {
   args = [
     rebase_path("$target_gen_dir/experimental-libraries.cc", root_build_dir),
     "EXPERIMENTAL",
-    v8_compress_startup_data
   ] + rebase_path(sources, root_build_dir)
 
   if (v8_use_external_startup_data) {
@@ -282,7 +280,7 @@ if (v8_use_external_startup_data) {
     ]
 
     outputs = [
-      "$root_gen_dir/natives_blob.bin"
+      "$root_out_dir/natives_blob.bin"
     ]
 
     script = "tools/concatenate-files.py"
@@ -335,10 +333,10 @@ action("run_mksnapshot") {
   }
 
   if (v8_use_external_startup_data) {
-    outputs += [ "$root_gen_dir/snapshot_blob.bin" ]
+    outputs += [ "$root_out_dir/snapshot_blob.bin" ]
     args += [
       "--startup_blob",
-      rebase_path("$root_gen_dir/snapshot_blob.bin", root_build_dir)
+      rebase_path("$root_out_dir/snapshot_blob.bin", root_build_dir)
     ]
   }
 }
@@ -361,7 +359,6 @@ source_set("v8_nosnapshot") {
     "$target_gen_dir/libraries.cc",
     "$target_gen_dir/experimental-libraries.cc",
     "src/snapshot-empty.cc",
-    "src/snapshot-common.cc",
   ]
 
   configs -= [ "//build/config/compiler:chromium_code" ]
@@ -383,7 +380,6 @@ source_set("v8_snapshot") {
     "$target_gen_dir/libraries.cc",
     "$target_gen_dir/experimental-libraries.cc",
     "$target_gen_dir/snapshot.cc",
-    "src/snapshot-common.cc",
   ]
 
   configs -= [ "//build/config/compiler:chromium_code" ]
@@ -436,6 +432,8 @@ source_set("v8_base") {
     "src/assert-scope.cc",
     "src/ast-numbering.cc",
     "src/ast-numbering.h",
+    "src/ast-this-access-visitor.cc",
+    "src/ast-this-access-visitor.h",
     "src/ast-value-factory.cc",
     "src/ast-value-factory.h",
     "src/ast.cc",
@@ -491,22 +489,22 @@ source_set("v8_base") {
     "src/compiler/code-generator-impl.h",
     "src/compiler/code-generator.cc",
     "src/compiler/code-generator.h",
+    "src/compiler/common-node-cache.cc",
     "src/compiler/common-node-cache.h",
+    "src/compiler/common-operator-reducer.cc",
+    "src/compiler/common-operator-reducer.h",
     "src/compiler/common-operator.cc",
     "src/compiler/common-operator.h",
     "src/compiler/control-builders.cc",
     "src/compiler/control-builders.h",
+    "src/compiler/control-equivalence.h",
     "src/compiler/control-reducer.cc",
     "src/compiler/control-reducer.h",
     "src/compiler/diamond.h",
     "src/compiler/frame.h",
     "src/compiler/gap-resolver.cc",
     "src/compiler/gap-resolver.h",
-    "src/compiler/generic-algorithm-inl.h",
     "src/compiler/generic-algorithm.h",
-    "src/compiler/generic-graph.h",
-    "src/compiler/generic-node-inl.h",
-    "src/compiler/generic-node.h",
     "src/compiler/graph-builder.cc",
     "src/compiler/graph-builder.h",
     "src/compiler/graph-inl.h",
@@ -540,15 +538,23 @@ source_set("v8_base") {
     "src/compiler/js-operator.h",
     "src/compiler/js-typed-lowering.cc",
     "src/compiler/js-typed-lowering.h",
+    "src/compiler/jump-threading.cc",
+    "src/compiler/jump-threading.h",
     "src/compiler/linkage-impl.h",
     "src/compiler/linkage.cc",
     "src/compiler/linkage.h",
+    "src/compiler/load-elimination.cc",
+    "src/compiler/load-elimination.h",
+    "src/compiler/loop-analysis.cc",
+    "src/compiler/loop-analysis.h",
     "src/compiler/machine-operator-reducer.cc",
     "src/compiler/machine-operator-reducer.h",
     "src/compiler/machine-operator.cc",
     "src/compiler/machine-operator.h",
     "src/compiler/machine-type.cc",
     "src/compiler/machine-type.h",
+    "src/compiler/move-optimizer.cc",
+    "src/compiler/move-optimizer.h",
     "src/compiler/node-aux-data-inl.h",
     "src/compiler/node-aux-data.h",
     "src/compiler/node-cache.cc",
@@ -558,12 +564,12 @@ source_set("v8_base") {
     "src/compiler/node-properties.h",
     "src/compiler/node.cc",
     "src/compiler/node.h",
+    "src/compiler/opcodes.cc",
     "src/compiler/opcodes.h",
-    "src/compiler/operator-properties-inl.h",
+    "src/compiler/operator-properties.cc",
     "src/compiler/operator-properties.h",
     "src/compiler/operator.cc",
     "src/compiler/operator.h",
-    "src/compiler/phi-reducer.h",
     "src/compiler/pipeline.cc",
     "src/compiler/pipeline.h",
     "src/compiler/pipeline-statistics.cc",
@@ -572,6 +578,8 @@ source_set("v8_base") {
     "src/compiler/raw-machine-assembler.h",
     "src/compiler/register-allocator.cc",
     "src/compiler/register-allocator.h",
+    "src/compiler/register-allocator-verifier.cc",
+    "src/compiler/register-allocator-verifier.h",
     "src/compiler/register-configuration.cc",
     "src/compiler/register-configuration.h",
     "src/compiler/representation-change.h",
@@ -780,6 +788,9 @@ source_set("v8_base") {
     "src/jsregexp-inl.h",
     "src/jsregexp.cc",
     "src/jsregexp.h",
+    "src/layout-descriptor-inl.h",
+    "src/layout-descriptor.cc",
+    "src/layout-descriptor.h",
     "src/list-inl.h",
     "src/list.h",
     "src/lithium-allocator-inl.h",
@@ -873,7 +884,6 @@ source_set("v8_base") {
     "src/runtime/runtime-utils.h",
     "src/runtime/runtime.cc",
     "src/runtime/runtime.h",
-    "src/runtime/string-builder.h",
     "src/safepoint-table.cc",
     "src/safepoint-table.h",
     "src/sampler.cc",
@@ -890,9 +900,12 @@ source_set("v8_base") {
     "src/serialize.h",
     "src/small-pointer-list.h",
     "src/smart-pointers.h",
+    "src/snapshot-common.cc",
     "src/snapshot-source-sink.cc",
     "src/snapshot-source-sink.h",
     "src/snapshot.h",
+    "src/string-builder.cc",
+    "src/string-builder.h",
     "src/string-search.cc",
     "src/string-search.h",
     "src/string-stream.cc",
@@ -1210,11 +1223,6 @@ source_set("v8_base") {
     # TODO(jschuh): crbug.com/167187 fix size_t to int truncations.
     cflags = [ "/wd4267" ]
   }
-  if (is_linux) {
-    if (v8_compress_startup_data == "bz2") {
-      libs += [ "bz2" ]
-    }
-  }
 
   if (v8_enable_i18n_support) {
     deps += [ "//third_party/icu" ]
@@ -1260,6 +1268,7 @@ source_set("v8_libbase") {
     "src/base/flags.h",
     "src/base/functional.cc",
     "src/base/functional.h",
+    "src/base/iterator.h",
     "src/base/lazy-instance.h",
     "src/base/logging.cc",
     "src/base/logging.h",
@@ -1388,10 +1397,6 @@ if (current_toolchain == host_toolchain) {
       ":v8_nosnapshot",
       "//build/config/sanitizers:deps",
     ]
-
-    if (v8_compress_startup_data == "bz2") {
-      libs = [ "bz2" ]
-    }
   }
 }
 
@@ -1406,7 +1411,7 @@ component("v8") {
     "src/v8dll-main.cc",
   ]
 
-  if (v8_use_external_startup_data) {
+  if (v8_use_snapshot && v8_use_external_startup_data) {
     deps = [
       ":v8_base",
       ":v8_external_snapshot",
@@ -1417,6 +1422,7 @@ component("v8") {
       ":v8_snapshot",
     ]
   } else {
+    assert(!v8_use_external_startup_data)
     deps = [
       ":v8_base",
       ":v8_nosnapshot",
@@ -1438,7 +1444,7 @@ component("v8") {
 } else {
 
 group("v8") {
-  if (v8_use_external_startup_data) {
+  if (v8_use_snapshot && v8_use_external_startup_data) {
     deps = [
       ":v8_base",
       ":v8_external_snapshot",
@@ -1449,6 +1455,7 @@ group("v8") {
       ":v8_snapshot",
     ]
   } else {
+    assert(!v8_use_external_startup_data)
     deps = [
       ":v8_base",
       ":v8_nosnapshot",
diff --git a/deps/v8/ChangeLog b/deps/v8/ChangeLog
index 1e29853..d016b79 100644
--- a/deps/v8/ChangeLog
+++ b/deps/v8/ChangeLog
@@ -1,3 +1,521 @@
+2014-12-23: Version 3.31.74
+
+        [turbofan] Turn DCHECK for fixed slot index into a CHECK (Chromium issue
+        444681).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-23: Version 3.31.73
+
+        [turbofan] Fix missing ChangeUint32ToUint64 in lowering of LoadBuffer
+        (Chromium issue 444695).
+
+        Enable the embedder to specify what kind of context was disposed.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-22: Version 3.31.72
+
+        [turbofan] Correctify lowering of Uint8ClampedArray buffer access
+        (Chromium issue 444508).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-20: Version 3.31.71
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-20: Version 3.31.70
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-20: Version 3.31.69
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-19: Version 3.31.68
+
+        [turbofan] Fix unsafe out-of-bounds check for checked loads/stores
+        (Chromium issue 443744).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-19: Version 3.31.67
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-19: Version 3.31.66
+
+        Ship ES6 template literals (issue 3230).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-18: Version 3.31.65
+
+        ES6 template literals should not use legacy octal strings (issue 3736).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-18: Version 3.31.64
+
+        Fixed -fsanitize=float-cast-overflow problems (issue 3773).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-18: Version 3.31.63
+
+        ES6 computed property names (issue 3754).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-17: Version 3.31.62
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-17: Version 3.31.61
+
+        ES6: Update unscopables to match spec (issue 3632).
+
+        ES6 computed property names (issue 3754).
+
+        More -fsanitize=vptr fixes (Chromium issue 441099).
+
+        [turbofan] Cache conversions inserted during typed lowering (issue
+        3763).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-16: Version 3.31.60
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-16: Version 3.31.59
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-16: Version 3.31.58
+
+        Ship ES6 classes (issue 3330).
+
+        ES6 computed property names (issue 3754).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-12: Version 3.31.57
+
+        Consistently use only one of virtual/OVERRIDE/FINAL (issue 3753).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-12: Version 3.31.56
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-12: Version 3.31.55
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-11: Version 3.31.54
+
+        Implement Array.from() (issue 3336).
+
+        move v8_use_external_startup_data to standalone.gypi (Chromium issue
+        421063).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-11: Version 3.31.53
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-11: Version 3.31.52
+
+        Ship ES6 block scoping (issue 2198).
+
+        Optimize Object.seal and Object.preventExtensions (issue 3662, Chromium
+        issue 115960).
+
+        Add Array.prototype.includes (issue 3575).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-10: Version 3.31.51
+
+        [x64] Fix optimization for certain checked load/stores (Chromium issue
+        439743).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-09: Version 3.31.50
+
+        Temporarily restore make dependencies.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-09: Version 3.31.49
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-09: Version 3.31.48
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-09: Version 3.31.47
+
+        Temporarily restore make dependencies.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-08: Version 3.31.46
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-08: Version 3.31.45
+
+        Update all DEPS to match chromium's DEPS at edb488e.
+
+        Turn on DCHECKs and other debugging code if dcheck_always_on is 1 (issue
+        3731).
+
+        Optimize GetPrototype.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-05: Version 3.31.44
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-04: Version 3.31.43
+
+        ES6 template literals: Fix issue with template after rbrace (issue
+        3734).
+
+        Stage ES6 template literals (issue 3230).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-04: Version 3.31.42
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-04: Version 3.31.41
+
+        Simplify template literal raw string creation (issue 3710).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-03: Version 3.31.40
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-03: Version 3.31.39
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-03: Version 3.31.38
+
+        Stage ES6 classes and object literal extensions (issue 3330).
+
+        Fixed environment handling for LFlooringDivI on ARM (Chromium issue
+        437765).
+
+        Add GetIdentityHash to v8::Name object API (Chromium issue 437416).
+
+        Set V8_CC_GNU or V8_CC_MSVC for clang in gcc / cl mode (Chromium issue
+        82385).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-02: Version 3.31.37
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-02: Version 3.31.36
+
+        Set V8_CC_GNU or V8_CC_MSVC for clang in gcc / cl mode (Chromium issue
+        82385).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-02: Version 3.31.35
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-01: Version 3.31.34
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-01: Version 3.31.33
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-01: Version 3.31.32
+
+        Performance and stability improvements on all platforms.
+
+
+2014-12-01: Version 3.31.31
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-29: Version 3.31.30
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-28: Version 3.31.29
+
+        Stage @@toStringTag (--harmony-tostring).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-28: Version 3.31.28
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-28: Version 3.31.27
+
+        Ship harmony-strings.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-28: Version 3.31.26
+
+        Abort optimization in corner case (Chromium issue 436893).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-26: Version 3.31.25
+
+        Stage ES6 block scoping (issue 2198).
+
+        Introduce legacy const slots in correct context (Chromium issue 410030).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-26: Version 3.31.24
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-25: Version 3.31.23
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-25: Version 3.31.22
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-24: Version 3.31.21
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-24: Version 3.31.20
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-22: Version 3.31.19
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-21: Version 3.31.18
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-21: Version 3.31.17
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-21: Version 3.31.16
+
+        Cache template literal callSiteObj (issue 3230).
+
+        Rename String.prototype.contains to 'includes'.
+
+        Reserve code range block for evacuation (Chromium issue 430118).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-20: Version 3.31.15
+
+        Rename String.prototype.contains to 'includes'.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-19: Version 3.31.14
+
+        Remove Weak{Map,Set}.prototype.clear.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-19: Version 3.31.13
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-19: Version 3.31.12
+
+        Classes: Expand test to cover strict runtime behavior (issue 3330).
+
+        v8::String::Concat must not throw (Chromium issue 420240).
+
+        Fix disabling all break points from within the debug event callback
+        (Chromium issue 432493).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-18: Version 3.31.11
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-17: Version 3.31.10
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-17: Version 3.31.9
+
+        Expose internal properties of map/set iterators via mirrors.
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-17: Version 3.31.8
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-15: Version 3.31.7
+
+        Classes: Add support for stepping through default constructors (issue
+        3674).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-14: Version 3.31.6
+
+        Fix desugaring of let bindings in for loops to handle continue properly
+        (issue 3683).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-14: Version 3.31.5
+
+        Classes: Implement correct name binding (issue 3330).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-14: Version 3.31.4
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-14: Version 3.31.3
+
+        Classes: Cleanup default constructor flag.
+
+        Soft fail for invalid cache data.
+
+        Implement .of() on typed arrays (issue 3578).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-13: Version 3.31.2
+
+        MIPS: Leaving a generator via an exception causes it to close (issue
+        3096).
+
+        MIPS: ES6: Add support for super in object literals (issue 3571).
+
+        Increase the target new space size to the max new space size (issue
+        3626).
+
+        Leaving a generator via an exception causes it to close (issue 3096).
+
+        Correctly compute line numbers in functions from the function
+        constructor (Chromium issue 109362).
+
+        Rename v8::Exception::GetMessage to CreateMessage.
+
+        Classes: Add support for arguments in default constructor (issue 3672).
+
+        ES6: Add support for super in object literals (issue 3571).
+
+        Performance and stability improvements on all platforms.
+
+
+2014-11-12: Version 3.31.1
+
+        Fix has_constant_parameter_count() confusion in LReturn (Chromium issue
+        431602).
+
+        Performance and stability improvements on all platforms.
+
+
 2014-11-05: Version 3.30.33
 
         `1..isPrototypeOf.call(null)` should return false, not throw TypeError
index d4139c6..e85604b 100644 (file)
@@ -3,44 +3,32 @@
 # all paths in here must match this assumption.
 
 vars = {
-  "chromium_git": "https://chromium.googlesource.com",
-
-  "chromium_trunk": "https://src.chromium.org/svn/trunk",
-
-  "buildtools_revision": "fb782d4369d5ae04f17a2fceef7de5a63e50f07b",
+  "git_url": "https://chromium.googlesource.com",
 }
 
 deps = {
-  # Remember to keep the revision in sync with the Makefile.
   "v8/build/gyp":
-    "http://gyp.googlecode.com/svn/trunk@1831",
-
+    Var("git_url") + "/external/gyp.git" + "@" + "fe00999dfaee449d3465a9316778434884da4fa7",  # from svn revision 2010
   "v8/third_party/icu":
-    Var("chromium_trunk") + "/deps/third_party/icu52@277999",
-
+    Var("git_url") + "/chromium/deps/icu.git" + "@" + "51c1a4ce5f362676aa1f1cfdb5b7e52edabfa5aa",
   "v8/buildtools":
-    "https://chromium.googlesource.com/chromium/buildtools.git@" +
-    Var("buildtools_revision"),
-
+    Var("git_url") + "/chromium/buildtools.git" + "@" + "23a4e2f545c7b6340d7e5a2b74801941b0a86535",
   "v8/testing/gtest":
-    "http://googletest.googlecode.com/svn/trunk@692",
-
+    Var("git_url") + "/external/googletest.git" + "@" + "8245545b6dc9c4703e6496d1efd19e975ad2b038",  # from svn revision 700
   "v8/testing/gmock":
-    "http://googlemock.googlecode.com/svn/trunk@485",
+    Var("git_url") + "/external/googlemock.git" + "@" + "29763965ab52f24565299976b936d1265cb6a271",  # from svn revision 501
+  "v8/tools/clang":
+    Var("git_url") + "/chromium/src/tools/clang.git" + "@" + "90fb65e7a9a5c9d6d9613dfb0e78921c52ca9cfc",
 }
 
 deps_os = {
   "android": {
     "v8/third_party/android_tools":
-      Var("chromium_git") + "/android_tools.git" + "@" +
-          "31869996507de16812bb53a3d0aaa15cd6194c16",
+      Var("git_url") + "/android_tools.git" + "@" + "4f723e2a5fa5b7b8a198072ac19b92344be2b271",
   },
   "win": {
     "v8/third_party/cygwin":
-      Var("chromium_trunk") + "/deps/third_party/cygwin@66844",
-
-    "v8/third_party/python_26":
-      Var("chromium_trunk") + "/tools/third_party/python_26@89111",
+      Var("git_url") + "/chromium/deps/cygwin.git" + "@" + "c89e446b273697fadf3a10ff1007a97c0b7de6df",
   }
 }
 
@@ -93,6 +81,13 @@ hooks = [
     ],
   },
   {
+    # Pull clang if needed or requested via GYP_DEFINES.
+    # Note: On Win, this should run after win_toolchain, as it may use it.
+    'name': 'clang',
+    'pattern': '.',
+    'action': ['python', 'v8/tools/clang/scripts/update.py', '--if-needed'],
+  },
+  {
     # A change to a .gyp, .gypi, or to GYP itself should run the generator.
     "pattern": ".",
     "action": ["python", "v8/build/gyp_v8"],
index 3b02f52..606b5d7 100644 (file)
@@ -64,6 +64,10 @@ endif
 ifeq ($(verifyheap), on)
   GYPFLAGS += -Dv8_enable_verify_heap=1
 endif
+# tracemaps=on
+ifeq ($(tracemaps), on)
+  GYPFLAGS += -Dv8_trace_maps=1
+endif
 # backtrace=off
 ifeq ($(backtrace), off)
   GYPFLAGS += -Dv8_enable_backtrace=0
@@ -78,6 +82,9 @@ endif
 ifeq ($(snapshot), off)
   GYPFLAGS += -Dv8_use_snapshot='false'
 endif
+ifeq ($(snapshot), external)
+  GYPFLAGS += -Dv8_use_external_startup_data=1
+endif
 # extrachecks=on/off
 ifeq ($(extrachecks), on)
   GYPFLAGS += -Dv8_enable_extra_checks=1 -Dv8_enable_handle_zapping=1
@@ -486,7 +493,7 @@ gtags.clean:
 # "dependencies" includes also dependencies required for development.
 # Remember to keep these in sync with the DEPS file.
 builddeps:
-       svn checkout --force http://gyp.googlecode.com/svn/trunk build/gyp \
+       svn checkout --force https://gyp.googlecode.com/svn/trunk build/gyp \
            --revision 1831
        if svn info third_party/icu 2>&1 | grep -q icu46 ; then \
          svn switch --force \
@@ -497,9 +504,9 @@ builddeps:
              https://src.chromium.org/chrome/trunk/deps/third_party/icu52 \
              third_party/icu --revision 277999 ; \
        fi
-       svn checkout --force http://googletest.googlecode.com/svn/trunk \
+       svn checkout --force https://googletest.googlecode.com/svn/trunk \
            testing/gtest --revision 692
-       svn checkout --force http://googlemock.googlecode.com/svn/trunk \
+       svn checkout --force https://googlemock.googlecode.com/svn/trunk \
            testing/gmock --revision 485
 
 dependencies: builddeps
index 8e200f1..2a36403 100644 (file)
@@ -38,12 +38,10 @@ HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
 ANDROID_NDK_HOST_ARCH ?= $(shell uname -m | sed -e 's/i[3456]86/x86/')
 ifeq ($(HOST_OS), linux)
   TOOLCHAIN_DIR = linux-$(ANDROID_NDK_HOST_ARCH)
+else ifeq ($(HOST_OS), mac)
+  TOOLCHAIN_DIR = darwin-$(ANDROID_NDK_HOST_ARCH)
 else
-  ifeq ($(HOST_OS), mac)
-    TOOLCHAIN_DIR = darwin-$(ANDROID_NDK_HOST_ARCH)
-  else
-    $(error Host platform "${HOST_OS}" is not supported)
-  endif
+  $(error Host platform "${HOST_OS}" is not supported)
 endif
 
 ifeq ($(ARCH), android_arm)
@@ -52,38 +50,29 @@ ifeq ($(ARCH), android_arm)
   TOOLCHAIN_ARCH = arm-linux-androideabi
   TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
   TOOLCHAIN_VER = 4.8
+else ifeq ($(ARCH), android_arm64)
+  DEFINES  = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=21
+  TOOLCHAIN_ARCH = aarch64-linux-android
+  TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+  TOOLCHAIN_VER = 4.9
+else ifeq ($(ARCH), android_mipsel)
+  DEFINES  = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14
+  DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
+  TOOLCHAIN_ARCH = mipsel-linux-android
+  TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
+  TOOLCHAIN_VER = 4.8
+else ifeq ($(ARCH), android_ia32)
+  DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
+  TOOLCHAIN_ARCH = x86
+  TOOLCHAIN_PREFIX = i686-linux-android
+  TOOLCHAIN_VER = 4.8
+else ifeq ($(ARCH), android_x87)
+  DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
+  TOOLCHAIN_ARCH = x86
+  TOOLCHAIN_PREFIX = i686-linux-android
+  TOOLCHAIN_VER = 4.8
 else
-  ifeq ($(ARCH), android_arm64)
-    DEFINES  = target_arch=arm64 v8_target_arch=arm64 android_target_arch=arm64 android_target_platform=L
-    TOOLCHAIN_ARCH = aarch64-linux-android
-    TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-    TOOLCHAIN_VER = 4.9
-  else
-    ifeq ($(ARCH), android_mipsel)
-      DEFINES  = target_arch=mipsel v8_target_arch=mipsel android_target_platform=14
-      DEFINES += android_target_arch=mips mips_arch_variant=mips32r2
-      TOOLCHAIN_ARCH = mipsel-linux-android
-      TOOLCHAIN_PREFIX = $(TOOLCHAIN_ARCH)
-      TOOLCHAIN_VER = 4.8
-
-    else
-      ifeq ($(ARCH), android_ia32)
-        DEFINES = target_arch=ia32 v8_target_arch=ia32 android_target_arch=x86 android_target_platform=14
-        TOOLCHAIN_ARCH = x86
-        TOOLCHAIN_PREFIX = i686-linux-android
-        TOOLCHAIN_VER = 4.8
-      else
-        ifeq ($(ARCH), android_x87)
-          DEFINES = target_arch=x87 v8_target_arch=x87 android_target_arch=x86 android_target_platform=14
-          TOOLCHAIN_ARCH = x86
-          TOOLCHAIN_PREFIX = i686-linux-android
-          TOOLCHAIN_VER = 4.8
-       else
-          $(error Target architecture "${ARCH}" is not supported)
-        endif
-      endif
-    endif
-  endif
+  $(error Target architecture "${ARCH}" is not supported)
 endif
 
 TOOLCHAIN_PATH = \
index aa5f644..22a05cb 100644 (file)
@@ -1,3 +1,4 @@
+adamk@chromium.org
 bmeurer@chromium.org
 danno@chromium.org
 dcarney@chromium.org
index 3a9895d..6d19a4e 100644 (file)
@@ -244,11 +244,11 @@ def GetPreferredTryMasters(project, change):
       'v8_linux_rel': set(['defaulttests']),
       'v8_linux_dbg': set(['defaulttests']),
       'v8_linux_nosnap_rel': set(['defaulttests']),
-      'v8_linux_nosnap_dbg': set(['defaulttests']),
       'v8_linux64_rel': set(['defaulttests']),
       'v8_linux_arm_dbg': set(['defaulttests']),
       'v8_linux_arm64_rel': set(['defaulttests']),
       'v8_linux_layout_dbg': set(['defaulttests']),
+      'v8_linux_chromium_gn_rel': set(['defaulttests']),
       'v8_mac_rel': set(['defaulttests']),
       'v8_win_rel': set(['defaulttests']),
       'v8_win64_compile_rel': set(['defaulttests']),
index 7ce52a0..bc1685a 100644 (file)
@@ -16,8 +16,15 @@ V8 Project page: https://code.google.com/p/v8/
 Getting the Code
 =============
 
-V8 Git repository: https://chromium.googlesource.com/v8/v8.git
-GitHub mirror: https://github.com/v8/v8-git-mirror
+Check out [depot tools](http://www.chromium.org/developers/how-tos/install-depot-tools), and run
+
+> `fetch v8`
+
+This will check out V8 into the directory `v8` and fetch all of its dependencies.
+To stay up to date, run
+
+> `git pull origin`
+> `gclient sync`
 
 For fetching all branches, add the following into your remote
 configuration in `.git/config`:
index f984ea3..5d3b25a 100644 (file)
         ],
       },  # Release
     },  # configurations
-    'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter',
-                '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
-                # Note: Using -std=c++0x will define __STRICT_ANSI__, which in
-                # turn will leave out some template stuff for 'long long'. What
-                # we want is -std=c++11, but this is not supported by GCC 4.6 or
-                # Xcode 4.2
-                '-std=gnu++0x' ],
+    'cflags': [ '-Wno-abi', '-Wall', '-W', '-Wno-unused-parameter'],
+    'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-fno-exceptions',
+                   # Note: Using -std=c++0x will define __STRICT_ANSI__, which
+                   # in turn will leave out some template stuff for 'long
+                   # long'.  What we want is -std=c++11, but this is not
+                   # supported by GCC 4.6 or Xcode 4.2
+                   '-std=gnu++0x' ],
     'target_conditions': [
       ['_toolset=="target"', {
         'cflags!': [
           '-fno-short-enums',
           '-finline-limit=64',
           '-Wa,--noexecstack',
-          '-Wno-error=non-virtual-dtor',  # TODO(michaelbai): Fix warnings.
           # Note: This include is in cflags to ensure that it comes after
           # all of the includes.
           '-I<(android_include)',
         ],
+        'cflags_cc': [
+          '-Wno-error=non-virtual-dtor',  # TODO(michaelbai): Fix warnings.
+        ],
         'defines': [
           'ANDROID',
           #'__GNU_SOURCE=1',  # Necessary for clone()
               '-fno-stack-protector',
             ],
           }],
-          ['target_arch=="arm64" or target_arch=="x64"', {
-            # TODO(ulan): Enable PIE for other architectures (crbug.com/373219).
+          ['(target_arch=="arm" or target_arch=="arm64" or target_arch=="x64") and component!="shared_library"', {
             'cflags': [
               '-fPIE',
             ],
index 7ce66e4..25041ce 100644 (file)
@@ -29,8 +29,6 @@
 
 {
   'variables': {
-    'v8_compress_startup_data%': 'off',
-
     'v8_enable_disassembler%': 0,
 
     'v8_enable_gdbjit%': 0,
@@ -39,6 +37,8 @@
 
     'v8_enable_verify_heap%': 0,
 
+    'v8_trace_maps%': 0,
+
     'v8_use_snapshot%': 'true',
 
     'v8_enable_verify_predictable%': 0,
@@ -59,9 +59,8 @@
     # Enable compiler warnings when using V8_DEPRECATED apis.
     'v8_deprecation_warnings%': 0,
 
-    # Use external files for startup data blobs:
-    # the JS builtins sources and the start snapshot.
-    'v8_use_external_startup_data%': 0,
+    # Set to 1 to enable DCHECKs in release builds.
+    'dcheck_always_on%': 0,
   },
   'target_defaults': {
     'conditions': [
@@ -77,6 +76,9 @@
       ['v8_enable_verify_heap==1', {
         'defines': ['VERIFY_HEAP',],
       }],
+      ['v8_trace_maps==1', {
+        'defines': ['TRACE_MAPS',],
+      }],
       ['v8_enable_verify_predictable==1', {
         'defines': ['VERIFY_PREDICTABLE',],
       }],
       ['v8_enable_i18n_support==1', {
         'defines': ['V8_I18N_SUPPORT',],
       }],
-      ['v8_compress_startup_data=="bz2"', {
-        'defines': ['COMPRESS_STARTUP_DATA_BZ2',],
-      }],
       ['v8_use_external_startup_data==1', {
         'defines': ['V8_USE_EXTERNAL_STARTUP_DATA',],
       }],
+      ['dcheck_always_on!=0', {
+        'defines': ['DEBUG',],
+      }],
     ],  # conditions
     'configurations': {
       'DebugBaseCommon': {
index 47b2763..ee91e78 100644 (file)
@@ -33,6 +33,8 @@
   'includes': ['toolchain.gypi'],
   'variables': {
     'component%': 'static_library',
+    'make_clang_dir%': '../third_party/llvm-build/Release+Asserts',
+    'clang_xcode%': 0,
     'asan%': 0,
     'tsan%': 0,
     'visibility%': 'hidden',
     #     near-release speeds.
     'v8_optimized_debug%': 0,
 
+    # Use external files for startup data blobs:
+    # the JS builtins sources and the start snapshot.
+    # Embedders that don't use standalone.gypi will need to add
+    # their own default value.
+    'v8_use_external_startup_data%': 0,
+
     # Relative path to icu.gyp from this file.
     'icu_gyp_path': '../third_party/icu/icu.gyp',
 
     'arm_fpu%': 'vfpv3',
     'arm_float_abi%': 'default',
     'arm_thumb': 'default',
+
+    # Default MIPS variable settings.
+    'mips_arch_variant%': 'r2',
+    # Possible values fp32, fp64, fpxx.
+    # fp32 - 32 32-bit FPU registers are available, doubles are placed in
+    #        register pairs.
+    # fp64 - 32 64-bit FPU registers are available.
+    # fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
+    #        detection
+    'mips_fpu_mode%': 'fp32',
   },
   'target_defaults': {
     'variables': {
     }],  # OS=="win"
     ['OS=="mac"', {
       'xcode_settings': {
+        'SDKROOT': 'macosx',
         'SYMROOT': '<(DEPTH)/xcodebuild',
       },
       'target_defaults': {
         ],  # target_conditions
       },  # target_defaults
     }],  # OS=="mac"
+    ['clang==1 and ((OS!="mac" and OS!="ios") or clang_xcode==0) '
+        'and OS!="win"', {
+      'make_global_settings': [
+        ['CC', '<(make_clang_dir)/bin/clang'],
+        ['CXX', '<(make_clang_dir)/bin/clang++'],
+        ['CC.host', '$(CC)'],
+        ['CXX.host', '$(CXX)'],
+      ],
+    }],
+    ['clang==1 and OS=="win"', {
+      'make_global_settings': [
+        # On Windows, gyp's ninja generator only looks at CC.
+        ['CC', '<(make_clang_dir)/bin/clang-cl'],
+      ],
+    }],
   ],
 }
index 20c2c94..ac10065 100644 (file)
@@ -30,7 +30,6 @@
 {
   'variables': {
     'msvs_use_common_release': 0,
-    'gcc_version%': 'unknown',
     'clang%': 0,
     'v8_target_arch%': '<(target_arch)',
     # Native Client builds currently use the V8 ARM JIT and
     # Similar to the ARM hard float ABI but on MIPS.
     'v8_use_mips_abi_hardfloat%': 'true',
 
-    # Default arch variant for MIPS.
-    'mips_arch_variant%': 'r2',
-
-    # Possible values fp32, fp64, fpxx.
-    # fp32 - 32 32-bit FPU registers are available, doubles are placed in
-    #        register pairs.
-    # fp64 - 32 64-bit FPU registers are available.
-    # fpxx - compatibility mode, it chooses fp32 or fp64 depending on runtime
-    #        detection
-    'mips_fpu_mode%': 'fp32',
-
     'v8_enable_backtrace%': 0,
 
     # Enable profiling support. Only required on Windows.
           'V8_TARGET_ARCH_MIPS',
         ],
         'conditions': [
-          ['v8_target_arch==target_arch and android_webview_build==0', {
-            # Target built with a Mips CXX compiler.
-            'target_conditions': [
-              ['_toolset=="target"', {
+          [ 'v8_can_use_fpu_instructions=="true"', {
+            'defines': [
+              'CAN_USE_FPU_INSTRUCTIONS',
+            ],
+          }],
+          [ 'v8_use_mips_abi_hardfloat=="true"', {
+            'defines': [
+              '__mips_hard_float=1',
+              'CAN_USE_FPU_INSTRUCTIONS',
+            ],
+          }, {
+            'defines': [
+              '__mips_soft_float=1'
+            ]
+          }],
+        ],
+        'target_conditions': [
+          ['_toolset=="target"', {
+            'conditions': [
+              ['v8_target_arch==target_arch and android_webview_build==0', {
+                # Target built with a Mips CXX compiler.
                 'cflags': ['-EB'],
                 'ldflags': ['-EB'],
                 'conditions': [
                     'cflags': ['-msoft-float'],
                     'ldflags': ['-msoft-float'],
                   }],
-                  ['mips_fpu_mode=="fp64"', {
-                    'cflags': ['-mfp64'],
-                  }],
-                  ['mips_fpu_mode=="fpxx"', {
-                    'cflags': ['-mfpxx'],
-                  }],
-                  ['mips_fpu_mode=="fp32"', {
-                    'cflags': ['-mfp32'],
-                  }],
                   ['mips_arch_variant=="r6"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32R6',
+                      'FPU_MODE_FP64',
+                    ],
                     'cflags!': ['-mfp32', '-mfpxx'],
                     'cflags': ['-mips32r6', '-Wa,-mips32r6'],
                     'ldflags': [
                     ],
                   }],
                   ['mips_arch_variant=="r2"', {
+                    'conditions': [
+                      [ 'mips_fpu_mode=="fp64"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FP64',
+                        ],
+                        'cflags': ['-mfp64'],
+                      }],
+                      ['mips_fpu_mode=="fpxx"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FPXX',
+                        ],
+                        'cflags': ['-mfpxx'],
+                      }],
+                      ['mips_fpu_mode=="fp32"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FP32',
+                        ],
+                        'cflags': ['-mfp32'],
+                      }],
+                    ],
                     'cflags': ['-mips32r2', '-Wa,-mips32r2'],
                     'ldflags': ['-mips32r2'],
                   }],
                   ['mips_arch_variant=="r1"', {
+                    'defines': [
+                      'FPU_MODE_FP32',
+                    ],
                     'cflags!': ['-mfp64', '-mfpxx'],
                     'cflags': ['-mips32', '-Wa,-mips32'],
                     'ldflags': ['-mips32'],
                   }],
                   ['mips_arch_variant=="rx"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32RX',
+                      'FPU_MODE_FPXX',
+                    ],
                     'cflags!': ['-mfp64', '-mfp32'],
                     'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
                     'ldflags': ['-mips32'],
                   }],
                 ],
+              }, {
+                # 'v8_target_arch!=target_arch'
+                # Target not built with a MIPS CXX compiler (simulator build).
+                'conditions': [
+                  ['mips_arch_variant=="r6"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32R6',
+                      'FPU_MODE_FP64',
+                    ],
+                  }],
+                  ['mips_arch_variant=="r2"', {
+                    'conditions': [
+                      [ 'mips_fpu_mode=="fp64"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FP64',
+                        ],
+                      }],
+                      ['mips_fpu_mode=="fpxx"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FPXX',
+                        ],
+                      }],
+                      ['mips_fpu_mode=="fp32"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FP32',
+                        ],
+                      }],
+                    ],
+                  }],
+                  ['mips_arch_variant=="r1"', {
+                    'defines': [
+                      'FPU_MODE_FP32',
+                    ],
+                  }],
+                  ['mips_arch_variant=="rx"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32RX',
+                      'FPU_MODE_FPXX',
+                    ],
+                  }],
+                ],
               }],
             ],
-          }],
+          }],  #_toolset=="target"
+          ['_toolset=="host"', {
+            'conditions': [
+              ['mips_arch_variant=="rx"', {
+                'defines': [
+                  '_MIPS_ARCH_MIPS32RX',
+                  'FPU_MODE_FPXX',
+                ],
+              }],
+              ['mips_arch_variant=="r6"', {
+                'defines': [
+                  '_MIPS_ARCH_MIPS32R6',
+                  'FPU_MODE_FP64',
+                ],
+              }],
+              ['mips_arch_variant=="r2"', {
+                'conditions': [
+                  ['mips_fpu_mode=="fp64"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32R2',
+                      'FPU_MODE_FP64',
+                    ],
+                  }],
+                  ['mips_fpu_mode=="fpxx"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32R2',
+                      'FPU_MODE_FPXX',
+                    ],
+                  }],
+                  ['mips_fpu_mode=="fp32"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32R2',
+                      'FPU_MODE_FP32'
+                    ],
+                  }],
+                ],
+              }],
+              ['mips_arch_variant=="r1"', {
+                'defines': ['FPU_MODE_FP32',],
+              }],
+            ]
+          }],  #_toolset=="host"
+        ],
+      }],  # v8_target_arch=="mips"
+      ['v8_target_arch=="mipsel"', {
+        'defines': [
+          'V8_TARGET_ARCH_MIPS',
+        ],
+        'conditions': [
           [ 'v8_can_use_fpu_instructions=="true"', {
             'defines': [
               'CAN_USE_FPU_INSTRUCTIONS',
               '__mips_soft_float=1'
             ],
           }],
-          ['mips_arch_variant=="rx"', {
-            'defines': [
-              '_MIPS_ARCH_MIPS32RX',
-              'FPU_MODE_FPXX',
-            ],
-          }],
-          ['mips_arch_variant=="r6"', {
-            'defines': [
-              '_MIPS_ARCH_MIPS32R6',
-              'FPU_MODE_FP64',
-            ],
-          }],
-          ['mips_arch_variant=="r2"', {
-            'defines': ['_MIPS_ARCH_MIPS32R2',],
-            'conditions': [
-              ['mips_fpu_mode=="fp64"', {
-                'defines': ['FPU_MODE_FP64',],
-              }],
-              ['mips_fpu_mode=="fpxx"', {
-                'defines': ['FPU_MODE_FPXX',],
-              }],
-              ['mips_fpu_mode=="fp32"', {
-                'defines': ['FPU_MODE_FP32',],
-              }],
-            ],
-          }],
-          ['mips_arch_variant=="r1"', {
-            'defines': ['FPU_MODE_FP32',],
-          }],
         ],
-      }],  # v8_target_arch=="mips"
-      ['v8_target_arch=="mipsel"', {
-        'defines': [
-          'V8_TARGET_ARCH_MIPS',
-        ],
-        'conditions': [
-          ['v8_target_arch==target_arch and android_webview_build==0', {
-            # Target built with a Mips CXX compiler.
-            'target_conditions': [
-              ['_toolset=="target"', {
+        'target_conditions': [
+          ['_toolset=="target"', {
+            'conditions': [
+              ['v8_target_arch==target_arch and android_webview_build==0', {
+                # Target built with a Mips CXX compiler.
                 'cflags': ['-EL'],
                 'ldflags': ['-EL'],
                 'conditions': [
                     'cflags': ['-msoft-float'],
                     'ldflags': ['-msoft-float'],
                   }],
-                  ['mips_fpu_mode=="fp64"', {
-                    'cflags': ['-mfp64'],
-                  }],
-                  ['mips_fpu_mode=="fpxx"', {
-                    'cflags': ['-mfpxx'],
-                  }],
-                  ['mips_fpu_mode=="fp32"', {
-                    'cflags': ['-mfp32'],
-                  }],
                   ['mips_arch_variant=="r6"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32R6',
+                      'FPU_MODE_FP64',
+                    ],
                     'cflags!': ['-mfp32', '-mfpxx'],
                     'cflags': ['-mips32r6', '-Wa,-mips32r6'],
                     'ldflags': [
                     ],
                   }],
                   ['mips_arch_variant=="r2"', {
+                    'conditions': [
+                      [ 'mips_fpu_mode=="fp64"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FP64',
+                        ],
+                        'cflags': ['-mfp64'],
+                      }],
+                      ['mips_fpu_mode=="fpxx"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FPXX',
+                        ],
+                        'cflags': ['-mfpxx'],
+                      }],
+                      ['mips_fpu_mode=="fp32"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FP32',
+                        ],
+                        'cflags': ['-mfp32'],
+                      }],
+                    ],
                     'cflags': ['-mips32r2', '-Wa,-mips32r2'],
                     'ldflags': ['-mips32r2'],
                   }],
                     'ldflags': ['-mips32'],
                   }],
                   ['mips_arch_variant=="rx"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32RX',
+                      'FPU_MODE_FPXX',
+                    ],
                     'cflags!': ['-mfp64', '-mfp32'],
                     'cflags': ['-mips32', '-Wa,-mips32', '-mfpxx'],
                     'ldflags': ['-mips32'],
                   }],
                   ['mips_arch_variant=="loongson"', {
+                    'defines': [
+                      '_MIPS_ARCH_LOONGSON',
+                      'FPU_MODE_FP32',
+                    ],
                     'cflags!': ['-mfp64', '-mfp32', '-mfpxx'],
                     'cflags': ['-mips3', '-Wa,-mips3'],
                   }],
                 ],
+              }, {
+                # 'v8_target_arch!=target_arch'
+                # Target not built with a MIPS CXX compiler (simulator build).
+                'conditions': [
+                  ['mips_arch_variant=="r6"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32R6',
+                      'FPU_MODE_FP64',
+                    ],
+                  }],
+                  ['mips_arch_variant=="r2"', {
+                    'conditions': [
+                      [ 'mips_fpu_mode=="fp64"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FP64',
+                        ],
+                      }],
+                      ['mips_fpu_mode=="fpxx"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FPXX',
+                        ],
+                      }],
+                      ['mips_fpu_mode=="fp32"', {
+                        'defines': [
+                          '_MIPS_ARCH_MIPS32R2',
+                          'FPU_MODE_FP32',
+                        ],
+                      }],
+                    ],
+                  }],
+                  ['mips_arch_variant=="r1"', {
+                    'defines': [
+                      'FPU_MODE_FP32',
+                    ],
+                  }],
+                  ['mips_arch_variant=="rx"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32RX',
+                      'FPU_MODE_FPXX',
+                    ],
+                  }],
+                  ['mips_arch_variant=="loongson"', {
+                    'defines': [
+                      '_MIPS_ARCH_LOONGSON',
+                      'FPU_MODE_FP32',
+                    ],
+                  }],
+                ],
               }],
             ],
+          }],  #_toolset=="target"
+          ['_toolset=="host"', {
+            'conditions': [
+              ['mips_arch_variant=="rx"', {
+                'defines': [
+                  '_MIPS_ARCH_MIPS32RX',
+                  'FPU_MODE_FPXX',
+                ],
+              }],
+              ['mips_arch_variant=="r6"', {
+                'defines': [
+                  '_MIPS_ARCH_MIPS32R6',
+                  'FPU_MODE_FP64',
+                ],
+              }],
+              ['mips_arch_variant=="r2"', {
+                'conditions': [
+                  ['mips_fpu_mode=="fp64"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32R2',
+                      'FPU_MODE_FP64',
+                    ],
+                  }],
+                  ['mips_fpu_mode=="fpxx"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32R2',
+                      'FPU_MODE_FPXX',
+                    ],
+                  }],
+                  ['mips_fpu_mode=="fp32"', {
+                    'defines': [
+                      '_MIPS_ARCH_MIPS32R2',
+                      'FPU_MODE_FP32'
+                    ],
+                  }],
+                ],
+              }],
+              ['mips_arch_variant=="r1"', {
+                'defines': ['FPU_MODE_FP32',],
+              }],
+              ['mips_arch_variant=="loongson"', {
+                'defines': [
+                  '_MIPS_ARCH_LOONGSON',
+                  'FPU_MODE_FP32',
+                ],
+              }],
+            ]
           }],
+        ],
+      }],  # v8_target_arch=="mipsel"
+      ['v8_target_arch=="mips64el"', {
+        'defines': [
+          'V8_TARGET_ARCH_MIPS64',
+        ],
+        'conditions': [
           [ 'v8_can_use_fpu_instructions=="true"', {
             'defines': [
               'CAN_USE_FPU_INSTRUCTIONS',
               '__mips_soft_float=1'
             ],
           }],
-          ['mips_arch_variant=="rx"', {
-            'defines': [
-              '_MIPS_ARCH_MIPS32RX',
-              'FPU_MODE_FPXX',
-            ],
-          }],
-          ['mips_arch_variant=="r6"', {
-            'defines': [
-              '_MIPS_ARCH_MIPS32R6',
-               'FPU_MODE_FP64',
-            ],
-          }],
-          ['mips_arch_variant=="r2"', {
-            'defines': ['_MIPS_ARCH_MIPS32R2',],
+         ],
+        'target_conditions': [
+          ['_toolset=="target"', {
             'conditions': [
-              ['mips_fpu_mode=="fp64"', {
-                'defines': ['FPU_MODE_FP64',],
-              }],
-              ['mips_fpu_mode=="fpxx"', {
-                'defines': ['FPU_MODE_FPXX',],
-              }],
-              ['mips_fpu_mode=="fp32"', {
-                'defines': ['FPU_MODE_FP32',],
-              }],
-            ],
-          }],
-          ['mips_arch_variant=="r1"', {
-            'defines': ['FPU_MODE_FP32',],
-          }],
-          ['mips_arch_variant=="loongson"', {
-            'defines': [
-              '_MIPS_ARCH_LOONGSON',
-              'FPU_MODE_FP32',
-            ],
-          }],
-        ],
-      }],  # v8_target_arch=="mipsel"
-      ['v8_target_arch=="mips64el"', {
-        'defines': [
-          'V8_TARGET_ARCH_MIPS64',
-        ],
-        'conditions': [
-          ['v8_target_arch==target_arch and android_webview_build==0', {
-            # Target built with a Mips CXX compiler.
-            'target_conditions': [
-              ['_toolset=="target"', {
+              ['v8_target_arch==target_arch and android_webview_build==0', {
                 'cflags': ['-EL'],
                 'ldflags': ['-EL'],
                 'conditions': [
                     'ldflags': ['-msoft-float'],
                   }],
                   ['mips_arch_variant=="r6"', {
+                    'defines': ['_MIPS_ARCH_MIPS64R6',],
                     'cflags': ['-mips64r6', '-mabi=64', '-Wa,-mips64r6'],
                     'ldflags': [
                       '-mips64r6', '-mabi=64',
                     ],
                   }],
                   ['mips_arch_variant=="r2"', {
+                    'defines': ['_MIPS_ARCH_MIPS64R2',],
                     'cflags': ['-mips64r2', '-mabi=64', '-Wa,-mips64r2'],
                     'ldflags': [
                       '-mips64r2', '-mabi=64',
                     ],
                   }],
                 ],
+              }, {
+                # 'v8_target_arch!=target_arch'
+                # Target not built with a MIPS CXX compiler (simulator build).
+                'conditions': [
+                  ['mips_arch_variant=="r6"', {
+                    'defines': ['_MIPS_ARCH_MIPS64R6',],
+                  }],
+                  ['mips_arch_variant=="r2"', {
+                    'defines': ['_MIPS_ARCH_MIPS64R2',],
+                  }],
+                ],
               }],
             ],
-          }],
-          [ 'v8_can_use_fpu_instructions=="true"', {
-            'defines': [
-              'CAN_USE_FPU_INSTRUCTIONS',
-            ],
-          }],
-          [ 'v8_use_mips_abi_hardfloat=="true"', {
-            'defines': [
-              '__mips_hard_float=1',
-              'CAN_USE_FPU_INSTRUCTIONS',
-            ],
-          }, {
-            'defines': [
-              '__mips_soft_float=1'
+          }],  #_toolset=="target"
+          ['_toolset=="host"', {
+            'conditions': [
+              ['mips_arch_variant=="r6"', {
+                'defines': ['_MIPS_ARCH_MIPS64R6',],
+              }],
+              ['mips_arch_variant=="r2"', {
+                'defines': ['_MIPS_ARCH_MIPS64R2',],
+              }],
             ],
-          }],
-          ['mips_arch_variant=="r6"', {
-            'defines': ['_MIPS_ARCH_MIPS64R6',],
-          }],
-          ['mips_arch_variant=="r2"', {
-            'defines': ['_MIPS_ARCH_MIPS64R2',],
-          }],
+          }],  #_toolset=="host"
         ],
       }],  # v8_target_arch=="mips64el"
       ['v8_target_arch=="x64"', {
             },
           }],
         ],
+        'defines': [
+          'ENABLE_SLOW_DCHECKS',
+        ],
       },  # DebugBase0
       # Abstract configuration for v8_optimized_debug == 1.
       'DebugBase1': {
             'LinkIncremental': '2',
           },
         },
+        'defines': [
+          'ENABLE_SLOW_DCHECKS',
+        ],
         'conditions': [
           ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
             OS=="qnx"', {
               '-ffunction-sections',
               '-O1', # TODO(2807) should be -O3.
             ],
-            'conditions': [
-              ['gcc_version==44 and clang==0', {
-                'cflags': [
-                  # Avoid crashes with gcc 4.4 in the v8 test suite.
-                  '-fno-tree-vrp',
-                ],
-              }],
-            ],
           }],
           ['OS=="mac"', {
             'xcode_settings': {
               '-fdata-sections',
               '-ffunction-sections',
             ],
-            'defines': [
-              'OPTIMIZED_DEBUG'
-            ],
             'conditions': [
               # TODO(crbug.com/272548): Avoid -O3 in NaCl
               ['nacl_target_arch=="none"', {
                 'cflags': ['-O2'],
                 'cflags!': ['-O3'],
               }],
-              ['gcc_version==44 and clang==0', {
-                'cflags': [
-                  # Avoid crashes with gcc 4.4 in the v8 test suite.
-                  '-fno-tree-vrp',
-                ],
-              }],
             ],
           }],
           ['OS=="mac"', {
           'V8_ENABLE_CHECKS',
           'OBJECT_PRINT',
           'VERIFY_HEAP',
-          'DEBUG'
+          'DEBUG',
+          'TRACE_MAPS'
         ],
         'conditions': [
           ['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" or \
                 # TODO(2304): pass DISABLE_DEBUG_ASSERT instead of hiding DEBUG.
                 'defines!': [
                   'DEBUG',
+                  'ENABLE_SLOW_DCHECKS',
                 ],
               }],
             ],
               '<(wno_array_bounds)',
             ],
             'conditions': [
-              [ 'gcc_version==44 and clang==0', {
-                'cflags': [
-                  # Avoid crashes with gcc 4.4 in the v8 test suite.
-                  '-fno-tree-vrp',
-                ],
-              }],
               # TODO(crbug.com/272548): Avoid -O3 in NaCl
               ['nacl_target_arch=="none"', {
                 'cflags': ['-O3'],
               '-ffunction-sections',
               '-O2',
             ],
-            'conditions': [
-              [ 'gcc_version==44 and clang==0', {
-                'cflags': [
-                  # Avoid crashes with gcc 4.4 in the v8 test suite.
-                  '-fno-tree-vrp',
-                ],
-              }],
-            ],
           }],
           ['OS=="mac"', {
             'xcode_settings': {
index b7f853c..a7ee88e 100644 (file)
@@ -1,8 +1,9 @@
 CODE_REVIEW_SERVER: https://codereview.chromium.org
 CC_LIST: v8-dev@googlegroups.com
-VIEW_VC: https://code.google.com/p/v8/source/detail?r=
+VIEW_VC: https://chromium.googlesource.com/v8/v8/+/
 STATUS: http://v8-status.appspot.com/status
 TRY_ON_UPLOAD: False
 TRYSERVER_SVN_URL: svn://svn.chromium.org/chrome-try-v8
 TRYSERVER_ROOT: v8
 PROJECT: v8
+PENDING_REF_PREFIX: refs/pending/
index d5433a6..d35f2fc 100644 (file)
@@ -140,6 +140,17 @@ template<typename T> class CustomArguments;
 class PropertyCallbackArguments;
 class FunctionCallbackArguments;
 class GlobalHandles;
+
+class CallbackData {
+ public:
+  V8_INLINE v8::Isolate* GetIsolate() const { return isolate_; }
+
+ protected:
+  explicit CallbackData(v8::Isolate* isolate) : isolate_(isolate) {}
+
+ private:
+  v8::Isolate* isolate_;
+};
 }
 
 
@@ -418,22 +429,53 @@ template <class T> class Eternal {
 };
 
 
-template<class T, class P>
-class WeakCallbackData {
+template <typename T>
+class PhantomCallbackData : public internal::CallbackData {
+ public:
+  typedef void (*Callback)(const PhantomCallbackData<T>& data);
+
+  V8_INLINE T* GetParameter() const { return parameter_; }
+
+  PhantomCallbackData<T>(Isolate* isolate, T* parameter)
+      : internal::CallbackData(isolate), parameter_(parameter) {}
+
+ private:
+  T* parameter_;
+};
+
+
+template <class T, class P>
+class WeakCallbackData : public PhantomCallbackData<P> {
  public:
   typedef void (*Callback)(const WeakCallbackData<T, P>& data);
 
-  V8_INLINE Isolate* GetIsolate() const { return isolate_; }
   V8_INLINE Local<T> GetValue() const { return handle_; }
-  V8_INLINE P* GetParameter() const { return parameter_; }
 
  private:
   friend class internal::GlobalHandles;
-  WeakCallbackData(Isolate* isolate, Local<T> handle, P* parameter)
-    : isolate_(isolate), handle_(handle), parameter_(parameter) { }
-  Isolate* isolate_;
+  WeakCallbackData(Isolate* isolate, P* parameter, Local<T> handle)
+      : PhantomCallbackData<P>(isolate, parameter), handle_(handle) {}
   Local<T> handle_;
-  P* parameter_;
+};
+
+
+template <typename T, typename U>
+class InternalFieldsCallbackData : public internal::CallbackData {
+ public:
+  typedef void (*Callback)(const InternalFieldsCallbackData<T, U>& data);
+
+  InternalFieldsCallbackData(Isolate* isolate, T* internalField1,
+                             U* internalField2)
+      : internal::CallbackData(isolate),
+        internal_field1_(internalField1),
+        internal_field2_(internalField2) {}
+
+  V8_INLINE T* GetInternalField1() const { return internal_field1_; }
+  V8_INLINE U* GetInternalField2() const { return internal_field2_; }
+
+ private:
+  T* internal_field1_;
+  U* internal_field2_;
 };
 
 
@@ -471,22 +513,23 @@ template <class T> class PersistentBase {
   template <class S>
   V8_INLINE void Reset(Isolate* isolate, const PersistentBase<S>& other);
 
-  V8_INLINE bool IsEmpty() const { return val_ == 0; }
+  V8_INLINE bool IsEmpty() const { return val_ == NULL; }
+  V8_INLINE void Empty() { val_ = 0; }
 
   template <class S>
   V8_INLINE bool operator==(const PersistentBase<S>& that) const {
     internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
     internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
-    if (a == 0) return b == 0;
-    if (b == 0) return false;
+    if (a == NULL) return b == NULL;
+    if (b == NULL) return false;
     return *a == *b;
   }
 
   template <class S> V8_INLINE bool operator==(const Handle<S>& that) const {
     internal::Object** a = reinterpret_cast<internal::Object**>(this->val_);
     internal::Object** b = reinterpret_cast<internal::Object**>(that.val_);
-    if (a == 0) return b == 0;
-    if (b == 0) return false;
+    if (a == NULL) return b == NULL;
+    if (b == NULL) return false;
     return *a == *b;
   }
 
@@ -519,14 +562,17 @@ template <class T> class PersistentBase {
   // Phantom persistents work like weak persistents, except that the pointer to
   // the object being collected is not available in the finalization callback.
   // This enables the garbage collector to collect the object and any objects
-  // it references transitively in one GC cycle.
+  // it references transitively in one GC cycle. At the moment you can either
+  // specify a parameter for the callback or the location of two internal
+  // fields in the dying object.
   template <typename P>
   V8_INLINE void SetPhantom(P* parameter,
-                            typename WeakCallbackData<T, P>::Callback callback);
+                            typename PhantomCallbackData<P>::Callback callback);
 
-  template <typename S, typename P>
-  V8_INLINE void SetPhantom(P* parameter,
-                            typename WeakCallbackData<S, P>::Callback callback);
+  template <typename P, typename Q>
+  V8_INLINE void SetPhantom(
+      void (*callback)(const InternalFieldsCallbackData<P, Q>&),
+      int internal_field_index1, int internal_field_index2);
 
   template<typename P>
   V8_INLINE P* ClearWeak();
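
A minimal sketch of how an embedder might use the two SetPhantom overloads
declared above together with the new callback data classes. The `State`
struct, `MakePhantom`, and the handle are hypothetical embedder-side names,
not part of this patch:

    #include <v8.h>

    struct State { int id; };

    // Parameter flavour: by the time this runs, the JS object is already
    // unreachable; only the isolate and the parameter are available.
    void OnPhantom(const v8::PhantomCallbackData<State>& data) {
      delete data.GetParameter();
    }

    // Internal-fields flavour: V8 passes back the pointers stored in the
    // two requested internal fields of the dying object.
    void OnPhantomFields(
        const v8::InternalFieldsCallbackData<State, void>& data) {
      delete data.GetInternalField1();
    }

    void MakePhantom(v8::Persistent<v8::Object>& handle) {
      handle.SetPhantom(new State, OnPhantom);
      // Alternatively: handle.SetPhantom(OnPhantomFields, 0, 1);
    }

A handle carries only one weak/phantom registration at a time, so the second
call would replace the first.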
@@ -1011,7 +1057,7 @@ class V8_EXPORT Script {
   /**
    * Runs the script returning the resulting value. It will be run in the
    * context in which it was created (ScriptCompiler::CompileBound or
-   * UnboundScript::BindToGlobalContext()).
+   * UnboundScript::BindToCurrentContext()).
    */
   Local<Value> Run();
 
@@ -1045,7 +1091,11 @@ class V8_EXPORT ScriptCompiler {
       BufferOwned
     };
 
-    CachedData() : data(NULL), length(0), buffer_policy(BufferNotOwned) {}
+    CachedData()
+        : data(NULL),
+          length(0),
+          rejected(false),
+          buffer_policy(BufferNotOwned) {}
 
     // If buffer_policy is BufferNotOwned, the caller keeps the ownership of
     // data and guarantees that it stays alive until the CachedData object is
@@ -1058,6 +1108,7 @@ class V8_EXPORT ScriptCompiler {
     // which will be called when V8 no longer needs the data.
     const uint8_t* data;
     int length;
+    bool rejected;
     BufferPolicy buffer_policy;
 
    private:
@@ -1238,6 +1289,26 @@ class V8_EXPORT ScriptCompiler {
   static Local<Script> Compile(Isolate* isolate, StreamedSource* source,
                                Handle<String> full_source_string,
                                const ScriptOrigin& origin);
+
+  /**
+   * Return a version tag for CachedData for the current V8 version & flags.
+   *
+   * This value is meant only for determining whether a previously generated
+   * CachedData instance is still valid; the tag has no other meaning.
+   *
+   * Background: The data carried by CachedData may depend on the exact
+   *   V8 version number or current compiler flags. This means that when
+   *   persisting CachedData, the embedder must take care not to pass in
+   *   data from another V8 version, or the same version with different
+   *   features enabled.
+   *
+   *   The easiest way to do so is to clear the embedder's cache on any
+   *   such change.
+   *
+   *   Alternatively, this tag can be stored alongside the cached data and
+   *   compared when it is being used.
+   */
+  static uint32_t CachedDataVersionTag();
 };
 
 
@@ -1797,6 +1868,15 @@ class V8_EXPORT Boolean : public Primitive {
  */
 class V8_EXPORT Name : public Primitive {
  public:
+  /**
+   * Returns the identity hash for this object. The current implementation
+   * uses an inline property on the object to store the identity hash.
+   *
+   * The return value will never be 0. Also, it is not guaranteed to be
+   * unique.
+   */
+  int GetIdentityHash();
+
   V8_INLINE static Name* Cast(v8::Value* obj);
  private:
   static void CheckCast(v8::Value* obj);
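
A short sketch of the new Name API; since the hash is not guaranteed to be
unique, an embedder-side table keyed on it must still compare names on
collision. `Bucket` is a hypothetical name:

    #include <v8.h>

    int Bucket(v8::Local<v8::Name> name, int num_buckets) {
      return name->GetIdentityHash() % num_buckets;  // hash is never 0
    }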
@@ -2458,6 +2538,8 @@ class V8_EXPORT Object : public Value {
   /** Gets the number of internal fields for this Object. */
   int InternalFieldCount();
 
+  static const int kNoInternalFieldIndex = -1;
+
   /** Same as above, but works for Persistents */
   V8_INLINE static int InternalFieldCount(
       const PersistentBase<Object>& object) {
@@ -3520,6 +3602,51 @@ typedef void (*NamedPropertyEnumeratorCallback)(
     const PropertyCallbackInfo<Array>& info);
 
 
+// TODO(dcarney): Deprecate and remove previous typedefs, and replace
+// GenericNamedPropertyFooCallback with just NamedPropertyFooCallback.
+/**
+ * GenericNamedProperty[Getter|Setter] are used as interceptors on objects.
+ * See ObjectTemplate::SetNamedPropertyHandler.
+ */
+typedef void (*GenericNamedPropertyGetterCallback)(
+    Local<Name> property, const PropertyCallbackInfo<Value>& info);
+
+
+/**
+ * Returns the value if the setter intercepts the request.
+ * Otherwise, returns an empty handle.
+ */
+typedef void (*GenericNamedPropertySetterCallback)(
+    Local<Name> property, Local<Value> value,
+    const PropertyCallbackInfo<Value>& info);
+
+
+/**
+ * Returns a non-empty handle if the interceptor intercepts the request.
+ * The result is an integer encoding property attributes (like v8::None,
+ * v8::DontEnum, etc.)
+ */
+typedef void (*GenericNamedPropertyQueryCallback)(
+    Local<Name> property, const PropertyCallbackInfo<Integer>& info);
+
+
+/**
+ * Returns a non-empty handle if the deleter intercepts the request.
+ * The return value is true if the property could be deleted and false
+ * otherwise.
+ */
+typedef void (*GenericNamedPropertyDeleterCallback)(
+    Local<Name> property, const PropertyCallbackInfo<Boolean>& info);
+
+
+/**
+ * Returns an array containing the names of the properties the named
+ * property getter intercepts.
+ */
+typedef void (*GenericNamedPropertyEnumeratorCallback)(
+    const PropertyCallbackInfo<Array>& info);
+
+
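
A possible getter matching these typedefs; `MyGetter` is a hypothetical name.
Leaving the return value unset lets the lookup fall through to the object
itself:

    #include <v8.h>

    void MyGetter(v8::Local<v8::Name> property,
                  const v8::PropertyCallbackInfo<v8::Value>& info) {
      if (!property->IsString()) return;  // e.g. ignore symbol-named lookups
      info.GetReturnValue().Set(
          v8::String::NewFromUtf8(info.GetIsolate(), "intercepted"));
    }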
 /**
  * Returns the value of the property if the getter intercepts the
  * request.  Otherwise, returns an empty handle.
@@ -3772,6 +3899,56 @@ class V8_EXPORT FunctionTemplate : public Template {
 };
 
 
+struct NamedPropertyHandlerConfiguration {
+  NamedPropertyHandlerConfiguration(
+      /** Note: getter is required **/
+      GenericNamedPropertyGetterCallback getter = 0,
+      GenericNamedPropertySetterCallback setter = 0,
+      GenericNamedPropertyQueryCallback query = 0,
+      GenericNamedPropertyDeleterCallback deleter = 0,
+      GenericNamedPropertyEnumeratorCallback enumerator = 0,
+      Handle<Value> data = Handle<Value>())
+      : getter(getter),
+        setter(setter),
+        query(query),
+        deleter(deleter),
+        enumerator(enumerator),
+        data(data) {}
+
+  GenericNamedPropertyGetterCallback getter;
+  GenericNamedPropertySetterCallback setter;
+  GenericNamedPropertyQueryCallback query;
+  GenericNamedPropertyDeleterCallback deleter;
+  GenericNamedPropertyEnumeratorCallback enumerator;
+  Handle<Value> data;
+};
+
+
+struct IndexedPropertyHandlerConfiguration {
+  IndexedPropertyHandlerConfiguration(
+      /** Note: getter is required **/
+      IndexedPropertyGetterCallback getter = 0,
+      IndexedPropertySetterCallback setter = 0,
+      IndexedPropertyQueryCallback query = 0,
+      IndexedPropertyDeleterCallback deleter = 0,
+      IndexedPropertyEnumeratorCallback enumerator = 0,
+      Handle<Value> data = Handle<Value>())
+      : getter(getter),
+        setter(setter),
+        query(query),
+        deleter(deleter),
+        enumerator(enumerator),
+        data(data) {}
+
+  IndexedPropertyGetterCallback getter;
+  IndexedPropertySetterCallback setter;
+  IndexedPropertyQueryCallback query;
+  IndexedPropertyDeleterCallback deleter;
+  IndexedPropertyEnumeratorCallback enumerator;
+  Handle<Value> data;
+};
+
+
 /**
  * An ObjectTemplate is used to create objects at runtime.
  *
@@ -3841,6 +4018,9 @@ class V8_EXPORT ObjectTemplate : public Template {
    * from this object template, the provided callback is invoked instead of
    * accessing the property directly on the JavaScript object.
    *
+   * Note that new code should use the second version that can intercept
+   * symbol-named properties as well as string-named properties.
+   *
    * \param getter The callback to invoke when getting a property.
    * \param setter The callback to invoke when setting a property.
    * \param query The callback to invoke to check if a property is present,
@@ -3851,6 +4031,7 @@ class V8_EXPORT ObjectTemplate : public Template {
    * \param data A piece of data that will be passed to the callbacks
    *   whenever they are invoked.
    */
+  // TODO(dcarney): deprecate
   void SetNamedPropertyHandler(
       NamedPropertyGetterCallback getter,
       NamedPropertySetterCallback setter = 0,
@@ -3858,6 +4039,7 @@ class V8_EXPORT ObjectTemplate : public Template {
       NamedPropertyDeleterCallback deleter = 0,
       NamedPropertyEnumeratorCallback enumerator = 0,
       Handle<Value> data = Handle<Value>());
+  void SetHandler(const NamedPropertyHandlerConfiguration& configuration);
 
   /**
    * Sets an indexed property handler on the object template.
@@ -3875,14 +4057,18 @@ class V8_EXPORT ObjectTemplate : public Template {
    * \param data A piece of data that will be passed to the callbacks
    *   whenever they are invoked.
    */
+  void SetHandler(const IndexedPropertyHandlerConfiguration& configuration);
+  // TODO(dcarney): deprecate
   void SetIndexedPropertyHandler(
       IndexedPropertyGetterCallback getter,
       IndexedPropertySetterCallback setter = 0,
       IndexedPropertyQueryCallback query = 0,
       IndexedPropertyDeleterCallback deleter = 0,
       IndexedPropertyEnumeratorCallback enumerator = 0,
-      Handle<Value> data = Handle<Value>());
-
+      Handle<Value> data = Handle<Value>()) {
+    SetHandler(IndexedPropertyHandlerConfiguration(getter, setter, query,
+                                                   deleter, enumerator, data));
+  }
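
Wiring the hypothetical `MyGetter` from the earlier sketch through the new
configuration struct; only the getter is required, the remaining callbacks
and the data handle default to empty:

    #include <v8.h>

    void Install(v8::Local<v8::ObjectTemplate> templ) {
      templ->SetHandler(v8::NamedPropertyHandlerConfiguration(MyGetter));
    }

As the inline body above shows, the old SetIndexedPropertyHandler now simply
forwards to the IndexedPropertyHandlerConfiguration overload of SetHandler.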
   /**
    * Sets the callback to be used when calling instances created from
    * this template as a function.  If no callback is set, instances
@@ -4188,9 +4374,17 @@ class V8_EXPORT Exception {
   static Local<Value> TypeError(Handle<String> message);
   static Local<Value> Error(Handle<String> message);
 
-  static Local<Message> GetMessage(Handle<Value> exception);
+  /**
+   * Creates an error message for the given exception.
+   * Will try to reconstruct the original stack trace from the exception value,
+   * or capture the current stack trace if not available.
+   */
+  static Local<Message> CreateMessage(Handle<Value> exception);
 
-  // DEPRECATED. Use GetMessage()->GetStackTrace()
+  /**
+   * Returns the original stack trace that was captured at the creation time
+   * of a given exception, or an empty handle if not available.
+   */
   static Local<StackTrace> GetStackTrace(Handle<Value> exception);
 };
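A minimal sketch of the new flow, assuming a TryCatch around script execution (ReportCaught is illustrative):

    static void ReportCaught(v8::TryCatch* try_catch) {
      // Reconstructs the stack trace captured at throw time, or captures the
      // current one if the exception value carries none.
      v8::Local<v8::Message> message =
          v8::Exception::CreateMessage(try_catch->Exception());
      if (message.IsEmpty()) return;
      v8::Local<v8::StackTrace> trace = message->GetStackTrace();
      // ... report message->Get() and walk the |trace| frames ...
    }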
 
@@ -4207,18 +4401,19 @@ typedef void* (*CreateHistogramCallback)(const char* name,
 typedef void (*AddHistogramSampleCallback)(void* histogram, int sample);
 
 // --- Memory Allocation Callback ---
-  enum ObjectSpace {
-    kObjectSpaceNewSpace = 1 << 0,
-    kObjectSpaceOldPointerSpace = 1 << 1,
-    kObjectSpaceOldDataSpace = 1 << 2,
-    kObjectSpaceCodeSpace = 1 << 3,
-    kObjectSpaceMapSpace = 1 << 4,
-    kObjectSpaceLoSpace = 1 << 5,
-
-    kObjectSpaceAll = kObjectSpaceNewSpace | kObjectSpaceOldPointerSpace |
-      kObjectSpaceOldDataSpace | kObjectSpaceCodeSpace | kObjectSpaceMapSpace |
-      kObjectSpaceLoSpace
-  };
+enum ObjectSpace {
+  kObjectSpaceNewSpace = 1 << 0,
+  kObjectSpaceOldPointerSpace = 1 << 1,
+  kObjectSpaceOldDataSpace = 1 << 2,
+  kObjectSpaceCodeSpace = 1 << 3,
+  kObjectSpaceMapSpace = 1 << 4,
+  kObjectSpaceCellSpace = 1 << 5,
+  kObjectSpacePropertyCellSpace = 1 << 6,
+  kObjectSpaceLoSpace = 1 << 7,
+  kObjectSpaceAll = kObjectSpaceNewSpace | kObjectSpaceOldPointerSpace |
+                    kObjectSpaceOldDataSpace | kObjectSpaceCodeSpace |
+                    kObjectSpaceMapSpace | kObjectSpaceLoSpace
+};
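The values are single bits so that selections can be OR-ed together. A sketch, assuming the AddMemoryAllocationCallback entry point and the usual kAllocationActionAll value of this API generation (the callback body is illustrative):

    // Matches the MemoryAllocationCallback signature of this API generation.
    static void OnAllocation(v8::ObjectSpace space,
                             v8::AllocationAction action, int size) {
      // e.g. record |size| per space for embedder-side memory accounting.
    }

    static void InstallAllocationCallback() {
      v8::V8::AddMemoryAllocationCallback(
          OnAllocation,
          static_cast<v8::ObjectSpace>(v8::kObjectSpaceNewSpace |
                                       v8::kObjectSpaceCodeSpace),
          v8::kAllocationActionAll);  // assumed: allocate | free
    }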
 
   enum AllocationAction {
     kAllocationActionAllocate = 1 << 0,
@@ -4252,7 +4447,7 @@ class PromiseRejectMessage {
   V8_INLINE PromiseRejectEvent GetEvent() const { return event_; }
   V8_INLINE Handle<Value> GetValue() const { return value_; }
 
-  // DEPRECATED. Use v8::Exception::GetMessage(GetValue())->GetStackTrace()
+  // DEPRECATED. Use v8::Exception::CreateMessage(GetValue())->GetStackTrace()
   V8_INLINE Handle<StackTrace> GetStackTrace() const { return stack_trace_; }
 
  private:
@@ -4617,6 +4812,8 @@ class V8_EXPORT Isolate {
   /**
    * Returns the entered isolate for the current thread or NULL in
    * case there is no current isolate.
+   *
+   * This method must not be called before V8::Initialize() has been invoked.
    */
   static Isolate* GetCurrent();
 
@@ -4854,8 +5051,7 @@ class V8_EXPORT Isolate {
    * Request V8 to interrupt long running JavaScript code and invoke
    * the given |callback| passing the given |data| to it. After |callback|
    * returns control will be returned to the JavaScript code.
-   * At any given moment V8 can remember only a single callback for the very
-   * last interrupt request.
+   * There may be a number of interrupt requests in flight.
    * Can be called from another thread without acquiring a |Locker|.
    * Registered |callback| must not reenter interrupted Isolate.
    */
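A sketch of the intended pattern, assuming the InterruptCallback signature (Isolate*, void*); the flag plumbing is illustrative:

    // Runs on the isolate's thread at a safe point; per the contract above it
    // must not re-enter the interrupted isolate.
    static void OnInterrupt(v8::Isolate* isolate, void* data) {
      *static_cast<bool*>(data) = true;
    }

    // Called from a watchdog thread; no Locker is required for this call.
    static void PokeIsolate(v8::Isolate* isolate, bool* flag) {
      isolate->RequestInterrupt(OnInterrupt, flag);
    }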
@@ -4865,7 +5061,8 @@ class V8_EXPORT Isolate {
    * Clear interrupt request created by |RequestInterrupt|.
    * Can be called from another thread without acquiring a |Locker|.
    */
-  void ClearInterrupt();
+  V8_DEPRECATED("There's no way to clear interrupts in flight.",
+                void ClearInterrupt());
 
   /**
    * Request garbage collection in this Isolate. It is only valid to call this
@@ -4954,17 +5151,23 @@ class V8_EXPORT Isolate {
 
   /**
    * Optional notification that the embedder is idle.
-   * V8 uses the notification to reduce memory footprint.
+   * V8 uses the notification to perform garbage collection.
    * This call can be used repeatedly if the embedder remains idle.
    * Returns true if the embedder should stop calling IdleNotification
    * until real work has been done.  This indicates that V8 has done
    * as much cleanup as it will be able to do.
    *
-   * The idle_time_in_ms argument specifies the time V8 has to do reduce
-   * the memory footprint. There is no guarantee that the actual work will be
+   * The idle_time_in_ms argument specifies the time V8 has to perform
+   * garbage collection. There is no guarantee that the actual work will be
    * done within the time limit.
+   * The deadline_in_seconds argument specifies the deadline V8 has to finish
+   * garbage collection work. deadline_in_seconds is compared with
+   * MonotonicallyIncreasingTime() and should be based on the same timebase as
+   * that function. There is no guarantee that the actual work will be done
+   * within the time limit.
    */
   bool IdleNotification(int idle_time_in_ms);
+  bool IdleNotificationDeadline(double deadline_in_seconds);
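A sketch of an embedder-side idle handler under the deadline variant; NowInSeconds stands in for the embedder's monotonic clock, which must share a timebase with MonotonicallyIncreasingTime():

    static double NowInSeconds();  // hypothetical embedder clock, in seconds

    static void OnEmbedderIdle(v8::Isolate* isolate) {
      // Offer V8 up to 10ms of idle work, expressed as an absolute deadline.
      if (isolate->IdleNotificationDeadline(NowInSeconds() + 0.010)) {
        // V8 is done cleaning up; stop calling until real work has been done.
      }
    }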
 
   /**
    * Optional notification that the system is running low on memory.
@@ -4977,8 +5180,11 @@ class V8_EXPORT Isolate {
    * these notifications to guide the GC heuristic. Returns the number
    * of context disposals - including this one - since the last time
    * V8 had a chance to clean up.
+   *
+   * The optional parameter |dependant_context| specifies whether the disposed
+   * context depended on state from other contexts.
    */
-  int ContextDisposedNotification();
+  int ContextDisposedNotification(bool dependant_context = true);
 
   /**
    * Allows the host application to provide the address of a function that is
@@ -5127,43 +5333,12 @@ class V8_EXPORT Isolate {
 
 class V8_EXPORT StartupData {
  public:
-  enum CompressionAlgorithm {
-    kUncompressed,
-    kBZip2
-  };
-
   const char* data;
-  int compressed_size;
   int raw_size;
 };
 
 
 /**
- * A helper class for driving V8 startup data decompression.  It is based on
- * "CompressedStartupData" API functions from the V8 class.  It isn't mandatory
- * for an embedder to use this class, instead, API functions can be used
- * directly.
- *
- * For an example of the class usage, see the "shell.cc" sample application.
- */
-class V8_EXPORT StartupDataDecompressor {  // NOLINT
- public:
-  StartupDataDecompressor();
-  virtual ~StartupDataDecompressor();
-  int Decompress();
-
- protected:
-  virtual int DecompressData(char* raw_data,
-                             int* raw_data_size,
-                             const char* compressed_data,
-                             int compressed_data_size) = 0;
-
- private:
-  char** raw_data;
-};
-
-
-/**
  * EntropySource is used as a callback function when v8 needs a source
  * of entropy.
  */
@@ -5220,30 +5395,6 @@ class V8_EXPORT V8 {
   V8_INLINE static bool IsDead();
 
   /**
-   * The following 4 functions are to be used when V8 is built with
-   * the 'compress_startup_data' flag enabled. In this case, the
-   * embedder must decompress startup data prior to initializing V8.
-   *
-   * This is how interaction with V8 should look like:
-   *   int compressed_data_count = v8::V8::GetCompressedStartupDataCount();
-   *   v8::StartupData* compressed_data =
-   *     new v8::StartupData[compressed_data_count];
-   *   v8::V8::GetCompressedStartupData(compressed_data);
-   *   ... decompress data (compressed_data can be updated in-place) ...
-   *   v8::V8::SetDecompressedStartupData(compressed_data);
-   *   ... now V8 can be initialized
-   *   ... make sure the decompressed data stays valid until V8 shutdown
-   *
-   * A helper class StartupDataDecompressor is provided. It implements
-   * the protocol of the interaction described above, and can be used in
-   * most cases instead of calling these API functions directly.
-   */
-  static StartupData::CompressionAlgorithm GetCompressedStartupDataAlgorithm();
-  static int GetCompressedStartupDataCount();
-  static void GetCompressedStartupData(StartupData* compressed_data);
-  static void SetDecompressedStartupData(StartupData* decompressed_data);
-
-  /**
    * Hand startup data to V8, in case the embedder has chosen to build
    * V8 with external startup data.
    *
@@ -5262,6 +5413,13 @@ class V8_EXPORT V8 {
   static void SetSnapshotDataBlob(StartupData* startup_blob);
 
   /**
+   * Creates a new isolate and context for the purpose of capturing a snapshot.
+   * Returns { NULL, 0 } on failure.
+   * The caller owns the data array in the return value.
+   */
+  static StartupData CreateSnapshotDataBlob();
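A sketch of the expected mksnapshot-style flow (lifetime handling simplified):

    static void BootFromFreshSnapshot() {
      v8::StartupData blob = v8::V8::CreateSnapshotDataBlob();
      if (blob.data == NULL) return;       // { NULL, 0 } signals failure
      v8::V8::SetSnapshotDataBlob(&blob);  // must stay valid while V8 runs
      // ... initialize V8, create isolates from the snapshot, shut down ...
      delete[] blob.data;                  // the caller owns the data array
    }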
+
+  /**
    * Adds a message listener.
    *
    * The same message listener can be added more than once and in that
@@ -5509,7 +5667,14 @@ class V8_EXPORT V8 {
   static void DisposeGlobal(internal::Object** global_handle);
   typedef WeakCallbackData<Value, void>::Callback WeakCallback;
   static void MakeWeak(internal::Object** global_handle, void* data,
-                       WeakCallback weak_callback, WeakHandleType phantom);
+                       WeakCallback weak_callback);
+  static void MakePhantom(internal::Object** global_handle, void* data,
+                          PhantomCallbackData<void>::Callback weak_callback);
+  static void MakePhantom(
+      internal::Object** global_handle,
+      InternalFieldsCallbackData<void, void>::Callback weak_callback,
+      int internal_field_index1,
+      int internal_field_index2 = Object::kNoInternalFieldIndex);
   static void* ClearWeak(internal::Object** global_handle);
   static void Eternalize(Isolate* isolate,
                          Value* handle,
@@ -6118,12 +6283,12 @@ class Internals {
 
   static const int kNodeClassIdOffset = 1 * kApiPointerSize;
   static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
-  static const int kNodeStateMask = 0xf;
+  static const int kNodeStateMask = 0x7;
   static const int kNodeStateIsWeakValue = 2;
   static const int kNodeStateIsPendingValue = 3;
   static const int kNodeStateIsNearDeathValue = 4;
-  static const int kNodeIsIndependentShift = 4;
-  static const int kNodeIsPartiallyDependentShift = 5;
+  static const int kNodeIsIndependentShift = 3;
+  static const int kNodeIsPartiallyDependentShift = 4;
 
   static const int kJSObjectType = 0xbd;
   static const int kFirstNonstringType = 0x80;
@@ -6381,7 +6546,7 @@ void PersistentBase<T>::SetWeak(
   TYPE_CHECK(S, T);
   typedef typename WeakCallbackData<Value, void>::Callback Callback;
   V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
-               reinterpret_cast<Callback>(callback), V8::NonphantomHandle);
+               reinterpret_cast<Callback>(callback));
 }
 
 
@@ -6395,21 +6560,24 @@ void PersistentBase<T>::SetWeak(
 
 
 template <class T>
-template <typename S, typename P>
+template <typename P>
 void PersistentBase<T>::SetPhantom(
-    P* parameter, typename WeakCallbackData<S, P>::Callback callback) {
-  TYPE_CHECK(S, T);
-  typedef typename WeakCallbackData<Value, void>::Callback Callback;
-  V8::MakeWeak(reinterpret_cast<internal::Object**>(this->val_), parameter,
-               reinterpret_cast<Callback>(callback), V8::PhantomHandle);
+    P* parameter, typename PhantomCallbackData<P>::Callback callback) {
+  typedef typename PhantomCallbackData<void>::Callback Callback;
+  V8::MakePhantom(reinterpret_cast<internal::Object**>(this->val_), parameter,
+                  reinterpret_cast<Callback>(callback));
 }
 
 
 template <class T>
-template <typename P>
+template <typename U, typename V>
 void PersistentBase<T>::SetPhantom(
-    P* parameter, typename WeakCallbackData<T, P>::Callback callback) {
-  SetPhantom<T, P>(parameter, callback);
+    void (*callback)(const InternalFieldsCallbackData<U, V>&),
+    int internal_field_index1, int internal_field_index2) {
+  typedef typename InternalFieldsCallbackData<void, void>::Callback Callback;
+  V8::MakePhantom(reinterpret_cast<internal::Object**>(this->val_),
+                  reinterpret_cast<Callback>(callback), internal_field_index1,
+                  internal_field_index2);
 }
 
 
index 721ef37..d1ca22c 100644
 // -----------------------------------------------------------------------------
 // Compiler detection
 //
-//  V8_CC_CLANG   - Clang
-//  V8_CC_GNU     - GNU C++
+//  V8_CC_GNU     - GCC, or clang in gcc mode
 //  V8_CC_INTEL   - Intel C++
 //  V8_CC_MINGW   - Minimalist GNU for Windows
 //  V8_CC_MINGW32 - Minimalist GNU for Windows (mingw32)
 //  V8_CC_MINGW64 - Minimalist GNU for Windows (mingw-w64)
-//  V8_CC_MSVC    - Microsoft Visual C/C++
+//  V8_CC_MSVC    - Microsoft Visual C/C++, or clang in cl.exe mode
 //
 // C++11 feature detection
 //
 
 #if defined(__clang__)
 
-# define V8_CC_CLANG 1
+#if defined(__GNUC__)  // Clang in gcc mode.
+# define V8_CC_GNU 1
+#elif defined(_MSC_VER)  // Clang in cl mode.
+# define V8_CC_MSVC 1
+#endif
 
 // Clang defines __alignof__ as alias for __alignof
 # define V8_HAS___ALIGNOF 1
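Downstream code that previously keyed on V8_CC_CLANG should now branch on the mode-based macros instead; a minimal sketch with a hypothetical MY_NOINLINE:

    #if V8_CC_GNU          // GCC, or clang in gcc mode
    # define MY_NOINLINE __attribute__((noinline))
    #elif V8_CC_MSVC       // cl.exe, or clang in cl mode
    # define MY_NOINLINE __declspec(noinline)
    #else
    # define MY_NOINLINE
    #endif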
diff --git a/deps/v8/samples/lineprocessor.cc b/deps/v8/samples/lineprocessor.cc
deleted file mode 100644
index 69bfab4..0000000
+++ /dev/null
@@ -1,385 +0,0 @@
-// Copyright 2012 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include <include/v8.h>
-
-#include <include/libplatform/libplatform.h>
-#include <include/v8-debug.h>
-
-#include <fcntl.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-
-/**
- * This sample program demonstrates certain aspects of debugging a
- * standalone V8-based application.
- *
- * The program reads the input stream, processes it line by line, and prints
- * the result to the output. The actual processing is done by a custom
- * JavaScript script, which is specified via command-line parameters.
- *
- * The main cycle of the program will sequentially read lines from standard
- * input, process them and print to standard output until input closes.
- * There are two possible configurations with regard to the main cycle.
- *
- * 1. The main cycle is on the C++ side. The program should be run with the
- * --main-cycle-in-cpp option. The script must declare a function named
- * "ProcessLine". The main cycle in C++ reads lines and calls this function
- * to process each one. This is a sample script:
-
-function ProcessLine(input_line) {
-  return ">>>" + input_line + "<<<";
-}
-
- *
- * 2. The main cycle is in JavaScript. The program should be run with the
- * --main-cycle-in-js option. The script runs only once and is given an API
- * of two global functions: "read_line" and "print". It should read input
- * and print converted lines to the output itself. This is a sample script:
-
-while (true) {
-  var line = read_line();
-  if (!line) {
-    break;
-  }
-  var res = line + " | " + line;
-  print(res);
-}
- */
-
-enum MainCycleType {
-  CycleInCpp,
-  CycleInJs
-};
-
-const char* ToCString(const v8::String::Utf8Value& value);
-void ReportException(v8::Isolate* isolate, v8::TryCatch* handler);
-v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name);
-v8::Handle<v8::String> ReadLine();
-
-void Print(const v8::FunctionCallbackInfo<v8::Value>& args);
-void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args);
-bool RunCppCycle(v8::Handle<v8::Script> script,
-                 v8::Local<v8::Context> context,
-                 bool report_exceptions);
-
-
-v8::Persistent<v8::Context> debug_message_context;
-
-int RunMain(int argc, char* argv[]) {
-  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
-  v8::Isolate* isolate = v8::Isolate::New();
-  v8::Isolate::Scope isolate_scope(isolate);
-  v8::HandleScope handle_scope(isolate);
-
-  v8::Handle<v8::String> script_source;
-  v8::Handle<v8::Value> script_name;
-  int script_param_counter = 0;
-
-  MainCycleType cycle_type = CycleInCpp;
-
-  for (int i = 1; i < argc; i++) {
-    const char* str = argv[i];
-    if (strcmp(str, "-f") == 0) {
-      // Ignore any -f flags for compatibility with the other stand-
-      // alone JavaScript engines.
-      continue;
-    } else if (strcmp(str, "--main-cycle-in-cpp") == 0) {
-      cycle_type = CycleInCpp;
-    } else if (strcmp(str, "--main-cycle-in-js") == 0) {
-      cycle_type = CycleInJs;
-    } else if (strncmp(str, "--", 2) == 0) {
-      printf("Warning: unknown flag %s.\nTry --help for options\n", str);
-    } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
-      script_source = v8::String::NewFromUtf8(isolate, argv[i + 1]);
-      script_name = v8::String::NewFromUtf8(isolate, "unnamed");
-      i++;
-      script_param_counter++;
-    } else {
-      // Use argument as a name of file to load.
-      script_source = ReadFile(isolate, str);
-      script_name = v8::String::NewFromUtf8(isolate, str);
-      if (script_source.IsEmpty()) {
-        printf("Error reading '%s'\n", str);
-        return 1;
-      }
-      script_param_counter++;
-    }
-  }
-
-  if (script_param_counter == 0) {
-    printf("Script is not specified\n");
-    return 1;
-  }
-  if (script_param_counter != 1) {
-    printf("Only one script may be specified\n");
-    return 1;
-  }
-
-  // Create a template for the global object.
-  v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
-
-  // Bind the global 'print' function to the C++ Print callback.
-  global->Set(v8::String::NewFromUtf8(isolate, "print"),
-              v8::FunctionTemplate::New(isolate, Print));
-
-  if (cycle_type == CycleInJs) {
-    // Bind the global 'read_line' function to the C++ Print callback.
-    global->Set(v8::String::NewFromUtf8(isolate, "read_line"),
-                v8::FunctionTemplate::New(isolate, ReadLine));
-  }
-
-  // Create a new execution environment containing the built-in
-  // functions
-  v8::Handle<v8::Context> context = v8::Context::New(isolate, NULL, global);
-  // Enter the newly created execution environment.
-  v8::Context::Scope context_scope(context);
-
-  debug_message_context.Reset(isolate, context);
-
-  bool report_exceptions = true;
-
-  v8::Handle<v8::Script> script;
-  {
-    // Compile script in try/catch context.
-    v8::TryCatch try_catch;
-    v8::ScriptOrigin origin(script_name);
-    script = v8::Script::Compile(script_source, &origin);
-    if (script.IsEmpty()) {
-      // Print errors that happened during compilation.
-      if (report_exceptions)
-        ReportException(isolate, &try_catch);
-      return 1;
-    }
-  }
-
-  {
-    v8::TryCatch try_catch;
-
-    script->Run();
-    if (try_catch.HasCaught()) {
-      if (report_exceptions)
-        ReportException(isolate, &try_catch);
-      return 1;
-    }
-  }
-
-  if (cycle_type == CycleInCpp) {
-    bool res = RunCppCycle(script,
-                           isolate->GetCurrentContext(),
-                           report_exceptions);
-    return !res;
-  } else {
-    // All is already done.
-  }
-  return 0;
-}
-
-
-bool RunCppCycle(v8::Handle<v8::Script> script,
-                 v8::Local<v8::Context> context,
-                 bool report_exceptions) {
-  v8::Isolate* isolate = context->GetIsolate();
-
-  v8::Handle<v8::String> fun_name =
-      v8::String::NewFromUtf8(isolate, "ProcessLine");
-  v8::Handle<v8::Value> process_val = context->Global()->Get(fun_name);
-
-  // If there is no Process function, or if it is not a function,
-  // bail out
-  if (!process_val->IsFunction()) {
-    printf("Error: Script does not declare 'ProcessLine' global function.\n");
-    return 1;
-  }
-
-  // It is a function; cast it to a Function
-  v8::Handle<v8::Function> process_fun =
-      v8::Handle<v8::Function>::Cast(process_val);
-
-
-  while (!feof(stdin)) {
-    v8::HandleScope handle_scope(isolate);
-
-    v8::Handle<v8::String> input_line = ReadLine();
-    if (input_line == v8::Undefined(isolate)) {
-      continue;
-    }
-
-    const int argc = 1;
-    v8::Handle<v8::Value> argv[argc] = { input_line };
-
-    v8::Handle<v8::Value> result;
-    {
-      v8::TryCatch try_catch;
-      result = process_fun->Call(isolate->GetCurrentContext()->Global(),
-                                 argc, argv);
-      if (try_catch.HasCaught()) {
-        if (report_exceptions)
-          ReportException(isolate, &try_catch);
-        return false;
-      }
-    }
-    v8::String::Utf8Value str(result);
-    const char* cstr = ToCString(str);
-    printf("%s\n", cstr);
-  }
-
-  return true;
-}
-
-
-int main(int argc, char* argv[]) {
-  v8::V8::InitializeICU();
-  v8::Platform* platform = v8::platform::CreateDefaultPlatform();
-  v8::V8::InitializePlatform(platform);
-  v8::V8::Initialize();
-  int result = RunMain(argc, argv);
-  v8::V8::Dispose();
-  v8::V8::ShutdownPlatform();
-  delete platform;
-  return result;
-}
-
-
-// Extracts a C string from a V8 Utf8Value.
-const char* ToCString(const v8::String::Utf8Value& value) {
-  return *value ? *value : "<string conversion failed>";
-}
-
-
-// Reads a file into a v8 string.
-v8::Handle<v8::String> ReadFile(v8::Isolate* isolate, const char* name) {
-  FILE* file = fopen(name, "rb");
-  if (file == NULL) return v8::Handle<v8::String>();
-
-  fseek(file, 0, SEEK_END);
-  int size = ftell(file);
-  rewind(file);
-
-  char* chars = new char[size + 1];
-  chars[size] = '\0';
-  for (int i = 0; i < size;) {
-    int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
-    i += read;
-  }
-  fclose(file);
-  v8::Handle<v8::String> result =
-      v8::String::NewFromUtf8(isolate, chars, v8::String::kNormalString, size);
-  delete[] chars;
-  return result;
-}
-
-
-void ReportException(v8::Isolate* isolate, v8::TryCatch* try_catch) {
-  v8::HandleScope handle_scope(isolate);
-  v8::String::Utf8Value exception(try_catch->Exception());
-  const char* exception_string = ToCString(exception);
-  v8::Handle<v8::Message> message = try_catch->Message();
-  if (message.IsEmpty()) {
-    // V8 didn't provide any extra information about this error; just
-    // print the exception.
-    printf("%s\n", exception_string);
-  } else {
-    // Print (filename):(line number): (message).
-    v8::String::Utf8Value filename(message->GetScriptOrigin().ResourceName());
-    const char* filename_string = ToCString(filename);
-    int linenum = message->GetLineNumber();
-    printf("%s:%i: %s\n", filename_string, linenum, exception_string);
-    // Print line of source code.
-    v8::String::Utf8Value sourceline(message->GetSourceLine());
-    const char* sourceline_string = ToCString(sourceline);
-    printf("%s\n", sourceline_string);
-    // Print wavy underline (GetUnderline is deprecated).
-    int start = message->GetStartColumn();
-    for (int i = 0; i < start; i++) {
-      printf(" ");
-    }
-    int end = message->GetEndColumn();
-    for (int i = start; i < end; i++) {
-      printf("^");
-    }
-    printf("\n");
-  }
-}
-
-
-// The callback that is invoked by v8 whenever the JavaScript 'print'
-// function is called.  Prints its arguments on stdout separated by
-// spaces and ending with a newline.
-void Print(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  bool first = true;
-  for (int i = 0; i < args.Length(); i++) {
-    v8::HandleScope handle_scope(args.GetIsolate());
-    if (first) {
-      first = false;
-    } else {
-      printf(" ");
-    }
-    v8::String::Utf8Value str(args[i]);
-    const char* cstr = ToCString(str);
-    printf("%s", cstr);
-  }
-  printf("\n");
-  fflush(stdout);
-}
-
-
-// The callback that is invoked by v8 whenever the JavaScript 'read_line'
-// function is called. Reads a string from standard input and returns.
-void ReadLine(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  if (args.Length() > 0) {
-    args.GetIsolate()->ThrowException(
-        v8::String::NewFromUtf8(args.GetIsolate(), "Unexpected arguments"));
-    return;
-  }
-  args.GetReturnValue().Set(ReadLine());
-}
-
-
-v8::Handle<v8::String> ReadLine() {
-  const int kBufferSize = 1024 + 1;
-  char buffer[kBufferSize];
-
-  char* res;
-  {
-    res = fgets(buffer, kBufferSize, stdin);
-  }
-  v8::Isolate* isolate = v8::Isolate::GetCurrent();
-  if (res == NULL) {
-    v8::Handle<v8::Primitive> t = v8::Undefined(isolate);
-    return v8::Handle<v8::String>::Cast(t);
-  }
-  // Remove newline char
-  for (char* pos = buffer; *pos != '\0'; pos++) {
-    if (*pos == '\n') {
-      *pos = '\0';
-      break;
-    }
-  }
-  return v8::String::NewFromUtf8(isolate, buffer);
-}
index e5c9b7a..f447970 100644
 #include <map>
 #include <string>
 
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-#error Using compressed startup data is not supported for this sample
-#endif
-
 using namespace std;
 using namespace v8;
 
@@ -116,10 +112,8 @@ class JsHttpRequestProcessor : public HttpRequestProcessor {
                            const PropertyCallbackInfo<Value>& info);
 
   // Callbacks that access maps
-  static void MapGet(Local<String> name,
-                     const PropertyCallbackInfo<Value>& info);
-  static void MapSet(Local<String> name,
-                     Local<Value> value,
+  static void MapGet(Local<Name> name, const PropertyCallbackInfo<Value>& info);
+  static void MapSet(Local<Name> name, Local<Value> value,
                      const PropertyCallbackInfo<Value>& info);
 
   // Utility methods for wrapping C++ objects as JavaScript objects,
@@ -359,13 +353,15 @@ string ObjectToString(Local<Value> value) {
 }
 
 
-void JsHttpRequestProcessor::MapGet(Local<String> name,
+void JsHttpRequestProcessor::MapGet(Local<Name> name,
                                     const PropertyCallbackInfo<Value>& info) {
+  if (name->IsSymbol()) return;
+
   // Fetch the map wrapped by this object.
   map<string, string>* obj = UnwrapMap(info.Holder());
 
   // Convert the JavaScript string to a std::string.
-  string key = ObjectToString(name);
+  string key = ObjectToString(Local<String>::Cast(name));
 
   // Look up the value if it exists using the standard STL idiom.
   map<string, string>::iterator iter = obj->find(key);
@@ -381,14 +377,15 @@ void JsHttpRequestProcessor::MapGet(Local<String> name,
 }
 
 
-void JsHttpRequestProcessor::MapSet(Local<String> name,
-                                    Local<Value> value_obj,
+void JsHttpRequestProcessor::MapSet(Local<Name> name, Local<Value> value_obj,
                                     const PropertyCallbackInfo<Value>& info) {
+  if (name->IsSymbol()) return;
+
   // Fetch the map wrapped by this object.
   map<string, string>* obj = UnwrapMap(info.Holder());
 
   // Convert the key and value to std::strings.
-  string key = ObjectToString(name);
+  string key = ObjectToString(Local<String>::Cast(name));
   string value = ObjectToString(value_obj);
 
   // Update the map.
@@ -405,7 +402,7 @@ Handle<ObjectTemplate> JsHttpRequestProcessor::MakeMapTemplate(
 
   Local<ObjectTemplate> result = ObjectTemplate::New(isolate);
   result->SetInternalFieldCount(1);
-  result->SetNamedPropertyHandler(MapGet, MapSet);
+  result->SetHandler(NamedPropertyHandlerConfiguration(MapGet, MapSet));
 
   // Again, return the result through the current handle scope.
   return handle_scope.Escape(result);
index 0c4c705..31e96f4 100644
         'process.cc',
       ],
     },
-    {
-      'target_name': 'lineprocessor',
-      'sources': [
-        'lineprocessor.cc',
-      ],
-    }
   ],
 }
index b66e8f7..1a08aff 100644
 #include <stdlib.h>
 #include <string.h>
 
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-#error Using compressed startup data is not supported for this sample
-#endif
-
 /**
  * This sample program shows how to implement a simple javascript shell
  * based on V8.  This includes initializing V8 with command line options,
index ed9d294..6b7ec05 100644
@@ -181,7 +181,7 @@ void Accessors::ArgumentsIteratorSetter(
 
 Handle<AccessorInfo> Accessors::ArgumentsIteratorInfo(
     Isolate* isolate, PropertyAttributes attributes) {
-  Handle<Name> name(isolate->native_context()->iterator_symbol(), isolate);
+  Handle<Name> name = isolate->factory()->iterator_symbol();
   return MakeAccessor(isolate, name, &ArgumentsIteratorGetter,
                       &ArgumentsIteratorSetter, attributes);
 }
@@ -322,6 +322,98 @@ Handle<AccessorInfo> Accessors::StringLengthInfo(
 }
 
 
+template <typename Char>
+inline int CountRequiredEscapes(Handle<String> source) {
+  DisallowHeapAllocation no_gc;
+  int escapes = 0;
+  Vector<const Char> src = source->GetCharVector<Char>();
+  for (int i = 0; i < src.length(); i++) {
+    if (src[i] == '/' && (i == 0 || src[i - 1] != '\\')) escapes++;
+  }
+  return escapes;
+}
+
+
+template <typename Char, typename StringType>
+inline Handle<StringType> WriteEscapedRegExpSource(Handle<String> source,
+                                                   Handle<StringType> result) {
+  DisallowHeapAllocation no_gc;
+  Vector<const Char> src = source->GetCharVector<Char>();
+  Vector<Char> dst(result->GetChars(), result->length());
+  int s = 0;
+  int d = 0;
+  while (s < src.length()) {
+    if (src[s] == '/' && (s == 0 || src[s - 1] != '\\')) dst[d++] = '\\';
+    dst[d++] = src[s++];
+  }
+  DCHECK_EQ(result->length(), d);
+  return result;
+}
+
+
+MaybeHandle<String> EscapeRegExpSource(Isolate* isolate,
+                                       Handle<String> source) {
+  String::Flatten(source);
+  if (source->length() == 0) return isolate->factory()->query_colon_string();
+  bool one_byte = source->IsOneByteRepresentationUnderneath();
+  int escapes = one_byte ? CountRequiredEscapes<uint8_t>(source)
+                         : CountRequiredEscapes<uc16>(source);
+  if (escapes == 0) return source;
+  int length = source->length() + escapes;
+  if (one_byte) {
+    Handle<SeqOneByteString> result;
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+                               isolate->factory()->NewRawOneByteString(length),
+                               String);
+    return WriteEscapedRegExpSource<uint8_t>(source, result);
+  } else {
+    Handle<SeqTwoByteString> result;
+    ASSIGN_RETURN_ON_EXCEPTION(isolate, result,
+                               isolate->factory()->NewRawTwoByteString(length),
+                               String);
+    return WriteEscapedRegExpSource<uc16>(source, result);
+  }
+}
+
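Worked examples of the transformation above, at the character level: only a '/' not already preceded by a backslash is escaped (the check looks a single character back), and the empty pattern maps to the query-colon string, so the source always re-parses between literal slashes:

    //   a/b      ->  a\/b     (escape inserted before the bare '/')
    //   a\/b     ->  a\/b     (already escaped; unchanged)
    //   <empty>  ->  (?:)     (query_colon_string)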
+
+// Implements ECMA262 ES6 draft 21.2.5.9
+void Accessors::RegExpSourceGetter(
+    v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(info.GetIsolate());
+  HandleScope scope(isolate);
+
+  Handle<Object> receiver =
+      Utils::OpenHandle(*v8::Local<v8::Value>(info.This()));
+  Handle<JSRegExp> regexp = Handle<JSRegExp>::cast(receiver);
+  Handle<String> result;
+  if (regexp->TypeTag() == JSRegExp::NOT_COMPILED) {
+    result = isolate->factory()->empty_string();
+  } else {
+    Handle<String> pattern(regexp->Pattern(), isolate);
+    MaybeHandle<String> maybe = EscapeRegExpSource(isolate, pattern);
+    if (!maybe.ToHandle(&result)) {
+      isolate->OptionalRescheduleException(false);
+      return;
+    }
+  }
+  info.GetReturnValue().Set(Utils::ToLocal(result));
+}
+
+
+void Accessors::RegExpSourceSetter(v8::Local<v8::Name> name,
+                                   v8::Local<v8::Value> value,
+                                   const v8::PropertyCallbackInfo<void>& info) {
+  UNREACHABLE();
+}
+
+
+Handle<AccessorInfo> Accessors::RegExpSourceInfo(
+    Isolate* isolate, PropertyAttributes attributes) {
+  return MakeAccessor(isolate, isolate->factory()->source_string(),
+                      &RegExpSourceGetter, &RegExpSourceSetter, attributes);
+}
+
+
 //
 // Accessors::ScriptColumnOffset
 //
index 678064d..0210d53 100644
@@ -21,6 +21,7 @@ namespace internal {
   V(FunctionName)                 \
   V(FunctionLength)               \
   V(FunctionPrototype)            \
+  V(RegExpSource)                 \
   V(ScriptColumnOffset)           \
   V(ScriptCompilationType)        \
   V(ScriptContextData)            \
index 75fc2d7..61e565f 100644
@@ -14,6 +14,7 @@
 #include "include/v8-testing.h"
 #include "src/assert-scope.h"
 #include "src/background-parsing-task.h"
+#include "src/base/functional.h"
 #include "src/base/platform/platform.h"
 #include "src/base/platform/time.h"
 #include "src/base/utils/random-number-generator.h"
@@ -195,149 +196,60 @@ static inline bool IsExecutionTerminatingCheck(i::Isolate* isolate) {
 }
 
 
-StartupDataDecompressor::StartupDataDecompressor()
-    : raw_data(i::NewArray<char*>(V8::GetCompressedStartupDataCount())) {
-  for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
-    raw_data[i] = NULL;
-  }
+void V8::SetNativesDataBlob(StartupData* natives_blob) {
+  i::V8::SetNativesBlob(natives_blob);
 }
 
 
-StartupDataDecompressor::~StartupDataDecompressor() {
-  for (int i = 0; i < V8::GetCompressedStartupDataCount(); ++i) {
-    i::DeleteArray(raw_data[i]);
-  }
-  i::DeleteArray(raw_data);
+void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
+  i::V8::SetSnapshotBlob(snapshot_blob);
 }
 
 
-int StartupDataDecompressor::Decompress() {
-  int compressed_data_count = V8::GetCompressedStartupDataCount();
-  StartupData* compressed_data =
-      i::NewArray<StartupData>(compressed_data_count);
-  V8::GetCompressedStartupData(compressed_data);
-  for (int i = 0; i < compressed_data_count; ++i) {
-    char* decompressed = raw_data[i] =
-        i::NewArray<char>(compressed_data[i].raw_size);
-    if (compressed_data[i].compressed_size != 0) {
-      int result = DecompressData(decompressed,
-                                  &compressed_data[i].raw_size,
-                                  compressed_data[i].data,
-                                  compressed_data[i].compressed_size);
-      if (result != 0) return result;
-    } else {
-      DCHECK_EQ(0, compressed_data[i].raw_size);
+StartupData V8::CreateSnapshotDataBlob() {
+  Isolate::CreateParams params;
+  params.enable_serializer = true;
+  Isolate* isolate = v8::Isolate::New(params);
+  StartupData result = {NULL, 0};
+  {
+    Isolate::Scope isolate_scope(isolate);
+    i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
+    Persistent<Context> context;
+    {
+      HandleScope handle_scope(isolate);
+      context.Reset(isolate, Context::New(isolate));
     }
-    compressed_data[i].data = decompressed;
-  }
-  V8::SetDecompressedStartupData(compressed_data);
-  i::DeleteArray(compressed_data);
-  return 0;
-}
-
-
-StartupData::CompressionAlgorithm V8::GetCompressedStartupDataAlgorithm() {
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-  return StartupData::kBZip2;
-#else
-  return StartupData::kUncompressed;
-#endif
-}
-
-
-enum CompressedStartupDataItems {
-  kSnapshot = 0,
-  kSnapshotContext,
-  kLibraries,
-  kExperimentalLibraries,
-  kCompressedStartupDataCount
-};
-
-
-int V8::GetCompressedStartupDataCount() {
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-  return kCompressedStartupDataCount;
-#else
-  return 0;
-#endif
-}
-
-
-void V8::GetCompressedStartupData(StartupData* compressed_data) {
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-  compressed_data[kSnapshot].data =
-      reinterpret_cast<const char*>(i::Snapshot::data());
-  compressed_data[kSnapshot].compressed_size = i::Snapshot::size();
-  compressed_data[kSnapshot].raw_size = i::Snapshot::raw_size();
-
-  compressed_data[kSnapshotContext].data =
-      reinterpret_cast<const char*>(i::Snapshot::context_data());
-  compressed_data[kSnapshotContext].compressed_size =
-      i::Snapshot::context_size();
-  compressed_data[kSnapshotContext].raw_size = i::Snapshot::context_raw_size();
-
-  i::Vector<const i::byte> libraries_source = i::Natives::GetScriptsSource();
-  compressed_data[kLibraries].data =
-      reinterpret_cast<const char*>(libraries_source.start());
-  compressed_data[kLibraries].compressed_size = libraries_source.length();
-  compressed_data[kLibraries].raw_size = i::Natives::GetRawScriptsSize();
-
-  i::Vector<const i::byte> exp_libraries_source =
-      i::ExperimentalNatives::GetScriptsSource();
-  compressed_data[kExperimentalLibraries].data =
-      reinterpret_cast<const char*>(exp_libraries_source.start());
-  compressed_data[kExperimentalLibraries].compressed_size =
-      exp_libraries_source.length();
-  compressed_data[kExperimentalLibraries].raw_size =
-      i::ExperimentalNatives::GetRawScriptsSize();
-#endif
-}
-
-
-void V8::SetDecompressedStartupData(StartupData* decompressed_data) {
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-  DCHECK_EQ(i::Snapshot::raw_size(), decompressed_data[kSnapshot].raw_size);
-  i::Snapshot::set_raw_data(
-      reinterpret_cast<const i::byte*>(decompressed_data[kSnapshot].data));
-
-  DCHECK_EQ(i::Snapshot::context_raw_size(),
-            decompressed_data[kSnapshotContext].raw_size);
-  i::Snapshot::set_context_raw_data(
-      reinterpret_cast<const i::byte*>(
-          decompressed_data[kSnapshotContext].data));
-
-  DCHECK_EQ(i::Natives::GetRawScriptsSize(),
-            decompressed_data[kLibraries].raw_size);
-  i::Vector<const char> libraries_source(
-      decompressed_data[kLibraries].data,
-      decompressed_data[kLibraries].raw_size);
-  i::Natives::SetRawScriptsSource(libraries_source);
-
-  DCHECK_EQ(i::ExperimentalNatives::GetRawScriptsSize(),
-            decompressed_data[kExperimentalLibraries].raw_size);
-  i::Vector<const char> exp_libraries_source(
-      decompressed_data[kExperimentalLibraries].data,
-      decompressed_data[kExperimentalLibraries].raw_size);
-  i::ExperimentalNatives::SetRawScriptsSource(exp_libraries_source);
-#endif
-}
+    if (!context.IsEmpty()) {
+      // Make sure all builtin scripts are cached.
+      {
+        HandleScope scope(isolate);
+        for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
+          internal_isolate->bootstrapper()->NativesSourceLookup(i);
+        }
+      }
+      // If we don't do this then we end up with a stray root pointing at the
+      // context even after we have disposed of the context.
+      internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
+      i::Object* raw_context = *v8::Utils::OpenPersistent(context);
+      context.Reset();
 
+      i::SnapshotByteSink snapshot_sink;
+      i::StartupSerializer ser(internal_isolate, &snapshot_sink);
+      ser.SerializeStrongReferences();
 
-void V8::SetNativesDataBlob(StartupData* natives_blob) {
-#ifdef V8_USE_EXTERNAL_STARTUP_DATA
-  i::SetNativesFromFile(natives_blob);
-#else
-  CHECK(false);
-#endif
-}
+      i::SnapshotByteSink context_sink;
+      i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
+      context_ser.Serialize(&raw_context);
+      ser.SerializeWeakReferences();
 
+      i::SnapshotData sd(snapshot_sink, ser);
+      i::SnapshotData csd(context_sink, context_ser);
 
-void V8::SetSnapshotDataBlob(StartupData* snapshot_blob) {
-#ifdef V8_USE_EXTERNAL_STARTUP_DATA
-  i::SetSnapshotFromFile(snapshot_blob);
-#else
-  CHECK(false);
-#endif
+      result = i::Snapshot::CreateSnapshotBlob(sd.RawData(), csd.RawData());
+    }
+  }
+  isolate->Dispose();
+  return result;
 }
 
 
@@ -474,28 +386,40 @@ void SetResourceConstraints(i::Isolate* isolate,
 i::Object** V8::GlobalizeReference(i::Isolate* isolate, i::Object** obj) {
   LOG_API(isolate, "Persistent::New");
   i::Handle<i::Object> result = isolate->global_handles()->Create(*obj);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   (*obj)->ObjectVerify();
-#endif  // DEBUG
+#endif  // VERIFY_HEAP
   return result.location();
 }
 
 
 i::Object** V8::CopyPersistent(i::Object** obj) {
   i::Handle<i::Object> result = i::GlobalHandles::CopyGlobal(obj);
-#ifdef DEBUG
+#ifdef VERIFY_HEAP
   (*obj)->ObjectVerify();
-#endif  // DEBUG
+#endif  // VERIFY_HEAP
   return result.location();
 }
 
 
-void V8::MakeWeak(i::Object** object, void* parameters,
-                  WeakCallback weak_callback, V8::WeakHandleType weak_type) {
-  i::GlobalHandles::PhantomState phantom;
-  phantom = weak_type == V8::PhantomHandle ? i::GlobalHandles::Phantom
-                                           : i::GlobalHandles::Nonphantom;
-  i::GlobalHandles::MakeWeak(object, parameters, weak_callback, phantom);
+void V8::MakeWeak(i::Object** object, void* parameter,
+                  WeakCallback weak_callback) {
+  i::GlobalHandles::MakeWeak(object, parameter, weak_callback);
+}
+
+
+void V8::MakePhantom(i::Object** object, void* parameter,
+                     PhantomCallbackData<void>::Callback weak_callback) {
+  i::GlobalHandles::MakePhantom(object, parameter, weak_callback);
+}
+
+
+void V8::MakePhantom(
+    i::Object** object,
+    InternalFieldsCallbackData<void, void>::Callback weak_callback,
+    int internal_field_index1, int internal_field_index2) {
+  i::GlobalHandles::MakePhantom(object, weak_callback, internal_field_index1,
+                                internal_field_index2);
 }
 
 
@@ -890,6 +814,9 @@ Local<FunctionTemplate> FunctionTemplate::New(
     v8::Handle<Signature> signature,
     int length) {
   i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  // Changes to the environment cannot be captured in the snapshot. Expect no
+  // function templates when the isolate is created for serialization.
+  DCHECK(!i_isolate->serializer_enabled());
   LOG_API(i_isolate, "FunctionTemplate::New");
   ENTER_V8(i_isolate);
   return FunctionTemplateNew(
@@ -1229,6 +1156,9 @@ Local<ObjectTemplate> ObjectTemplate::New() {
 Local<ObjectTemplate> ObjectTemplate::New(
     i::Isolate* isolate,
     v8::Handle<FunctionTemplate> constructor) {
+  // Changes to the environment cannot be captured in the snapshot. Expect no
+  // object templates when the isolate is created for serialization.
+  DCHECK(!isolate->serializer_enabled());
   LOG_API(isolate, "ObjectTemplate::New");
   ENTER_V8(isolate);
   i::Handle<i::Struct> struct_obj =
@@ -1374,19 +1304,20 @@ void ObjectTemplate::SetAccessor(v8::Handle<Name> name,
 }
 
 
-void ObjectTemplate::SetNamedPropertyHandler(
-    NamedPropertyGetterCallback getter,
-    NamedPropertySetterCallback setter,
-    NamedPropertyQueryCallback query,
-    NamedPropertyDeleterCallback remover,
-    NamedPropertyEnumeratorCallback enumerator,
-    Handle<Value> data) {
-  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+template <typename Getter, typename Setter, typename Query, typename Deleter,
+          typename Enumerator>
+static void ObjectTemplateSetNamedPropertyHandler(ObjectTemplate* templ,
+                                                  Getter getter, Setter setter,
+                                                  Query query, Deleter remover,
+                                                  Enumerator enumerator,
+                                                  Handle<Value> data,
+                                                  bool can_intercept_symbols) {
+  i::Isolate* isolate = Utils::OpenHandle(templ)->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
-  EnsureConstructor(isolate, this);
-  i::FunctionTemplateInfo* constructor = i::FunctionTemplateInfo::cast(
-      Utils::OpenHandle(this)->constructor());
+  EnsureConstructor(isolate, templ);
+  i::FunctionTemplateInfo* constructor =
+      i::FunctionTemplateInfo::cast(Utils::OpenHandle(templ)->constructor());
   i::Handle<i::FunctionTemplateInfo> cons(constructor);
   i::Handle<i::Struct> struct_obj =
       isolate->factory()->NewStruct(i::INTERCEPTOR_INFO_TYPE);
@@ -1398,6 +1329,8 @@ void ObjectTemplate::SetNamedPropertyHandler(
   if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
   if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
   if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
+  obj->set_flags(0);
+  obj->set_can_intercept_symbols(can_intercept_symbols);
 
   if (data.IsEmpty()) {
     data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
@@ -1407,6 +1340,23 @@ void ObjectTemplate::SetNamedPropertyHandler(
 }
 
 
+void ObjectTemplate::SetNamedPropertyHandler(
+    NamedPropertyGetterCallback getter, NamedPropertySetterCallback setter,
+    NamedPropertyQueryCallback query, NamedPropertyDeleterCallback remover,
+    NamedPropertyEnumeratorCallback enumerator, Handle<Value> data) {
+  ObjectTemplateSetNamedPropertyHandler(this, getter, setter, query, remover,
+                                        enumerator, data, false);
+}
+
+
+void ObjectTemplate::SetHandler(
+    const NamedPropertyHandlerConfiguration& config) {
+  ObjectTemplateSetNamedPropertyHandler(this, config.getter, config.setter,
+                                        config.query, config.deleter,
+                                        config.enumerator, config.data, true);
+}
+
+
 void ObjectTemplate::MarkAsUndetectable() {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ENTER_V8(isolate);
@@ -1450,13 +1400,8 @@ void ObjectTemplate::SetAccessCheckCallbacks(
 }
 
 
-void ObjectTemplate::SetIndexedPropertyHandler(
-    IndexedPropertyGetterCallback getter,
-    IndexedPropertySetterCallback setter,
-    IndexedPropertyQueryCallback query,
-    IndexedPropertyDeleterCallback remover,
-    IndexedPropertyEnumeratorCallback enumerator,
-    Handle<Value> data) {
+void ObjectTemplate::SetHandler(
+    const IndexedPropertyHandlerConfiguration& config) {
   i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
   ENTER_V8(isolate);
   i::HandleScope scope(isolate);
@@ -1469,12 +1414,16 @@ void ObjectTemplate::SetIndexedPropertyHandler(
   i::Handle<i::InterceptorInfo> obj =
       i::Handle<i::InterceptorInfo>::cast(struct_obj);
 
-  if (getter != 0) SET_FIELD_WRAPPED(obj, set_getter, getter);
-  if (setter != 0) SET_FIELD_WRAPPED(obj, set_setter, setter);
-  if (query != 0) SET_FIELD_WRAPPED(obj, set_query, query);
-  if (remover != 0) SET_FIELD_WRAPPED(obj, set_deleter, remover);
-  if (enumerator != 0) SET_FIELD_WRAPPED(obj, set_enumerator, enumerator);
+  if (config.getter != 0) SET_FIELD_WRAPPED(obj, set_getter, config.getter);
+  if (config.setter != 0) SET_FIELD_WRAPPED(obj, set_setter, config.setter);
+  if (config.query != 0) SET_FIELD_WRAPPED(obj, set_query, config.query);
+  if (config.deleter != 0) SET_FIELD_WRAPPED(obj, set_deleter, config.deleter);
+  if (config.enumerator != 0) {
+    SET_FIELD_WRAPPED(obj, set_enumerator, config.enumerator);
+  }
+  obj->set_flags(0);
 
+  v8::Local<v8::Value> data = config.data;
   if (data.IsEmpty()) {
     data = v8::Undefined(reinterpret_cast<v8::Isolate*>(isolate));
   }
@@ -1536,7 +1485,10 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
 
 ScriptCompiler::CachedData::CachedData(const uint8_t* data_, int length_,
                                        BufferPolicy buffer_policy_)
-    : data(data_), length(length_), buffer_policy(buffer_policy_) {}
+    : data(data_),
+      length(length_),
+      rejected(false),
+      buffer_policy(buffer_policy_) {}
 
 
 ScriptCompiler::CachedData::~CachedData() {
@@ -1567,7 +1519,7 @@ Local<Script> UnboundScript::BindToCurrentContext() {
       function_info(i::SharedFunctionInfo::cast(*obj), obj->GetIsolate());
   i::Handle<i::JSFunction> function =
       obj->GetIsolate()->factory()->NewFunctionFromSharedFunctionInfo(
-          function_info, obj->GetIsolate()->global_context());
+          function_info, obj->GetIsolate()->native_context());
   return ToApiHandle<Script>(function);
 }
 
@@ -1697,6 +1649,12 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(
     options = kConsumeParserCache;
   }
 
+  // Don't try to produce any kind of cache when the debugger is loaded.
+  if (isolate->debug()->is_loaded() &&
+      (options == kProduceParserCache || options == kProduceCodeCache)) {
+    options = kNoCompileOptions;
+  }
+
   i::ScriptData* script_data = NULL;
   if (options == kConsumeParserCache || options == kConsumeCodeCache) {
     DCHECK(source->cached_data);
@@ -1732,7 +1690,7 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(
     EXCEPTION_PREAMBLE(isolate);
     i::Handle<i::SharedFunctionInfo> result = i::Compiler::CompileScript(
         str, name_obj, line_offset, column_offset, is_shared_cross_origin,
-        isolate->global_context(), NULL, &script_data, options,
+        isolate->native_context(), NULL, &script_data, options,
         i::NOT_NATIVES_CODE);
     has_pending_exception = result.is_null();
     if (has_pending_exception && script_data != NULL) {
@@ -1752,6 +1710,8 @@ Local<UnboundScript> ScriptCompiler::CompileUnbound(
       source->cached_data = new CachedData(
           script_data->data(), script_data->length(), CachedData::BufferOwned);
       script_data->ReleaseDataOwnership();
+    } else if (options == kConsumeParserCache || options == kConsumeCodeCache) {
+      source->cached_data->rejected = script_data->rejected();
     }
     delete script_data;
   }
@@ -1777,17 +1737,6 @@ Local<Script> ScriptCompiler::Compile(
 ScriptCompiler::ScriptStreamingTask* ScriptCompiler::StartStreamingScript(
     Isolate* v8_isolate, StreamedSource* source, CompileOptions options) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(v8_isolate);
-  if (!isolate->global_context().is_null() &&
-      !isolate->global_context()->IsNativeContext()) {
-    // The context chain is non-trivial, and constructing the corresponding
-    // non-trivial Scope chain outside the V8 heap is not implemented. Don't
-    // stream the script. This will only occur if Harmony scoping is enabled and
-    // a previous script has introduced "let" or "const" variables. TODO(marja):
-    // Implement externalizing ScopeInfos and constructing non-trivial Scope
-    // chains independent of the V8 heap so that we can stream also in this
-    // case.
-    return NULL;
-  }
   return new i::BackgroundParsingTask(source->impl(), options,
                                       i::FLAG_stack_size, isolate);
 }
@@ -1824,7 +1773,7 @@ Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
                                          v8::True(v8_isolate));
     }
     source->info->set_script(script);
-    source->info->SetContext(isolate->global_context());
+    source->info->SetContext(isolate->native_context());
 
     EXCEPTION_PREAMBLE(isolate);
 
@@ -1857,6 +1806,13 @@ Local<Script> ScriptCompiler::Compile(Isolate* v8_isolate,
 }
 
 
+uint32_t ScriptCompiler::CachedDataVersionTag() {
+  return static_cast<uint32_t>(base::hash_combine(
+      internal::Version::Hash(), internal::FlagList::Hash(),
+      static_cast<uint32_t>(internal::CpuFeatures::SupportedFeatures())));
+}
+
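A sketch of embedder-side cache validation built on the new tag, together with the |rejected| flag set by CompileUnbound above; ScriptCache is a hypothetical embedder type, and Source::GetCachedData() is assumed to expose the cached data:

    #include <string>

    struct ScriptCache {  // hypothetical embedder cache interface
      void Remove(const std::string& key);
    };

    static void ValidateCachedData(const v8::ScriptCompiler::Source& source,
                                   ScriptCache* cache, const std::string& key,
                                   uint32_t stored_tag) {
      // Any change in V8 version, flags or CPU features invalidates old data.
      if (stored_tag != v8::ScriptCompiler::CachedDataVersionTag()) {
        cache->Remove(key);
        return;
      }
      // After CompileUnbound() with kConsume*Cache, |rejected| reports
      // whether V8 actually accepted the data.
      const v8::ScriptCompiler::CachedData* data = source.GetCachedData();
      if (data != NULL && data->rejected) cache->Remove(key);
    }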
+
 Local<Script> Script::Compile(v8::Handle<String> source,
                               v8::ScriptOrigin* origin) {
   i::Handle<i::String> str = Utils::OpenHandle(*source);
@@ -3022,8 +2978,13 @@ int32_t Value::Int32Value() const {
 
 
 bool Value::Equals(Handle<Value> that) const {
-  i::Isolate* isolate = i::Isolate::Current();
   i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
+  i::Handle<i::Object> other = Utils::OpenHandle(*that);
+  if (obj->IsSmi() && other->IsSmi()) {
+    return obj->Number() == other->Number();
+  }
+  i::Object* ho = obj->IsSmi() ? *other : *obj;
+  i::Isolate* isolate = i::HeapObject::cast(ho)->GetIsolate();
   if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
                        "v8::Value::Equals()",
                        "Reading from empty handle")) {
@@ -3031,7 +2992,6 @@ bool Value::Equals(Handle<Value> that) const {
   }
   LOG_API(isolate, "Equals");
   ENTER_V8(isolate);
-  i::Handle<i::Object> other = Utils::OpenHandle(*that);
   // If both obj and other are JSObjects, we'd better compare by identity
   // immediately when going into JS builtin.  The reason is Invoke
   // would overwrite global object receiver with global proxy.
@@ -3050,15 +3010,18 @@ bool Value::Equals(Handle<Value> that) const {
 
 
 bool Value::StrictEquals(Handle<Value> that) const {
-  i::Isolate* isolate = i::Isolate::Current();
   i::Handle<i::Object> obj = Utils::OpenHandle(this, true);
+  i::Handle<i::Object> other = Utils::OpenHandle(*that);
+  if (obj->IsSmi()) {
+    return other->IsNumber() && obj->Number() == other->Number();
+  }
+  i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
   if (!Utils::ApiCheck(!obj.is_null() && !that.IsEmpty(),
                        "v8::Value::StrictEquals()",
                        "Reading from empty handle")) {
     return false;
   }
   LOG_API(isolate, "StrictEquals");
-  i::Handle<i::Object> other = Utils::OpenHandle(*that);
   // Must check HeapNumber first, since NaN !== NaN.
   if (obj->IsHeapNumber()) {
     if (!other->IsNumber()) return false;
@@ -3636,7 +3599,9 @@ static inline bool ObjectSetAccessor(Object* obj,
       i::JSObject::SetAccessor(Utils::OpenHandle(obj), info),
       false);
   if (result->IsUndefined()) return false;
-  if (fast) i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0);
+  if (fast) {
+    i::JSObject::MigrateSlowToFast(Utils::OpenHandle(obj), 0, "APISetAccessor");
+  }
   return true;
 }
 
@@ -3822,7 +3787,8 @@ void v8::Object::TurnOnAccessCheck() {
   // as optimized code does not always handle access checks.
   i::Deoptimizer::DeoptimizeGlobalObject(*obj);
 
-  i::Handle<i::Map> new_map = i::Map::Copy(i::Handle<i::Map>(obj->map()));
+  i::Handle<i::Map> new_map =
+      i::Map::Copy(i::Handle<i::Map>(obj->map()), "APITurnOnAccessCheck");
   new_map->set_is_access_check_needed(true);
   i::JSObject::MigrateToMap(obj, new_map);
 }
@@ -4334,6 +4300,16 @@ Local<v8::Value> Function::GetBoundFunction() const {
 }
 
 
+int Name::GetIdentityHash() {
+  i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
+  ON_BAILOUT(isolate, "v8::Name::GetIdentityHash()", return 0);
+  ENTER_V8(isolate);
+  i::HandleScope scope(isolate);
+  i::Handle<i::Name> self = Utils::OpenHandle(this);
+  return static_cast<int>(self->Hash());
+}
+
+
 int String::Length() const {
   i::Handle<i::String> str = Utils::OpenHandle(this);
   return str->length();
@@ -5545,7 +5521,11 @@ Local<String> v8::String::Concat(Handle<String> left, Handle<String> right) {
   LOG_API(isolate, "String::New(char)");
   ENTER_V8(isolate);
   i::Handle<i::String> right_string = Utils::OpenHandle(*right);
-  // We do not expect this to fail. Change this if it does.
+  // If the combined length would exceed the maximum string length, do not
+  // wait for the range error to be thrown; return the null handle instead.
+  if (left_string->length() + right_string->length() > i::String::kMaxLength) {
+    return Local<String>();
+  }
   i::Handle<i::String> result = isolate->factory()->NewConsString(
       left_string, right_string).ToHandleChecked();
   return Utils::ToLocal(result);
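Callers should therefore test for the empty handle rather than rely on an exception; a minimal sketch:

    static v8::Local<v8::String> SafeConcat(v8::Local<v8::String> left,
                                            v8::Local<v8::String> right) {
      v8::Local<v8::String> joined = v8::String::Concat(left, right);
      if (joined.IsEmpty()) {
        // Combined length exceeds String::kMaxLength; no exception is pending.
      }
      return joined;
    }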
@@ -5651,7 +5631,6 @@ bool v8::String::MakeExternal(
 
 
 bool v8::String::CanMakeExternal() {
-  if (!internal::FLAG_clever_optimizations) return false;
   i::Handle<i::String> obj = Utils::OpenHandle(this);
   i::Isolate* isolate = obj->GetIsolate();
 
@@ -6235,27 +6214,21 @@ Local<Symbol> v8::Symbol::ForApi(Isolate* isolate, Local<String> name) {
 }
 
 
-static Local<Symbol> GetWellKnownSymbol(Isolate* isolate, const char* name) {
-  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
-  i::Handle<i::String> i_name =
-      Utils::OpenHandle(*String::NewFromUtf8(isolate, name));
-  i::Handle<i::String> part = i_isolate->factory()->for_intern_string();
-  return Utils::ToLocal(SymbolFor(i_isolate, i_name, part));
-}
-
-
 Local<Symbol> v8::Symbol::GetIterator(Isolate* isolate) {
-  return GetWellKnownSymbol(isolate, "Symbol.iterator");
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  return Utils::ToLocal(i_isolate->factory()->iterator_symbol());
 }
 
 
 Local<Symbol> v8::Symbol::GetUnscopables(Isolate* isolate) {
-  return GetWellKnownSymbol(isolate, "Symbol.unscopables");
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  return Utils::ToLocal(i_isolate->factory()->unscopables_symbol());
 }
 
 
 Local<Symbol> v8::Symbol::GetToStringTag(Isolate* isolate) {
-  return GetWellKnownSymbol(isolate, "Symbol.toStringTag");
+  i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
+  return Utils::ToLocal(i_isolate->factory()->to_string_tag_symbol());
 }
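After this change the accessors return the engine's own well-known symbols
instead of interning fresh "Symbol.*" names, so a property keyed on them is the
one the runtime itself consults. A hedged sketch (obj and fn are assumed
handles):

    v8::Local<v8::Symbol> iter = v8::Symbol::GetIterator(isolate);
    obj->Set(iter, fn);  // makes obj iterable via for-of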
 
 
@@ -6503,17 +6476,11 @@ void Isolate::CancelTerminateExecution() {
 
 void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  isolate->set_api_interrupt_callback(callback);
-  isolate->set_api_interrupt_callback_data(data);
-  isolate->stack_guard()->RequestApiInterrupt();
+  isolate->RequestInterrupt(callback, data);
 }
 
 
 void Isolate::ClearInterrupt() {
-  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  isolate->stack_guard()->ClearApiInterrupt();
-  isolate->set_api_interrupt_callback(NULL);
-  isolate->set_api_interrupt_callback_data(NULL);
 }
 
 
@@ -6756,6 +6723,15 @@ bool Isolate::IdleNotification(int idle_time_in_ms) {
 }
 
 
+bool Isolate::IdleNotificationDeadline(double deadline_in_seconds) {
+  // Returning true tells the caller that it need not
+  // continue to call IdleNotification.
+  i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
+  if (!i::FLAG_use_idle_notification) return true;
+  return isolate->heap()->IdleNotification(deadline_in_seconds);
+}
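A hedged embedder sketch of the new deadline-based idle API, assuming some
monotonic clock helper Now() in the seconds-based units V8 expects:

    // Give the heap a 10 ms idle budget; true means no more idle work.
    bool done = isolate->IdleNotificationDeadline(Now() + 0.010);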
+
+
 void Isolate::LowMemoryNotification() {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
   {
@@ -6766,9 +6742,9 @@ void Isolate::LowMemoryNotification() {
 }
 
 
-int Isolate::ContextDisposedNotification() {
+int Isolate::ContextDisposedNotification(bool dependant_context) {
   i::Isolate* isolate = reinterpret_cast<i::Isolate*>(this);
-  return isolate->heap()->NotifyContextDisposed();
+  return isolate->heap()->NotifyContextDisposed(dependant_context);
 }
 
 
@@ -6983,7 +6959,7 @@ DEFINE_ERROR(Error)
 #undef DEFINE_ERROR
 
 
-Local<Message> Exception::GetMessage(Handle<Value> exception) {
+Local<Message> Exception::CreateMessage(Handle<Value> exception) {
   i::Handle<i::Object> obj = Utils::OpenHandle(*exception);
   if (!obj->IsHeapObject()) return Local<Message>();
   i::Isolate* isolate = i::HeapObject::cast(*obj)->GetIsolate();
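A hedged sketch of the renamed entry point, e.g. from inside a v8::TryCatch
handler (illustrative only):

    v8::Local<v8::Message> msg = v8::Exception::CreateMessage(exception);
    if (!msg.IsEmpty()) {
      v8::String::Utf8Value text(msg->Get());
      fprintf(stderr, "uncaught: %s\n", *text);
    }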
index 9fb2da3..e6c6db5 100644 (file)
@@ -67,39 +67,24 @@ class Arguments BASE_EMBEDDED {
 // For each type of callback, we have a list of arguments
 // They are used to generate the Call() functions below
 // These aren't included in the list as they have duplicate signatures
-// F(NamedPropertyEnumeratorCallback, ...)
+// F(GenericNamedPropertyEnumeratorCallback, ...)
+// F(GenericNamedPropertyGetterCallback, ...)
 
 #define FOR_EACH_CALLBACK_TABLE_MAPPING_0(F) \
-  F(IndexedPropertyEnumeratorCallback, v8::Array) \
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F) \
-  F(NamedPropertyGetterCallback, v8::Value, v8::Local<v8::String>) \
-  F(AccessorNameGetterCallback, v8::Value, v8::Local<v8::Name>) \
-  F(NamedPropertyQueryCallback, \
-    v8::Integer, \
-    v8::Local<v8::String>) \
-  F(NamedPropertyDeleterCallback, \
-    v8::Boolean, \
-    v8::Local<v8::String>) \
-  F(IndexedPropertyGetterCallback, \
-    v8::Value, \
-    uint32_t) \
-  F(IndexedPropertyQueryCallback, \
-    v8::Integer, \
-    uint32_t) \
-  F(IndexedPropertyDeleterCallback, \
-    v8::Boolean, \
-    uint32_t) \
-
-#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F) \
-  F(NamedPropertySetterCallback, \
-    v8::Value, \
-    v8::Local<v8::String>, \
-    v8::Local<v8::Value>) \
-  F(IndexedPropertySetterCallback, \
-    v8::Value, \
-    uint32_t, \
-    v8::Local<v8::Value>) \
+  F(IndexedPropertyEnumeratorCallback, v8::Array)
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_1(F)                               \
+  F(AccessorNameGetterCallback, v8::Value, v8::Local<v8::Name>)            \
+  F(GenericNamedPropertyQueryCallback, v8::Integer, v8::Local<v8::Name>)   \
+  F(GenericNamedPropertyDeleterCallback, v8::Boolean, v8::Local<v8::Name>) \
+  F(IndexedPropertyGetterCallback, v8::Value, uint32_t)                    \
+  F(IndexedPropertyQueryCallback, v8::Integer, uint32_t)                   \
+  F(IndexedPropertyDeleterCallback, v8::Boolean, uint32_t)
+
+#define FOR_EACH_CALLBACK_TABLE_MAPPING_2(F)                            \
+  F(GenericNamedPropertySetterCallback, v8::Value, v8::Local<v8::Name>, \
+    v8::Local<v8::Value>)                                               \
+  F(IndexedPropertySetterCallback, v8::Value, uint32_t, v8::Local<v8::Value>)
 
 #define FOR_EACH_CALLBACK_TABLE_MAPPING_2_VOID_RETURN(F) \
   F(AccessorNameSetterCallback, \
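These FOR_EACH_CALLBACK_TABLE_MAPPING_n lists are X-macros: each consumer
supplies an F that is applied to every (callback, return type, argument types)
row. A hedged illustration of one such consumer (the WRITE_CALL_1 name is
invented for the example):

    #define WRITE_CALL_1(Function, ReturnValue, Arg1) \
      v8::Handle<ReturnValue> Call(Function f, Arg1 arg1);
    FOR_EACH_CALLBACK_TABLE_MAPPING_1(WRITE_CALL_1)
    #undef WRITE_CALL_1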
index eae38be..105d711 100644 (file)
@@ -127,6 +127,11 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
   }
 
   if (FLAG_enable_32dregs && cpu.has_vfp3_d32()) supported_ |= 1u << VFP32DREGS;
+
+  if (cpu.implementer() == base::CPU::NVIDIA &&
+      cpu.variant() == base::CPU::NVIDIA_DENVER) {
+    supported_ |= 1u << COHERENT_CACHE;
+  }
 #endif
 
   DCHECK(!IsSupported(VFP3) || IsSupported(ARMv7));
@@ -188,14 +193,15 @@ void CpuFeatures::PrintTarget() {
 void CpuFeatures::PrintFeatures() {
   printf(
     "ARMv7=%d VFP3=%d VFP32DREGS=%d NEON=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
-    "MOVW_MOVT_IMMEDIATE_LOADS=%d",
+    "MOVW_MOVT_IMMEDIATE_LOADS=%d COHERENT_CACHE=%d",
     CpuFeatures::IsSupported(ARMv7),
     CpuFeatures::IsSupported(VFP3),
     CpuFeatures::IsSupported(VFP32DREGS),
     CpuFeatures::IsSupported(NEON),
     CpuFeatures::IsSupported(SUDIV),
     CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
-    CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
+    CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS),
+    CpuFeatures::IsSupported(COHERENT_CACHE));
 #ifdef __arm__
   bool eabi_hardfloat = base::OS::ArmUsingHardFloat();
 #elif USE_EABI_HARDFLOAT
@@ -1338,7 +1344,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
 void Assembler::b(int branch_offset, Condition cond) {
   DCHECK((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
-  DCHECK(is_int24(imm24));
+  CHECK(is_int24(imm24));
   emit(cond | B27 | B25 | (imm24 & kImm24Mask));
 
   if (cond == al) {
@@ -1352,7 +1358,7 @@ void Assembler::bl(int branch_offset, Condition cond) {
   positions_recorder()->WriteRecordedPositions();
   DCHECK((branch_offset & 3) == 0);
   int imm24 = branch_offset >> 2;
-  DCHECK(is_int24(imm24));
+  CHECK(is_int24(imm24));
   emit(cond | B27 | B25 | B24 | (imm24 & kImm24Mask));
 }
 
@@ -1362,7 +1368,7 @@ void Assembler::blx(int branch_offset) {  // v5 and above
   DCHECK((branch_offset & 1) == 0);
   int h = ((branch_offset & 2) >> 1)*B24;
   int imm24 = branch_offset >> 2;
-  DCHECK(is_int24(imm24));
+  CHECK(is_int24(imm24));
   emit(kSpecialCondition | B27 | B25 | h | (imm24 & kImm24Mask));
 }
 
@@ -1504,7 +1510,7 @@ void Assembler::mov_label_offset(Register dst, Label* label) {
     //
     // When the label gets bound: target_at extracts the link and target_at_put
     // patches the instructions.
-    DCHECK(is_uint24(link));
+    CHECK(is_uint24(link));
     BlockConstPoolScope block_const_pool(this);
     emit(link);
     nop(dst.code());
@@ -1798,71 +1804,119 @@ void Assembler::pkhtb(Register dst,
 }
 
 
-void Assembler::uxtb(Register dst,
-                     const Operand& src,
-                     Condition cond) {
+void Assembler::sxtb(Register dst, Register src, int rotate, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.233.
+  // cond(31-28) | 01101010(27-20) | 1111(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6A * B20 | 0xF * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::sxtab(Register dst, Register src1, Register src2, int rotate,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.233.
+  // cond(31-28) | 01101010(27-20) | Rn(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src1.is(pc));
+  DCHECK(!src2.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6A * B20 | src1.code() * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
+}
+
+
+void Assembler::sxth(Register dst, Register src, int rotate, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.235.
+  // cond(31-28) | 01101011(27-20) | 1111(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6B * B20 | 0xF * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::sxtah(Register dst, Register src1, Register src2, int rotate,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.235.
+  // cond(31-28) | 01101011(27-20) | Rn(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src1.is(pc));
+  DCHECK(!src2.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6B * B20 | src1.code() * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
+}
+
+
+void Assembler::uxtb(Register dst, Register src, int rotate, Condition cond) {
   // Instruction details available in ARM DDI 0406C.b, A8.8.274.
   // cond(31-28) | 01101110(27-20) | 1111(19-16) |
   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
   DCHECK(!dst.is(pc));
-  DCHECK(!src.rm().is(pc));
-  DCHECK(!src.rm().is(no_reg));
-  DCHECK(src.rs().is(no_reg));
-  DCHECK((src.shift_imm_ == 0) ||
-         (src.shift_imm_ == 8) ||
-         (src.shift_imm_ == 16) ||
-         (src.shift_imm_ == 24));
-  // Operand maps ROR #0 to LSL #0.
-  DCHECK((src.shift_op() == ROR) ||
-         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
-  emit(cond | 0x6E*B20 | 0xF*B16 | dst.code()*B12 |
-       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
-}
-
-
-void Assembler::uxtab(Register dst,
-                      Register src1,
-                      const Operand& src2,
+  DCHECK(!src.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6E * B20 | 0xF * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::uxtab(Register dst, Register src1, Register src2, int rotate,
                       Condition cond) {
   // Instruction details available in ARM DDI 0406C.b, A8.8.271.
   // cond(31-28) | 01101110(27-20) | Rn(19-16) |
   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
   DCHECK(!dst.is(pc));
   DCHECK(!src1.is(pc));
-  DCHECK(!src2.rm().is(pc));
-  DCHECK(!src2.rm().is(no_reg));
-  DCHECK(src2.rs().is(no_reg));
-  DCHECK((src2.shift_imm_ == 0) ||
-         (src2.shift_imm_ == 8) ||
-         (src2.shift_imm_ == 16) ||
-         (src2.shift_imm_ == 24));
-  // Operand maps ROR #0 to LSL #0.
-  DCHECK((src2.shift_op() == ROR) ||
-         ((src2.shift_op() == LSL) && (src2.shift_imm_ == 0)));
-  emit(cond | 0x6E*B20 | src1.code()*B16 | dst.code()*B12 |
-       ((src2.shift_imm_ >> 1) &0xC)*B8 | 7*B4 | src2.rm().code());
+  DCHECK(!src2.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6E * B20 | src1.code() * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
 }
 
 
-void Assembler::uxtb16(Register dst,
-                       const Operand& src,
-                       Condition cond) {
+void Assembler::uxtb16(Register dst, Register src, int rotate, Condition cond) {
   // Instruction details available in ARM DDI 0406C.b, A8.8.275.
   // cond(31-28) | 01101100(27-20) | 1111(19-16) |
   // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
   DCHECK(!dst.is(pc));
-  DCHECK(!src.rm().is(pc));
-  DCHECK(!src.rm().is(no_reg));
-  DCHECK(src.rs().is(no_reg));
-  DCHECK((src.shift_imm_ == 0) ||
-         (src.shift_imm_ == 8) ||
-         (src.shift_imm_ == 16) ||
-         (src.shift_imm_ == 24));
-  // Operand maps ROR #0 to LSL #0.
-  DCHECK((src.shift_op() == ROR) ||
-         ((src.shift_op() == LSL) && (src.shift_imm_ == 0)));
-  emit(cond | 0x6C*B20 | 0xF*B16 | dst.code()*B12 |
-       ((src.shift_imm_ >> 1)&0xC)*B8 | 7*B4 | src.rm().code());
+  DCHECK(!src.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6C * B20 | 0xF * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::uxth(Register dst, Register src, int rotate, Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.276.
+  // cond(31-28) | 01101111(27-20) | 1111(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6F * B20 | 0xF * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src.code());
+}
+
+
+void Assembler::uxtah(Register dst, Register src1, Register src2, int rotate,
+                      Condition cond) {
+  // Instruction details available in ARM DDI 0406C.b, A8.8.273.
+  // cond(31-28) | 01101111(27-20) | Rn(19-16) |
+  // Rd(15-12) | rotate(11-10) | 00(9-8)| 0111(7-4) | Rm(3-0)
+  DCHECK(!dst.is(pc));
+  DCHECK(!src1.is(pc));
+  DCHECK(!src2.is(pc));
+  DCHECK(rotate == 0 || rotate == 8 || rotate == 16 || rotate == 24);
+  emit(cond | 0x6F * B20 | src1.code() * B16 | dst.code() * B12 |
+       ((rotate >> 1) & 0xC) * B8 | 7 * B4 | src2.code());
 }
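A worked example of the encoding path shared by these helpers, checked against
the bit-layout comments above: uxtb r0, r1, ror #8 under the al condition
assembles as

      0xE0000000              // cond = al (bits 31-28)
    | 0x6E << 20              // opcode 01101110
    | 0xF  << 16              // Rn = 1111 (no accumulate register)
    | 0    << 12              // Rd = r0
    | ((8 >> 1) & 0xC) << 8   // rotate field 01 -> ror #8
    | 7    << 4               // fixed 0111 pattern
    | 1                       // Rm = r1
    = 0xE6EF0471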
 
 
@@ -2437,6 +2491,12 @@ void  Assembler::vstm(BlockAddrMode am,
 }
 
 
+void Assembler::vmov(const SwVfpRegister dst, float imm) {
+  mov(ip, Operand(bit_cast<int32_t>(imm)));
+  vmov(dst, ip);
+}
+
+
 static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
   uint64_t i;
   memcpy(&i, &d, 8);
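The single-precision vmov overload added above has no immediate-encoding fast
path: it materializes the float's IEEE-754 bit pattern in the ip scratch
register and moves it across. A hedged illustration of the bit_cast step:

    int32_t bits = bit_cast<int32_t>(1.0f);  // 0x3F800000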
index 9087fab..4a719e6 100644 (file)
@@ -1034,12 +1034,20 @@ class Assembler : public AssemblerBase {
   void pkhtb(Register dst, Register src1, const Operand& src2,
              Condition cond = al);
 
-  void uxtb(Register dst, const Operand& src, Condition cond = al);
-
-  void uxtab(Register dst, Register src1, const Operand& src2,
+  void sxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
+  void sxtab(Register dst, Register src1, Register src2, int rotate = 0,
+             Condition cond = al);
+  void sxth(Register dst, Register src, int rotate = 0, Condition cond = al);
+  void sxtah(Register dst, Register src1, Register src2, int rotate = 0,
              Condition cond = al);
 
-  void uxtb16(Register dst, const Operand& src, Condition cond = al);
+  void uxtb(Register dst, Register src, int rotate = 0, Condition cond = al);
+  void uxtab(Register dst, Register src1, Register src2, int rotate = 0,
+             Condition cond = al);
+  void uxtb16(Register dst, Register src, int rotate = 0, Condition cond = al);
+  void uxth(Register dst, Register src, int rotate = 0, Condition cond = al);
+  void uxtah(Register dst, Register src1, Register src2, int rotate = 0,
+             Condition cond = al);
 
   // Status register access instructions
 
@@ -1172,6 +1180,7 @@ class Assembler : public AssemblerBase {
             SwVfpRegister last,
             Condition cond = al);
 
+  void vmov(const SwVfpRegister dst, float imm);
   void vmov(const DwVfpRegister dst,
             double imm,
             const Register scratch = no_reg);
index db7033d..8ce57d5 100644 (file)
@@ -372,13 +372,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         MemOperand bit_field3 = FieldMemOperand(r2, Map::kBitField3Offset);
         // Check if slack tracking is enabled.
         __ ldr(r4, bit_field3);
-        __ DecodeField<Map::ConstructionCount>(r3, r4);
-        __ cmp(r3, Operand(JSFunction::kNoSlackTracking));
-        __ b(eq, &allocate);
+        __ DecodeField<Map::Counter>(r3, r4);
+        __ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
+        __ b(lt, &allocate);
         // Decrease generous allocation count.
-        __ sub(r4, r4, Operand(1 << Map::ConstructionCount::kShift));
+        __ sub(r4, r4, Operand(1 << Map::Counter::kShift));
         __ str(r4, bit_field3);
-        __ cmp(r3, Operand(JSFunction::kFinishSlackTracking));
+        __ cmp(r3, Operand(Map::kSlackTrackingCounterEnd));
         __ b(ne, &allocate);
 
         __ push(r1);
@@ -431,9 +431,9 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
 
         // Check if slack tracking is enabled.
         __ ldr(ip, FieldMemOperand(r2, Map::kBitField3Offset));
-        __ DecodeField<Map::ConstructionCount>(ip);
-        __ cmp(ip, Operand(JSFunction::kNoSlackTracking));
-        __ b(eq, &no_inobject_slack_tracking);
+        __ DecodeField<Map::Counter>(ip);
+        __ cmp(ip, Operand(Map::kSlackTrackingCounterEnd));
+        __ b(lt, &no_inobject_slack_tracking);
 
         // Allocate object with a slack.
         __ ldr(r0, FieldMemOperand(r2, Map::kInstanceSizesOffset));
index a0e7e4a..ef286bb 100644 (file)
@@ -234,61 +234,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
 }
 
 
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
-    Isolate* isolate) {
-  WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2);
-  WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3);
-  stub1.GetCode();
-  stub2.GetCode();
-}
-
-
-// See comment for class.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
-  Label max_negative_int;
-  // the_int_ has the answer which is a signed int32 but not a Smi.
-  // We test for the special value that has a different exponent.  This test
-  // has the neat side effect of setting the flags according to the sign.
-  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
-  __ cmp(the_int(), Operand(0x80000000u));
-  __ b(eq, &max_negative_int);
-  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
-  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
-  uint32_t non_smi_exponent =
-      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-  __ mov(scratch(), Operand(non_smi_exponent));
-  // Set the sign bit in scratch_ if the value was negative.
-  __ orr(scratch(), scratch(), Operand(HeapNumber::kSignMask), LeaveCC, cs);
-  // Subtract from 0 if the value was negative.
-  __ rsb(the_int(), the_int(), Operand::Zero(), LeaveCC, cs);
-  // We should be masking the implicit first digit of the mantissa away here,
-  // but it just ends up combining harmlessly with the last digit of the
-  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
-  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
-  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
-  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-  __ orr(scratch(), scratch(), Operand(the_int(), LSR, shift_distance));
-  __ str(scratch(),
-         FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
-  __ mov(scratch(), Operand(the_int(), LSL, 32 - shift_distance));
-  __ str(scratch(),
-         FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
-  __ Ret();
-
-  __ bind(&max_negative_int);
-  // The max negative int32 is stored as a positive number in the mantissa of
-  // a double because it uses a sign bit instead of using two's complement.
-  // The actual mantissa bits stored are all 0 because the implicit most
-  // significant 1 bit is not stored.
-  non_smi_exponent += 1 << HeapNumber::kExponentShift;
-  __ mov(ip, Operand(HeapNumber::kSignMask | non_smi_exponent));
-  __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
-  __ mov(ip, Operand::Zero());
-  __ str(ip, FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
-  __ Ret();
-}
-
-
 // Handle the case where the lhs and rhs are the same object.
 // Equality is almost reflexive (everything but NaN), so this is a test
 // for "identity and not NaN".
@@ -967,7 +912,6 @@ bool CEntryStub::NeedsImmovableCode() {
 
 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   CEntryStub::GenerateAheadOfTime(isolate);
-  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
@@ -1494,9 +1438,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
-
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3,
-                                                          r4, &miss);
+  // Ensure that the vector and slot registers won't be clobbered before
+  // calling the miss handler.
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(r4, r5, VectorLoadICDescriptor::VectorRegister(),
+                     VectorLoadICDescriptor::SlotRegister()));
+
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r4,
+                                                          r5, &miss);
   __ bind(&miss);
   PropertyAccessCompiler::TailCallBuiltin(
       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@@ -1509,10 +1458,16 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
 
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register index = LoadDescriptor::NameRegister();
-  Register scratch = r3;
+  Register scratch = r5;
   Register result = r0;
   DCHECK(!scratch.is(receiver) && !scratch.is(index));
+  DCHECK(!FLAG_vector_ics ||
+         (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+          result.is(VectorLoadICDescriptor::SlotRegister())));
 
+  // StringCharAtGenerator doesn't use the result register until it's passed
+  // the different miss possibilities. If it did, we would have a conflict
+  // when FLAG_vector_ics is true.
   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -2686,6 +2641,10 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
 void CallICStub::Generate(MacroAssembler* masm) {
   // r1 - function
   // r3 - slot id (Smi)
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -2724,37 +2683,70 @@ void CallICStub::Generate(MacroAssembler* masm) {
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ CompareRoot(r4, Heap::kmegamorphic_symbolRootIndex);
   __ b(eq, &slow_start);
+
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ jmp(&miss);
+  }
+
   __ CompareRoot(r4, Heap::kuninitialized_symbolRootIndex);
+  __ b(eq, &uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(r4);
+  __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
+  __ b(ne, &miss);
+  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+  __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
+  // We have to update statistics for runtime profiling.
+  __ ldr(r4, FieldMemOperand(r2, with_types_offset));
+  __ sub(r4, r4, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(r2, with_types_offset));
+  __ ldr(r4, FieldMemOperand(r2, generic_offset));
+  __ add(r4, r4, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(r2, generic_offset));
+  __ jmp(&slow_start);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(r1, &miss);
+
+  // Goto miss case if we do not have a function.
+  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE);
+  __ b(ne, &miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4);
+  __ cmp(r1, r4);
   __ b(eq, &miss);
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(r4);
-    __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
-    __ b(ne, &miss);
-    __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
-    __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
-    __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ ldr(r4, FieldMemOperand(r2, with_types_offset));
-    __ sub(r4, r4, Operand(Smi::FromInt(1)));
-    __ str(r4, FieldMemOperand(r2, with_types_offset));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ ldr(r4, FieldMemOperand(r2, generic_offset));
-    __ add(r4, r4, Operand(Smi::FromInt(1)));
-    __ str(r4, FieldMemOperand(r2, generic_offset));
-    __ jmp(&slow_start);
-  }
+  // Update stats.
+  __ ldr(r4, FieldMemOperand(r2, with_types_offset));
+  __ add(r4, r4, Operand(Smi::FromInt(1)));
+  __ str(r4, FieldMemOperand(r2, with_types_offset));
+
+  // Store the function.
+  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3));
+  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ str(r1, MemOperand(r4, 0));
 
-  // We are here because tracing is on or we are going monomorphic.
+  // Update the write barrier.
+  __ mov(r5, r1);
+  __ RecordWrite(r2, r4, r5, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ jmp(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
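A hedged summary of the dispatch rewritten above, keyed on the feedback slot's
contents (sentinel names as used in the stub):

    // megamorphic_symbol    -> slow_start: generic call path
    // uninitialized_symbol  -> go monomorphic if the callee is a plain
    //                          JSFunction (Array() stays special-cased)
    // some other JSFunction -> transition the slot to megamorphic and
    //                          update the with_types/generic counters
    // anything else, or --trace-ic -> fall through to the miss handler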
 
@@ -3189,18 +3181,43 @@ void SubStringStub::Generate(MacroAssembler* masm) {
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in r0.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(r0, &check_heap_number);
+  Label not_smi;
+  __ JumpIfNotSmi(r0, &not_smi);
   __ Ret();
+  __ bind(&not_smi);
 
-  __ bind(&check_heap_number);
+  Label not_heap_number;
   __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
-  __ CompareRoot(r1, Heap::kHeapNumberMapRootIndex);
-  __ b(ne, &call_builtin);
+  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
+  // r0: object
+  // r1: instance type.
+  __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
+  __ b(ne, &not_heap_number);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  Label not_string, slow_string;
+  __ cmp(r1, Operand(FIRST_NONSTRING_TYPE));
+  __ b(hs, &not_string);
+  // Check if string has a cached array index.
+  __ ldr(r2, FieldMemOperand(r0, String::kHashFieldOffset));
+  __ tst(r2, Operand(String::kContainsCachedArrayIndexMask));
+  __ b(ne, &slow_string);
+  __ IndexFromHash(r2, r0);
+  __ Ret();
+  __ bind(&slow_string);
+  __ push(r0);  // Push argument.
+  __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+  __ bind(&not_string);
+
+  Label not_oddball;
+  __ cmp(r1, Operand(ODDBALL_TYPE));
+  __ b(ne, &not_oddball);
+  __ ldr(r0, FieldMemOperand(r0, Oddball::kToNumberOffset));
   __ Ret();
+  __ bind(&not_oddball);
 
-  __ bind(&call_builtin);
-  __ push(r0);
+  __ push(r0);  // Push argument.
   __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
 
index 727bb1b..e1fab29 100644 (file)
@@ -46,44 +46,6 @@ class StringHelper : public AllStatic {
 };
 
 
-// This stub can convert a signed int32 to a heap number (double).  It does
-// not work for int32s that are in Smi range!  No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
-  WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
-                             Register the_heap_number, Register scratch)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = IntRegisterBits::encode(the_int.code()) |
-                 HeapNumberRegisterBits::encode(the_heap_number.code()) |
-                 ScratchRegisterBits::encode(scratch.code());
-  }
-
-  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
-  Register the_int() const {
-    return Register::from_code(IntRegisterBits::decode(minor_key_));
-  }
-
-  Register the_heap_number() const {
-    return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
-  }
-
-  Register scratch() const {
-    return Register::from_code(ScratchRegisterBits::decode(minor_key_));
-  }
-
-  // Minor key encoding in 16 bits.
-  class IntRegisterBits: public BitField<int, 0, 4> {};
-  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
-  class ScratchRegisterBits: public BitField<int, 8, 4> {};
-
-  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-  DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
-};
-
-
 class RecordWriteStub: public PlatformCodeStub {
  public:
   RecordWriteStub(Isolate* isolate,
@@ -112,7 +74,7 @@ class RecordWriteStub: public PlatformCodeStub {
     INCREMENTAL_COMPACTION
   };
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
   static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
     masm->instr_at_put(pos, (masm->instr_at(pos) & ~B27) | (B24 | B20));
@@ -235,9 +197,9 @@ class RecordWriteStub: public PlatformCodeStub {
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+  inline Major MajorKey() const FINAL { return RecordWrite; }
 
-  virtual void Generate(MacroAssembler* masm) OVERRIDE;
+  void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -245,7 +207,7 @@ class RecordWriteStub: public PlatformCodeStub {
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  void Activate(Code* code) {
+  void Activate(Code* code) OVERRIDE {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
@@ -293,7 +255,7 @@ class DirectCEntryStub: public PlatformCodeStub {
   void GenerateCall(MacroAssembler* masm, Register target);
 
  private:
-  bool NeedsImmovableCode() { return true; }
+  bool NeedsImmovableCode() OVERRIDE { return true; }
 
   DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
   DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
@@ -325,7 +287,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
                                      Register r0,
                                      Register r1);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
  private:
   static const int kInlinedProbes = 4;
index b577f59..fd1b0ef 100644 (file)
@@ -288,8 +288,8 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
 
     __ bind(&loop);
     __ ldr(temp1, MemOperand(src, 4, PostIndex));
-    __ uxtb16(temp3, Operand(temp1, ROR, 0));
-    __ uxtb16(temp4, Operand(temp1, ROR, 8));
+    __ uxtb16(temp3, temp1);
+    __ uxtb16(temp4, temp1, 8);
     __ pkhbt(temp1, temp3, Operand(temp4, LSL, 16));
     __ str(temp1, MemOperand(dest));
     __ pkhtb(temp1, temp4, Operand(temp3, ASR, 16));
@@ -301,9 +301,9 @@ MemCopyUint16Uint8Function CreateMemCopyUint16Uint8Function(
     __ mov(chars, Operand(chars, LSL, 31), SetCC);  // bit0 => ne, bit1 => cs
     __ b(&not_two, cc);
     __ ldrh(temp1, MemOperand(src, 2, PostIndex));
-    __ uxtb(temp3, Operand(temp1, ROR, 8));
+    __ uxtb(temp3, temp1, 8);
     __ mov(temp3, Operand(temp3, LSL, 16));
-    __ uxtab(temp3, temp3, Operand(temp1, ROR, 0));
+    __ uxtab(temp3, temp3, temp1);
     __ str(temp3, MemOperand(dest, 4, PostIndex));
     __ bind(&not_two);
     __ ldrb(temp1, MemOperand(src), ne);
index 2a293b3..0037ce1 100644 (file)
@@ -332,9 +332,9 @@ enum NeonSize {
// standard SoftwareInterruptCode. Bit 23 is reserved for the stop feature.
 enum SoftwareInterruptCodes {
   // transition to C code
-  kCallRtRedirected= 0x10,
+  kCallRtRedirected = 0x10,
   // break point
-  kBreakpoint= 0x20,
+  kBreakpoint = 0x20,
   // stop
   kStopCode = 1 << 23
 };
index 9c7104e..4a34070 100644 (file)
@@ -27,6 +27,8 @@ namespace internal {
 void CpuFeatures::FlushICache(void* start, size_t size) {
   if (size == 0) return;
 
+  if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
+
 #if defined(USE_SIMULATOR)
   // Not generating ARM instructions for C-code. This means that we are
   // building an ARM emulator based target.  We should notify the simulator
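A hedged note on the early return added above: cores that advertise a coherent
instruction cache (here NVIDIA Denver, detected in the CPU probe earlier in this
patch) make freshly written code visible to instruction fetch without an
explicit flush, so the simulator and syscall flushing paths below can be
skipped.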
index 0455a3b..b3a6173 100644 (file)
@@ -21,6 +21,12 @@ int Deoptimizer::patch_size() {
 }
 
 
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+  // Empty because there is no need for relocation information for the code
+  // patching in Deoptimizer::PatchCodeForDeoptimization below.
+}
+
+
 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
   Address code_start_address = code->instruction_start();
   // Invalidate the relocation information, as it will become invalid by the
index dc26018..4e631b0 100644 (file)
@@ -1027,7 +1027,75 @@ void Decoder::DecodeType3(Instruction* instr) {
               UNREACHABLE();
               break;
             case 1:
-              UNREACHABLE();
+              if (instr->Bits(9, 6) == 1) {
+                if (instr->Bit(20) == 0) {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "sxtb'cond 'rd, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "sxtb'cond 'rd, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "sxtb'cond 'rd, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "sxtb'cond 'rd, 'rm, ror #24");
+                        break;
+                    }
+                  } else {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "sxtab'cond 'rd, 'rn, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "sxtab'cond 'rd, 'rn, 'rm, ror #24");
+                        break;
+                    }
+                  }
+                } else {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "sxth'cond 'rd, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "sxth'cond 'rd, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "sxth'cond 'rd, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "sxth'cond 'rd, 'rm, ror #24");
+                        break;
+                    }
+                  } else {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "sxtah'cond 'rd, 'rn, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "sxtah'cond 'rd, 'rn, 'rm, ror #24");
+                        break;
+                    }
+                  }
+                }
+              } else {
+                UNREACHABLE();
+              }
               break;
             case 2:
               if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
@@ -1054,36 +1122,70 @@ void Decoder::DecodeType3(Instruction* instr) {
               }
               break;
             case 3:
-              if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
-                if (instr->Bits(19, 16) == 0xF) {
-                  switch (instr->Bits(11, 10)) {
-                    case 0:
-                      Format(instr, "uxtb'cond 'rd, 'rm");
-                      break;
-                    case 1:
-                      Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
-                      break;
-                    case 2:
-                      Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
-                      break;
-                    case 3:
-                      Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
-                      break;
+              if ((instr->Bits(9, 6) == 1)) {
+                if ((instr->Bit(20) == 0)) {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "uxtb'cond 'rd, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "uxtb'cond 'rd, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "uxtb'cond 'rd, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "uxtb'cond 'rd, 'rm, ror #24");
+                        break;
+                    }
+                  } else {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
+                        break;
+                    }
                   }
                 } else {
-                  switch (instr->Bits(11, 10)) {
-                    case 0:
-                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm");
-                      break;
-                    case 1:
-                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #8");
-                      break;
-                    case 2:
-                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #16");
-                      break;
-                    case 3:
-                      Format(instr, "uxtab'cond 'rd, 'rn, 'rm, ror #24");
-                      break;
+                  if (instr->Bits(19, 16) == 0xF) {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "uxth'cond 'rd, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "uxth'cond 'rd, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "uxth'cond 'rd, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "uxth'cond 'rd, 'rm, ror #24");
+                        break;
+                    }
+                  } else {
+                    switch (instr->Bits(11, 10)) {
+                      case 0:
+                        Format(instr, "uxtah'cond 'rd, 'rn, 'rm");
+                        break;
+                      case 1:
+                        Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #8");
+                        break;
+                      case 2:
+                        Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #16");
+                        break;
+                      case 3:
+                        Format(instr, "uxtah'cond 'rd, 'rn, 'rm, ror #24");
+                        break;
+                    }
                   }
                 }
               } else {
index 1710b60..3dc5420 100644 (file)
@@ -195,10 +195,10 @@ void FullCodeGenerator::Generate() {
     // Argument to NewContext is the function, which is still in r1.
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
-    if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+    if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
       __ push(r1);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewScriptContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -937,7 +937,7 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   EmitDebugCheckDeclarationContext(variable);
 
   // Load instance object.
-  __ LoadContext(r1, scope_->ContextChainLength(scope_->GlobalScope()));
+  __ LoadContext(r1, scope_->ContextChainLength(scope_->ScriptScope()));
   __ ldr(r1, ContextOperand(r1, variable->interface()->Index()));
   __ ldr(r1, ContextOperand(r1, Context::EXTENSION_INDEX));
 
@@ -1111,6 +1111,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 
   // Get the object to enumerate over. If the object is null or undefined, skip
   // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  SetExpressionPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
   __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
   __ cmp(r0, ip);
@@ -1214,6 +1215,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // Generate code for doing the condition check.
   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
+  SetExpressionPosition(stmt->each());
+
   // Load the current count to r0, load the length to r1.
   __ Ldrd(r0, r1, MemOperand(sp, 0 * kPointerSize));
   __ cmp(r0, r1);  // Compare to the array length.
@@ -1283,48 +1286,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 }
 
 
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
-  Comment cmnt(masm_, "[ ForOfStatement");
-  SetStatementPosition(stmt);
-
-  Iteration loop_statement(this, stmt);
-  increment_loop_depth();
-
-  // var iterator = iterable[Symbol.iterator]();
-  VisitForEffect(stmt->assign_iterator());
-
-  // Loop entry.
-  __ bind(loop_statement.continue_label());
-
-  // result = iterator.next()
-  VisitForEffect(stmt->next_result());
-
-  // if (result.done) break;
-  Label result_not_done;
-  VisitForControl(stmt->result_done(),
-                  loop_statement.break_label(),
-                  &result_not_done,
-                  &result_not_done);
-  __ bind(&result_not_done);
-
-  // each = result.value
-  VisitForEffect(stmt->assign_each());
-
-  // Generate code for the body of the loop.
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
-  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
-  __ jmp(loop_statement.continue_label());
-
-  // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ bind(loop_statement.break_label());
-  decrement_loop_depth();
-}
-
-
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
   // Use the fast case closure allocation code that allocates in new
@@ -1383,6 +1344,19 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
 }
 
 
+void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
+                                                  int offset) {
+  if (NeedsHomeObject(initializer)) {
+    __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+    __ mov(StoreDescriptor::NameRegister(),
+           Operand(isolate()->factory()->home_object_symbol()));
+    __ ldr(StoreDescriptor::ValueRegister(),
+           MemOperand(sp, offset * kPointerSize));
+    CallStoreIC();
+  }
+}
+
+
 void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
@@ -1739,6 +1713,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
             __ ldr(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+            if (NeedsHomeObject(value)) {
+              __ Move(StoreDescriptor::ReceiverRegister(), r0);
+              __ mov(StoreDescriptor::NameRegister(),
+                     Operand(isolate()->factory()->home_object_symbol()));
+              __ ldr(StoreDescriptor::ValueRegister(), MemOperand(sp));
+              CallStoreIC();
+            }
           } else {
             VisitForEffect(value);
           }
@@ -1750,6 +1732,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
+          EmitSetHomeObjectIfNeeded(value, 2);
           __ mov(r0, Operand(Smi::FromInt(SLOPPY)));  // PropertyAttributes
           __ push(r0);
           __ CallRuntime(Runtime::kSetProperty, 4);
@@ -1787,7 +1770,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
     __ push(r0);
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
+    EmitSetHomeObjectIfNeeded(it->second->getter, 2);
     EmitAccessor(it->second->setter);
+    EmitSetHomeObjectIfNeeded(it->second->setter, 3);
     __ mov(r0, Operand(Smi::FromInt(NONE)));
     __ push(r0);
     __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -2210,15 +2195,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   VisitForAccumulatorValue(value);
   __ pop(r1);
 
-  // Check generator state.
-  Label wrong_state, closed_state, done;
-  __ ldr(r3, FieldMemOperand(r1, JSGeneratorObject::kContinuationOffset));
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
-  __ cmp(r3, Operand(Smi::FromInt(0)));
-  __ b(eq, &closed_state);
-  __ b(lt, &wrong_state);
-
   // Load suspended function and context.
   __ ldr(cp, FieldMemOperand(r1, JSGeneratorObject::kContextOffset));
   __ ldr(r4, FieldMemOperand(r1, JSGeneratorObject::kFunctionOffset));
@@ -2241,7 +2217,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
 
   // Enter a new JavaScript frame, and initialize its slots as they were when
   // the generator was suspended.
-  Label resume_frame;
+  Label resume_frame, done;
   __ bind(&push_frame);
   __ bl(&resume_frame);
   __ jmp(&done);
@@ -2301,26 +2277,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   // Not reached: the runtime call returns elsewhere.
   __ stop("not-reached");
 
-  // Reach here when generator is closed.
-  __ bind(&closed_state);
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    // Return completed iterator result when generator is closed.
-    __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-    __ push(r2);
-    // Pop value from top-of-stack slot; box result into result register.
-    EmitCreateIteratorResult(true);
-  } else {
-    // Throw the provided value.
-    __ push(r0);
-    __ CallRuntime(Runtime::kThrow, 1);
-  }
-  __ jmp(&done);
-
-  // Throw error if we attempt to operate on a running generator.
-  __ bind(&wrong_state);
-  __ push(r1);
-  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
   __ bind(&done);
   context()->Plug(result_register());
 }
@@ -2534,6 +2490,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
     __ push(scratch);
     VisitForStackValue(key);
     VisitForStackValue(value);
+    EmitSetHomeObjectIfNeeded(value, 2);
 
     switch (property->kind()) {
       case ObjectLiteral::Property::CONSTANT:
@@ -2728,8 +2685,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
+  } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+    __ CallRuntime(Runtime::kThrowConstAssignError, 0);
   }
-  // Non-initializing assignments to consts are ignored.
 }
 
 
@@ -5085,7 +5043,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope() ||
+  if (declaration_scope->is_script_scope() ||
       declaration_scope->is_module_scope()) {
     // Contexts nested in the native context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
index 46897a3..e5de950 100644 (file)
@@ -1098,9 +1098,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
       UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
   LOperand* name_register =
       UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+    vector =
+        UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+  }
+
   // Not marked as call. It can't deoptimize, and it never returns.
   return new (zone()) LTailCallThroughMegamorphicCache(
-      context, receiver_register, name_register);
+      context, receiver_register, name_register, slot, vector);
 }
 
 
@@ -1397,8 +1405,16 @@ LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
   LOperand* divisor = UseRegister(instr->right());
   LOperand* temp =
       CpuFeatures::IsSupported(SUDIV) ? NULL : TempDoubleRegister();
-  LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor, temp);
-  return AssignEnvironment(DefineAsRegister(div));
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor, temp));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       (!CpuFeatures::IsSupported(SUDIV) ||
+        !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
 
 
@@ -2111,7 +2127,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object =
       UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
   LLoadGlobalGeneric* result =
@@ -2170,7 +2186,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
@@ -2237,7 +2253,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
index f9feaf6..1920935 100644 (file)
@@ -166,17 +166,13 @@ class LCodeGen;
   V(WrapReceiver)
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const FINAL OVERRIDE {                      \
-    return LInstruction::k##type;                                           \
-  }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;   \
-  virtual const char* Mnemonic() const FINAL OVERRIDE {               \
-    return mnemonic;                                                        \
-  }                                                                         \
-  static L##type* cast(LInstruction* instr) {                               \
-    DCHECK(instr->Is##type());                                              \
-    return reinterpret_cast<L##type*>(instr);                               \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const FINAL { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) FINAL;              \
+  const char* Mnemonic() const FINAL { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
   }
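After this cleanup the macro relies on FINAL alone; the methods remain
implicitly virtual because they override LInstruction's virtuals. For reference,
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") now expands to:

    Opcode opcode() const FINAL { return LInstruction::kGoto; }
    void CompileToNative(LCodeGen* generator) FINAL;
    const char* Mnemonic() const FINAL { return "goto"; }
    static LGoto* cast(LInstruction* instr) {
      DCHECK(instr->IsGoto());
      return reinterpret_cast<LGoto*>(instr);
    }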
 
 
@@ -291,11 +287,9 @@ class LTemplateResultInstruction : public LInstruction {
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const FINAL OVERRIDE {
-    return R != 0 && result() != NULL;
-  }
+  bool HasResult() const FINAL { return R != 0 && result() != NULL; }
   void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() const { return results_[0]; }
+  LOperand* result() const OVERRIDE { return results_[0]; }
 
  protected:
   EmbeddedContainer<LOperand*, R> results_;
@@ -313,11 +307,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
 
  private:
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return I; }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
+  int TempCount() FINAL { return T; }
+  LOperand* TempAt(int i) FINAL { return temps_[i]; }
 };
 
 
@@ -332,8 +326,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsGap() const OVERRIDE { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
     DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
@@ -373,7 +367,7 @@ class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -385,10 +379,10 @@ class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
-  virtual bool IsControl() const OVERRIDE { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
 
@@ -431,7 +425,7 @@ class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
 
 class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const OVERRIDE { return true; }
+  bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
@@ -442,12 +436,10 @@ class LLabel FINAL : public LGap {
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -465,7 +457,7 @@ class LLabel FINAL : public LGap {
 
 class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
@@ -484,30 +476,33 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
 
 
 class LTailCallThroughMegamorphicCache FINAL
-    : public LTemplateInstruction<0, 3, 0> {
+    : public LTemplateInstruction<0, 5, 0> {
  public:
-  explicit LTailCallThroughMegamorphicCache(LOperand* context,
-                                            LOperand* receiver,
-                                            LOperand* name) {
+  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+                                   LOperand* name, LOperand* slot,
+                                   LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = receiver;
     inputs_[2] = name;
+    inputs_[3] = slot;
+    inputs_[4] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
   LOperand* name() { return inputs_[2]; }
+  LOperand* slot() { return inputs_[3]; }
+  LOperand* vector() { return inputs_[4]; }
 
   DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
                                "tail-call-through-megamorphic-cache")
   DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
 };
 
+
 class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };
 
@@ -517,7 +512,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const FINAL OVERRIDE { return true; }
+  bool IsControl() const FINAL { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -606,7 +601,7 @@ class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* length() { return inputs_[1]; }
   LOperand* index() { return inputs_[2]; }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -869,7 +864,7 @@ class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1053,7 +1048,7 @@ class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1070,7 +1065,7 @@ class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1085,7 +1080,7 @@ class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1103,7 +1098,7 @@ class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1125,7 +1120,7 @@ class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
 
   Token::Value op() const { return hydrogen()->token(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1141,7 +1136,7 @@ class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1171,7 +1166,7 @@ class LHasCachedArrayIndexAndBranch FINAL
                                "has-cached-array-index-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1189,7 +1184,7 @@ class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1401,7 +1396,7 @@ class LBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1546,11 +1541,9 @@ class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticD;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -1574,11 +1567,9 @@ class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* right() { return inputs_[2]; }
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticT;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -1687,7 +1678,7 @@ class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
 
@@ -1768,7 +1759,7 @@ class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1787,7 +1778,7 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1826,7 +1817,7 @@ class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
   LOperand* function() { return inputs_[0]; }
   LOperand* code_object() { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
   DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@@ -1843,7 +1834,7 @@ class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
@@ -1887,7 +1878,7 @@ class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1907,11 +1898,12 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
 
   const CallInterfaceDescriptor descriptor() { return descriptor_; }
 
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
@@ -1919,11 +1911,11 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return inputs_.length(); }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+  int TempCount() FINAL { return 0; }
+  LOperand* TempAt(int i) FINAL { return NULL; }
 };
 
 
@@ -1940,7 +1932,7 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1976,7 +1968,7 @@ class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1995,7 +1987,7 @@ class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -2012,7 +2004,7 @@ class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -2206,7 +2198,7 @@ class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Representation representation() const {
     return hydrogen()->field_representation();
@@ -2229,7 +2221,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
@@ -2261,7 +2253,7 @@ class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   bool NeedsCanonicalization() {
     if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
         hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
@@ -2293,7 +2285,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
@@ -2317,7 +2309,7 @@ class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2611,7 +2603,7 @@ class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -2632,9 +2624,7 @@ class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry() {}
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
@@ -2839,7 +2829,7 @@ class LChunkBuilder FINAL : public LChunkBuilderBase {
 
   // An input operand in register, stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
index e945a13..9d9591b 100644 (file)
@@ -27,9 +27,9 @@ class SafepointGenerator FINAL : public CallWrapper {
         deopt_mode_(mode) { }
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const OVERRIDE {}
+  void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const OVERRIDE {
+  void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
@@ -2785,11 +2785,11 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_,
                                                  &load_bool_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
     Label* load_bool() { return &load_bool_; }
 
@@ -2964,6 +2964,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
         __ add(sp, sp, Operand(sp_delta));
       }
     } else {
+      DCHECK(info()->IsStub());  // Functions would need to drop one more value.
       Register reg = ToRegister(instr->parameter_count());
       // The argument count parameter is a smi
       __ SmiUntag(reg);
@@ -2995,13 +2996,17 @@ template <class T>
 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   DCHECK(FLAG_vector_ics);
   Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = VectorLoadICDescriptor::SlotRegister();
   DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(r0));
+
+  AllowDeferredHandleDereference vector_structure_check;
   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   __ Move(vector_register, vector);
   // No need to allocate this register.
-  DCHECK(VectorLoadICDescriptor::SlotRegister().is(r0));
-  int index = vector->GetIndex(instr->hydrogen()->slot());
-  __ mov(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Operand(Smi::FromInt(index)));
 }
 
 
@@ -3760,10 +3765,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
    public:
     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LMathAbs* instr_;
   };
@@ -3982,54 +3988,91 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
   DCHECK(name.is(LoadDescriptor::NameRegister()));
   DCHECK(receiver.is(r1));
   DCHECK(name.is(r2));
+  Register scratch = r4;
+  Register extra = r5;
+  Register extra2 = r6;
+  Register extra3 = r9;
 
-  Register scratch = r3;
-  Register extra = r4;
-  Register extra2 = r5;
-  Register extra3 = r6;
+#ifdef DEBUG
+  Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+  Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(slot, vector, scratch, extra, extra2, extra3));
+#endif
 
   // Important for the tail-call.
   bool must_teardown_frame = NeedsEagerFrame();
 
-  // The probe will tail call to a handler if found.
-  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
-                                         must_teardown_frame, receiver, name,
-                                         scratch, extra, extra2, extra3);
+  if (!instr->hydrogen()->is_just_miss()) {
+    DCHECK(!instr->hydrogen()->is_keyed_load());
+
+    // The probe will tail call to a handler if found.
+    isolate()->stub_cache()->GenerateProbe(
+        masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+        receiver, name, scratch, extra, extra2, extra3);
+  }
 
   // Tail call to miss if we ended up here.
   if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
-  LoadIC::GenerateMiss(masm());
+  if (instr->hydrogen()->is_keyed_load()) {
+    KeyedLoadIC::GenerateMiss(masm());
+  } else {
+    LoadIC::GenerateMiss(masm());
+  }
 }
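
Condensed, the rewritten tail-call sequence now has two independent switches, is_just_miss() and is_keyed_load(). A hedged outline of the emitted control flow (pseudocode, not the assembly itself):

    // if (!is_just_miss)  probe the megamorphic stub cache; on a hit the
    //                     probe itself tail-calls the handler and never
    //                     returns here
    // if (must_teardown_frame)  LeaveFrame(StackFrame::INTERNAL)
    // tail-call the miss handler:
    //     is_keyed_load ? KeyedLoadIC::GenerateMiss : LoadIC::GenerateMiss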
 
 
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   DCHECK(ToRegister(instr->result()).is(r0));
 
-  LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
-  if (instr->target()->IsConstantOperand()) {
-    LConstantOperand* target = LConstantOperand::cast(instr->target());
-    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-    PlatformInterfaceDescriptor* call_descriptor =
-        instr->descriptor().platform_specific_descriptor();
-    __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
-            call_descriptor->storage_mode());
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      // Make sure we don't emit any additional entries in the constant pool
+      // before the call to ensure that the CallCodeSize() calculated the
+      // correct number of instructions for the constant pool load.
+      {
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      }
+      __ Jump(target);
+    }
   } else {
-    DCHECK(instr->target()->IsRegister());
-    Register target = ToRegister(instr->target());
-    generator.BeforeCall(__ CallSize(target));
-    // Make sure we don't emit any additional entries in the constant pool
-    // before the call to ensure that the CallCodeSize() calculated the correct
-    // number of instructions for the constant pool load.
-    {
-      ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
-      __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      PlatformInterfaceDescriptor* call_descriptor =
+          instr->descriptor().platform_specific_descriptor();
+      __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None(), al,
+              call_descriptor->storage_mode());
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      // Make sure we don't emit any additional entries in the constant pool
+      // before the call to ensure that the CallCodeSize() calculated the
+      // correct number of instructions for the constant pool load.
+      {
+        ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+        __ add(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      }
+      __ Call(target);
     }
-    __ Call(target);
+    generator.AfterCall();
   }
-  generator.AfterCall();
 }
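
DoCallWithDescriptor follows the same split: a tail call tears down the frame and jumps, while a regular call still runs through the SafepointGenerator defined earlier in this file. A hedged outline of the emission logic (pseudocode, not compilable V8 code):

    if (IsTailCall()) {
      if (NeedsEagerFrame()) LeaveFrame();     // caller frame is dead
      Jump(target);                            // no safepoint: never returns
    } else {
      generator.BeforeCall(CallSize(target));  // sizes the safepoint window
      Call(target);
      generator.AfterCall();                   // records safepoint, lazy deopt
    }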
 
 
@@ -4529,10 +4572,9 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStringCharCodeAt(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4585,10 +4627,11 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
    public:
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharFromCode* instr_;
   };
@@ -4662,14 +4705,15 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
    public:
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_,
                                        instr_->value(),
                                        instr_->temp1(),
                                        instr_->temp2(),
                                        SIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagI* instr_;
   };
@@ -4689,14 +4733,15 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
    public:
     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_,
                                        instr_->value(),
                                        instr_->temp1(),
                                        instr_->temp2(),
                                        UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagU* instr_;
   };
@@ -4783,10 +4828,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
    public:
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredNumberTagD(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagD* instr_;
   };
@@ -5002,10 +5046,9 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
    public:
     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredTaggedToI(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LTaggedToI* instr_;
   };
@@ -5199,11 +5242,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
         : LDeferredCode(codegen), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -5327,10 +5371,9 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
    public:
     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredAllocate(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LAllocate* instr_;
   };
@@ -5692,10 +5735,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
    public:
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStackCheck(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStackCheck* instr_;
   };
@@ -5848,10 +5890,11 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
           object_(object),
           index_(index) {
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LLoadFieldByIndex* instr_;
     Register result_;
index 9294a8c..0aa886b 100644 (file)
@@ -1699,11 +1699,12 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   }
 
   bind(&done);
-  // Check that the value is a normal property.
+  // Check that the value is a field property.
   // t2: elements + (index * kPointerSize)
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   ldr(t1, FieldMemOperand(t2, kDetailsOffset));
+  DCHECK_EQ(FIELD, 0);
   tst(t1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
   b(ne, miss);
 
@@ -2251,23 +2252,37 @@ void MacroAssembler::CheckMap(Register obj,
 }
 
 
-void MacroAssembler::DispatchMap(Register obj,
-                                 Register scratch,
-                                 Handle<Map> map,
-                                 Handle<Code> success,
-                                 SmiCheckType smi_check_type) {
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+                                     Register scratch2, Handle<WeakCell> cell,
+                                     Handle<Code> success,
+                                     SmiCheckType smi_check_type) {
   Label fail;
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, &fail);
   }
-  ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
-  mov(ip, Operand(map));
-  cmp(scratch, ip);
+  ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
+  CmpWeakValue(scratch1, cell, scratch2);
   Jump(success, RelocInfo::CODE_TARGET, eq);
   bind(&fail);
 }
 
 
+void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
+                                  Register scratch) {
+  mov(scratch, Operand(cell));
+  ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
+  cmp(value, scratch);
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+                                   Label* miss) {
+  mov(value, Operand(cell));
+  ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
+  JumpIfSmi(value, miss);
+}
+
+
 void MacroAssembler::TryGetFunctionPrototype(Register function,
                                              Register result,
                                              Register scratch,
@@ -3653,18 +3668,6 @@ void MacroAssembler::CheckPageFlag(
 }
 
 
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
-                                        Register scratch,
-                                        Label* if_deprecated) {
-  if (map->CanBeDeprecated()) {
-    mov(scratch, Operand(map));
-    ldr(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
-    tst(scratch, Operand(Map::Deprecated::kMask));
-    b(ne, if_deprecated);
-  }
-}
-
-
 void MacroAssembler::JumpIfBlack(Register object,
                                  Register scratch0,
                                  Register scratch1,
index 79d26f2..d83b64b 100644 (file)
@@ -200,10 +200,6 @@ class MacroAssembler: public Assembler {
                      Condition cc,
                      Label* condition_met);
 
-  void CheckMapDeprecated(Handle<Map> map,
-                          Register scratch,
-                          Label* if_deprecated);
-
   // Check if object is in new space.  Jumps if the object is not in new space.
   // The register scratch can be object itself, but scratch will be clobbered.
   void JumpIfNotInNewSpace(Register object,
@@ -918,15 +914,19 @@ class MacroAssembler: public Assembler {
                 SmiCheckType smi_check_type);
 
 
-  // Check if the map of an object is equal to a specified map and branch to a
-  // specified target if equal. Skip the smi check if not required (object is
-  // known to be a heap object)
-  void DispatchMap(Register obj,
-                   Register scratch,
-                   Handle<Map> map,
-                   Handle<Code> success,
-                   SmiCheckType smi_check_type);
+  // Check if the map of an object is equal to a specified weak map and branch
+  // to a specified target if equal. Skip the smi check if not required
+  // (object is known to be a heap object)
+  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+                       Handle<WeakCell> cell, Handle<Code> success,
+                       SmiCheckType smi_check_type);
+
+  // Compare the given value with the value of the weak cell.
+  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
 
+  // Load the value of the weak cell into the value register. Branch to the
+  // given miss label if the weak cell was cleared.
+  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
 
   // Compare the object in a register to a value from the root list.
   // Uses the ip register as scratch.
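
Taken together, the weak-cell helpers give handwritten stubs one pattern for map checks that must not keep the map alive. A minimal caller sketch, composed only of operations visible in this patch (the register choices and the handler label are hypothetical):

    Label miss;
    // Load the object's map and compare it against the weak cell's value;
    // a cleared cell never compares equal, so it falls through to |miss|.
    __ ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
    __ CmpWeakValue(scratch1, cell, scratch2);
    __ Jump(handler, RelocInfo::CODE_TARGET, eq);
    __ bind(&miss);
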
index 972fd07..e34c311 100644 (file)
@@ -2629,7 +2629,89 @@ void Simulator::DecodeType3(Instruction* instr) {
               UNIMPLEMENTED();
               break;
             case 1:
-              UNIMPLEMENTED();
+              if (instr->Bits(9, 6) == 1) {
+                if (instr->Bit(20) == 0) {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    // Sxtb.
+                    int32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, static_cast<int8_t>(rm_val));
+                  } else {
+                    // Sxtab.
+                    int32_t rn_val = get_register(rn);
+                    int32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, rn_val + static_cast<int8_t>(rm_val));
+                  }
+                } else {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    // Sxth.
+                    int32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, static_cast<int16_t>(rm_val));
+                  } else {
+                    // Sxtah.
+                    int32_t rn_val = get_register(rn);
+                    int32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, rn_val + static_cast<int16_t>(rm_val));
+                  }
+                }
+              } else {
+                UNREACHABLE();
+              }
               break;
             case 2:
               if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
@@ -2650,8 +2732,7 @@ void Simulator::DecodeType3(Instruction* instr) {
                       rm_val = (rm_val >> 24) | (rm_val << 8);
                       break;
                   }
-                  set_register(rd,
-                               (rm_val & 0xFF) | (rm_val & 0xFF0000));
+                  set_register(rd, (rm_val & 0xFF) | (rm_val & 0xFF0000));
                 } else {
                   UNIMPLEMENTED();
                 }
@@ -2660,44 +2741,85 @@ void Simulator::DecodeType3(Instruction* instr) {
               }
               break;
             case 3:
-              if ((instr->Bit(20) == 0) && (instr->Bits(9, 6) == 1)) {
-                if (instr->Bits(19, 16) == 0xF) {
-                  // Uxtb.
-                  uint32_t rm_val = get_register(instr->RmValue());
-                  int32_t rotate = instr->Bits(11, 10);
-                  switch (rotate) {
-                    case 0:
-                      break;
-                    case 1:
-                      rm_val = (rm_val >> 8) | (rm_val << 24);
-                      break;
-                    case 2:
-                      rm_val = (rm_val >> 16) | (rm_val << 16);
-                      break;
-                    case 3:
-                      rm_val = (rm_val >> 24) | (rm_val << 8);
-                      break;
+              if (instr->Bits(9, 6) == 1) {
+                if (instr->Bit(20) == 0) {
+                  if (instr->Bits(19, 16) == 0xF) {
+                    // Uxtb.
+                    uint32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, (rm_val & 0xFF));
+                  } else {
+                    // Uxtab.
+                    uint32_t rn_val = get_register(rn);
+                    uint32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, rn_val + (rm_val & 0xFF));
                   }
-                  set_register(rd, (rm_val & 0xFF));
                 } else {
-                  // Uxtab.
-                  uint32_t rn_val = get_register(rn);
-                  uint32_t rm_val = get_register(instr->RmValue());
-                  int32_t rotate = instr->Bits(11, 10);
-                  switch (rotate) {
-                    case 0:
-                      break;
-                    case 1:
-                      rm_val = (rm_val >> 8) | (rm_val << 24);
-                      break;
-                    case 2:
-                      rm_val = (rm_val >> 16) | (rm_val << 16);
-                      break;
-                    case 3:
-                      rm_val = (rm_val >> 24) | (rm_val << 8);
-                      break;
+                  if (instr->Bits(19, 16) == 0xF) {
+                    // Uxth.
+                    uint32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, (rm_val & 0xFFFF));
+                  } else {
+                    // Uxtah.
+                    uint32_t rn_val = get_register(rn);
+                    uint32_t rm_val = get_register(instr->RmValue());
+                    int32_t rotate = instr->Bits(11, 10);
+                    switch (rotate) {
+                      case 0:
+                        break;
+                      case 1:
+                        rm_val = (rm_val >> 8) | (rm_val << 24);
+                        break;
+                      case 2:
+                        rm_val = (rm_val >> 16) | (rm_val << 16);
+                        break;
+                      case 3:
+                        rm_val = (rm_val >> 24) | (rm_val << 8);
+                        break;
+                    }
+                    set_register(rd, rn_val + (rm_val & 0xFFFF));
                   }
-                  set_register(rd, rn_val + (rm_val & 0xFF));
                 }
               } else {
                 UNIMPLEMENTED();
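
All eight new simulator cases (Sxtb/Sxtab/Sxth/Sxtah and their Uxt* twins) share one shape: rotate Rm right by 8 * rotate bits, take the low byte or halfword, sign- or zero-extend it, and optionally accumulate Rn. A standalone restatement of that pattern for two representative variants (plain C++; an illustration, not the simulator's code):

    #include <cstdint>

    // ROR by 8 * rotate bits, rotate in [0, 3], as encoded in bits 11:10.
    static uint32_t RotateOperand(uint32_t rm_val, int rotate) {
      int shift = 8 * rotate;
      if (shift == 0) return rm_val;  // avoid the undefined 32-bit shift
      return (rm_val >> shift) | (rm_val << (32 - shift));
    }

    // SXTAB: Rd = Rn + SignExtend8(ROR(Rm, 8 * rotate)).
    int32_t Sxtab(int32_t rn_val, uint32_t rm_val, int rotate) {
      return rn_val + static_cast<int8_t>(RotateOperand(rm_val, rotate));
    }

    // UXTAH: Rd = Rn + ZeroExtend16(ROR(Rm, 8 * rotate)).
    uint32_t Uxtah(uint32_t rn_val, uint32_t rm_val, int rotate) {
      return rn_val + (RotateOperand(rm_val, rotate) & 0xFFFF);
    }
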
index 5e1bed1..4547efe 100644 (file)
@@ -503,7 +503,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
     DCHECK(addrmode == Offset);
 
     regoffset_ = offset.reg();
-    shift_= offset.shift();
+    shift_ = offset.shift();
     shift_amount_ = offset.shift_amount();
 
     extend_ = NO_EXTEND;
@@ -520,7 +520,7 @@ MemOperand::MemOperand(Register base, const Operand& offset, AddrMode addrmode)
     extend_ = offset.extend();
     shift_amount_ = offset.shift_amount();
 
-    shift_= NO_SHIFT;
+    shift_ = NO_SHIFT;
     offset_ = 0;
 
     // These assertions match those in the extended-register constructor.
index 076e143..770d425 100644 (file)
@@ -44,22 +44,27 @@ namespace internal {
 // CpuFeatures implementation.
 
 void CpuFeatures::ProbeImpl(bool cross_compile) {
-  if (cross_compile) {
-    // Always align csp in cross compiled code - this is safe and ensures that
-    // csp will always be aligned if it is enabled by probing at runtime.
-    if (FLAG_enable_always_align_csp) supported_ |= 1u << ALWAYS_ALIGN_CSP;
-  } else {
-    base::CPU cpu;
-    if (FLAG_enable_always_align_csp &&
-        (cpu.implementer() == base::CPU::NVIDIA || FLAG_debug_code)) {
-      supported_ |= 1u << ALWAYS_ALIGN_CSP;
-    }
+  // AArch64 has no configuration options; no further probing is required.
+  supported_ = 0;
+
+  // Only use statically determined features for cross compile (snapshot).
+  if (cross_compile) return;
+
+  // Probe for runtime features.
+  base::CPU cpu;
+  if (cpu.implementer() == base::CPU::NVIDIA &&
+      cpu.variant() == base::CPU::NVIDIA_DENVER) {
+    supported_ |= 1u << COHERENT_CACHE;
   }
 }
 
 
 void CpuFeatures::PrintTarget() { }
-void CpuFeatures::PrintFeatures() { }
+
+
+void CpuFeatures::PrintFeatures() {
+  printf("COHERENT_CACHE=%d\n", CpuFeatures::IsSupported(COHERENT_CACHE));
+}
 
 
 // -----------------------------------------------------------------------------
@@ -612,9 +617,12 @@ void Assembler::Align(int m) {
 void Assembler::CheckLabelLinkChain(Label const * label) {
 #ifdef DEBUG
   if (label->is_linked()) {
+    static const int kMaxLinksToCheck = 64;  // Avoid O(n^2) behaviour.
+    int links_checked = 0;
     int linkoffset = label->pos();
     bool end_of_chain = false;
     while (!end_of_chain) {
+      if (++links_checked > kMaxLinksToCheck) break;
       Instruction * link = InstructionAt(linkoffset);
       int linkpcoffset = link->ImmPCOffset();
       int prevlinkoffset = linkoffset + linkpcoffset;
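
The cap matters because CheckLabelLinkChain runs on every label operation in debug builds; walking the whole chain each time made total verification cost quadratic in chain length. The bounded walk in isolation (generic C++; Node is a stand-in for the instruction link):

    struct Node { const Node* next; };

    // Verify at most kMaxLinksToCheck links per call, so each check costs
    // O(min(n, 64)) instead of O(n), keeping all checks together linear.
    void CheckChain(const Node* head) {
      static const int kMaxLinksToCheck = 64;
      int links_checked = 0;
      for (const Node* p = head; p != NULL; p = p->next) {
        if (++links_checked > kMaxLinksToCheck) break;
        // ...assert per-link invariants here...
      }
    }
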
index 74535ba..9f140c2 100644 (file)
@@ -367,13 +367,13 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
             FieldMemOperand(init_map, Map::kBitField3Offset);
         // Check if slack tracking is enabled.
         __ Ldr(x4, bit_field3);
-        __ DecodeField<Map::ConstructionCount>(constructon_count, x4);
-        __ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking));
-        __ B(eq, &allocate);
+        __ DecodeField<Map::Counter>(constructon_count, x4);
+        __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
+        __ B(lt, &allocate);
         // Decrease generous allocation count.
-        __ Subs(x4, x4, Operand(1 << Map::ConstructionCount::kShift));
+        __ Subs(x4, x4, Operand(1 << Map::Counter::kShift));
         __ Str(x4, bit_field3);
-        __ Cmp(constructon_count, Operand(JSFunction::kFinishSlackTracking));
+        __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
         __ B(ne, &allocate);
 
         // Push the constructor and map to the stack, and the constructor again
@@ -381,7 +381,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         __ Push(constructor, init_map, constructor);
         __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
         __ Pop(init_map, constructor);
-        __ Mov(constructon_count, Operand(JSFunction::kNoSlackTracking));
+        __ Mov(constructon_count, Operand(Map::kSlackTrackingCounterEnd - 1));
         __ Bind(&allocate);
       }
 
@@ -434,8 +434,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         Label no_inobject_slack_tracking;
 
         // Check if slack tracking is enabled.
-        __ Cmp(constructon_count, Operand(JSFunction::kNoSlackTracking));
-        __ B(eq, &no_inobject_slack_tracking);
+        __ Cmp(constructon_count, Operand(Map::kSlackTrackingCounterEnd));
+        __ B(lt, &no_inobject_slack_tracking);
         constructon_count = NoReg;
 
         // Fill the pre-allocated fields with undef.
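
Both comparison sites now treat the map's counter as a range rather than the old kNoSlackTracking sentinel: tracking is in progress while the counter has not yet decayed below kSlackTrackingCounterEnd, and each construction decrements it by one. A hedged sketch of the predicate (Map::Counter is the BitField decoded by DecodeField above; this C++ mirror is illustrative only):

    // Slack tracking is active while the counter sits at or above the end
    // marker; the Subs(x4, x4, 1 << Map::Counter::kShift) above decrements it.
    bool IsSlackTrackingInProgress(uint32_t bit_field3) {
      int counter = Map::Counter::decode(bit_field3);
      return counter >= Map::kSlackTrackingCounterEnd;
    }
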
index 6583775..e773b53 100644 (file)
@@ -1412,6 +1412,11 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
+  // Ensure that the vector and slot registers won't be clobbered before
+  // calling the miss handler.
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(x10, x11, VectorLoadICDescriptor::VectorRegister(),
+                     VectorLoadICDescriptor::SlotRegister()));
 
   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, x10,
                                                           x11, &miss);
@@ -1429,9 +1434,15 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register index = LoadDescriptor::NameRegister();
   Register result = x0;
-  Register scratch = x3;
+  Register scratch = x10;
   DCHECK(!scratch.is(receiver) && !scratch.is(index));
+  DCHECK(!FLAG_vector_ics ||
+         (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+          result.is(VectorLoadICDescriptor::SlotRegister())));
 
+  // StringCharAtGenerator doesn't use the result register until it's passed
+  // the different miss possibilities. If it did, we would have a conflict
+  // when FLAG_vector_ics is true.
   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -3016,6 +3027,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
 
   // x1 - function
   // x3 - slot id (Smi)
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -3064,35 +3079,72 @@ void CallICStub::Generate(MacroAssembler* masm) {
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ JumpIfRoot(x4, Heap::kmegamorphic_symbolRootIndex, &slow_start);
-  __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &miss);
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(x4);
-    __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
-    __ Add(x4, feedback_vector,
-           Operand::UntagSmiAndScale(index, kPointerSizeLog2));
-    __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
-    __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
-    __ Subs(x4, x4, Operand(Smi::FromInt(1)));
-    __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
-    __ Adds(x4, x4, Operand(Smi::FromInt(1)));
-    __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
-    __ B(&slow_start);
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ jmp(&miss);
   }
 
-  // We are here because tracing is on or we are going monomorphic.
+  __ JumpIfRoot(x4, Heap::kuninitialized_symbolRootIndex, &uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(x4);
+  __ JumpIfNotObjectType(x4, x5, x5, JS_FUNCTION_TYPE, &miss);
+  __ Add(x4, feedback_vector,
+         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ LoadRoot(x5, Heap::kmegamorphic_symbolRootIndex);
+  __ Str(x5, FieldMemOperand(x4, FixedArray::kHeaderSize));
+  // We have to update statistics for runtime profiling.
+  __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
+  __ Subs(x4, x4, Operand(Smi::FromInt(1)));
+  __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+  __ Ldr(x4, FieldMemOperand(feedback_vector, generic_offset));
+  __ Adds(x4, x4, Operand(Smi::FromInt(1)));
+  __ Str(x4, FieldMemOperand(feedback_vector, generic_offset));
+  __ B(&slow_start);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(function, &miss);
+
+  // Goto miss case if we do not have a function.
+  __ JumpIfNotObjectType(function, x5, x5, JS_FUNCTION_TYPE, &miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, x5);
+  __ Cmp(function, x5);
+  __ B(eq, &miss);
+
+  // Update stats.
+  __ Ldr(x4, FieldMemOperand(feedback_vector, with_types_offset));
+  __ Adds(x4, x4, Operand(Smi::FromInt(1)));
+  __ Str(x4, FieldMemOperand(feedback_vector, with_types_offset));
+
+  // Store the function.
+  __ Add(x4, feedback_vector,
+         Operand::UntagSmiAndScale(index, kPointerSizeLog2));
+  __ Add(x4, x4, FixedArray::kHeaderSize - kHeapObjectTag);
+  __ Str(function, MemOperand(x4, 0));
+
+  // Update the write barrier.
+  __ Mov(x5, function);
+  __ RecordWrite(feedback_vector, x4, x5, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ B(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
 
@@ -3835,16 +3887,43 @@ void SubStringStub::Generate(MacroAssembler* masm) {
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in x0.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(x0, &check_heap_number);
+  Label not_smi;
+  __ JumpIfNotSmi(x0, &not_smi);
   __ Ret();
-
-  __ bind(&check_heap_number);
-  __ JumpIfNotHeapNumber(x0, &call_builtin);
+  __ Bind(&not_smi);
+
+  Label not_heap_number;
+  __ Ldr(x1, FieldMemOperand(x0, HeapObject::kMapOffset));
+  __ Ldrb(x1, FieldMemOperand(x1, Map::kInstanceTypeOffset));
+  // x0: object
+  // x1: instance type
+  __ Cmp(x1, HEAP_NUMBER_TYPE);
+  __ B(ne, &not_heap_number);
+  __ Ret();
+  __ Bind(&not_heap_number);
+
+  Label not_string, slow_string;
+  __ Cmp(x1, FIRST_NONSTRING_TYPE);
+  __ B(hs, &not_string);
+  // Check if string has a cached array index.
+  __ Ldr(x2, FieldMemOperand(x0, String::kHashFieldOffset));
+  __ Tst(x2, Operand(String::kContainsCachedArrayIndexMask));
+  __ B(ne, &slow_string);
+  __ IndexFromHash(x2, x0);
+  __ Ret();
+  __ Bind(&slow_string);
+  __ Push(x0);  // Push argument.
+  __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+  __ Bind(&not_string);
+
+  Label not_oddball;
+  __ Cmp(x1, ODDBALL_TYPE);
+  __ B(ne, &not_oddball);
+  __ Ldr(x0, FieldMemOperand(x0, Oddball::kToNumberOffset));
   __ Ret();
+  __ Bind(&not_oddball);
 
-  __ bind(&call_builtin);
-  __ push(x0);
+  __ Push(x0);  // Push argument.
   __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
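
The rewritten stub inlines one type check per case and only falls back to the
TO_NUMBER builtin at the very end. A self-contained C++ sketch of that
dispatch order, using std::variant stand-ins rather than V8's tagged values
(the string arm folds the cached-array-index and runtime paths into one):

    #include <cstdint>
    #include <string>
    #include <variant>

    struct Smi { int32_t value; };
    struct HeapNumber { double value; };
    struct Oddball { double to_number; };  // e.g. true -> 1.0, undefined -> NaN
    using Value = std::variant<Smi, HeapNumber, std::string, Oddball>;

    double ToNumberSketch(const Value& v) {
      if (auto* s = std::get_if<Smi>(&v)) return s->value;         // smi fast path
      if (auto* h = std::get_if<HeapNumber>(&v)) return h->value;  // heap number
      if (auto* str = std::get_if<std::string>(&v))
        return std::stod(*str);  // stands in for both string paths
      return std::get<Oddball>(v).to_number;  // oddballs cache their value
    }
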
 
@@ -4293,18 +4372,10 @@ void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
 }
 
 
-static unsigned int GetProfileEntryHookCallSize(MacroAssembler* masm) {
-  // The entry hook is a "BumpSystemStackPointer" instruction (sub),
-  // followed by a "Push lr" instruction, followed by a call.
-  unsigned int size =
-      Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
-  if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
-    // If ALWAYS_ALIGN_CSP then there will be an extra bic instruction in
-    // "BumpSystemStackPointer".
-    size += kInstructionSize;
-  }
-  return size;
-}
+// The entry hook is a "BumpSystemStackPointer" instruction (sub), followed by
+// a "Push lr" instruction, followed by a call.
+static const unsigned int kProfileEntryHookCallSize =
+    Assembler::kCallSizeWithRelocation + (2 * kInstructionSize);
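
With the ALWAYS_ALIGN_CSP special case gone, the call sequence has a fixed
length, so the helper function collapses into a compile-time constant. A small
worked example of the arithmetic (kCallSizeWithRelocation is an assumed value
here, purely for the demo):

    #include <cstdio>

    int main() {
      const unsigned kInstructionSize = 4;          // A64 is fixed-width
      const unsigned kCallSizeWithRelocation = 16;  // assumed for this demo
      const unsigned kProfileEntryHookCallSize =
          kCallSizeWithRelocation + 2 * kInstructionSize;  // sub + push lr
      std::printf("entry hook call: %u bytes\n", kProfileEntryHookCallSize);
      return 0;
    }
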
 
 
 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
@@ -4317,7 +4388,7 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
     __ Push(lr);
     __ CallStub(&stub);
     DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) ==
-           GetProfileEntryHookCallSize(masm));
+           kProfileEntryHookCallSize);
 
     __ Pop(lr);
   }
@@ -4335,7 +4406,7 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
   const int kNumSavedRegs = kCallerSaved.Count();
 
   // Compute the function's address as the first argument.
-  __ Sub(x0, lr, GetProfileEntryHookCallSize(masm));
+  __ Sub(x0, lr, kProfileEntryHookCallSize);
 
 #if V8_HOST_ARCH_ARM64
   uintptr_t entry_hook =
index 03dab5b..c9ee2c9 100644 (file)
@@ -97,7 +97,7 @@ class RecordWriteStub: public PlatformCodeStub {
     INCREMENTAL_COMPACTION
   };
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
   static Mode GetMode(Code* stub) {
     // Find the mode depending on the first two instructions.
@@ -275,9 +275,9 @@ class RecordWriteStub: public PlatformCodeStub {
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+  inline Major MajorKey() const FINAL { return RecordWrite; }
 
-  virtual void Generate(MacroAssembler* masm) OVERRIDE;
+  void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -285,7 +285,7 @@ class RecordWriteStub: public PlatformCodeStub {
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  void Activate(Code* code) {
+  void Activate(Code* code) OVERRIDE {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
@@ -328,7 +328,7 @@ class DirectCEntryStub: public PlatformCodeStub {
   void GenerateCall(MacroAssembler* masm, Register target);
 
  private:
-  bool NeedsImmovableCode() { return true; }
+  bool NeedsImmovableCode() OVERRIDE { return true; }
 
   DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
   DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
@@ -360,7 +360,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
                                      Register scratch1,
                                      Register scratch2);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
  private:
   static const int kInlinedProbes = 4;
index 39beb6d..11ba7c9 100644 (file)
@@ -43,6 +43,8 @@ class CacheLineSizes {
 void CpuFeatures::FlushICache(void* address, size_t length) {
   if (length == 0) return;
 
+  if (CpuFeatures::IsSupported(COHERENT_CACHE)) return;
+
 #ifdef USE_SIMULATOR
   // TODO(all): consider doing some cache simulation to ensure every address
   // run has been synced.
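
A sketch of the guard this hunk adds (stand-in names; the real feature query is
CpuFeatures::IsSupported(COHERENT_CACHE)): on cores whose instruction and data
caches are coherent there is nothing to flush, so the function returns before
any cache-maintenance work.

    #include <cstddef>

    bool HasCoherentCache() { return false; }  // assumed stand-in query

    void FlushICacheSketch(void* address, size_t length) {
      if (length == 0) return;         // nothing to flush
      if (HasCoherentCache()) return;  // hardware keeps I-cache and D-cache in sync
      // ... otherwise fall through to explicit dc/ic maintenance ...
    }
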
index d67dc8f..b83bbbe 100644 (file)
@@ -21,6 +21,11 @@ int Deoptimizer::patch_size() {
 }
 
 
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+  // Empty because there is no need for relocation information for the code
+  // patching in Deoptimizer::PatchCodeForDeoptimization below.
+}
+
 
 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
   // Invalidate the relocation information, as it will become invalid by the
index 4262875..0d3d34b 100644 (file)
@@ -196,10 +196,10 @@ void FullCodeGenerator::Generate() {
     // Argument to NewContext is the function, which is still in x1.
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
-    if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+    if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
       __ Mov(x10, Operand(info->scope()->GetScopeInfo()));
       __ Push(x1, x10);
-      __ CallRuntime(Runtime::kNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewScriptContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -934,7 +934,7 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   EmitDebugCheckDeclarationContext(variable);
 
   // Load instance object.
-  __ LoadContext(x1, scope_->ContextChainLength(scope_->GlobalScope()));
+  __ LoadContext(x1, scope_->ContextChainLength(scope_->ScriptScope()));
   __ Ldr(x1, ContextMemOperand(x1, variable->interface()->Index()));
   __ Ldr(x1, ContextMemOperand(x1, Context::EXTENSION_INDEX));
 
@@ -1109,6 +1109,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 
   // Get the object to enumerate over. If the object is null or undefined, skip
   // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  SetExpressionPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
   __ JumpIfRoot(x0, Heap::kUndefinedValueRootIndex, &exit);
   Register null_value = x15;
@@ -1202,6 +1203,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // Generate code for doing the condition check.
   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ Bind(&loop);
+  SetExpressionPosition(stmt->each());
+
   // Load the current count to x0, load the length to x1.
   __ PeekPair(x0, x1, 0);
   __ Cmp(x0, x1);  // Compare to the array length.
@@ -1271,48 +1274,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 }
 
 
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
-  Comment cmnt(masm_, "[ ForOfStatement");
-  SetStatementPosition(stmt);
-
-  Iteration loop_statement(this, stmt);
-  increment_loop_depth();
-
-  // var iterator = iterable[Symbol.iterator]();
-  VisitForEffect(stmt->assign_iterator());
-
-  // Loop entry.
-  __ Bind(loop_statement.continue_label());
-
-  // result = iterator.next()
-  VisitForEffect(stmt->next_result());
-
-  // if (result.done) break;
-  Label result_not_done;
-  VisitForControl(stmt->result_done(),
-                  loop_statement.break_label(),
-                  &result_not_done,
-                  &result_not_done);
-  __ Bind(&result_not_done);
-
-  // each = result.value
-  VisitForEffect(stmt->assign_each());
-
-  // Generate code for the body of the loop.
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
-  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
-  __ B(loop_statement.continue_label());
-
-  // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ Bind(loop_statement.break_label());
-  decrement_loop_depth();
-}
-
-
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
   // Use the fast case closure allocation code that allocates in new space for
@@ -1372,6 +1333,18 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
 }
 
 
+void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
+                                                  int offset) {
+  if (NeedsHomeObject(initializer)) {
+    __ Peek(StoreDescriptor::ReceiverRegister(), 0);
+    __ Mov(StoreDescriptor::NameRegister(),
+           Operand(isolate()->factory()->home_object_symbol()));
+    __ Peek(StoreDescriptor::ValueRegister(), offset * kPointerSize);
+    CallStoreIC();
+  }
+}
+
+
 void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
@@ -1721,6 +1694,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
             __ Peek(StoreDescriptor::ReceiverRegister(), 0);
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+            if (NeedsHomeObject(value)) {
+              __ Mov(StoreDescriptor::ReceiverRegister(), x0);
+              __ Mov(StoreDescriptor::NameRegister(),
+                     Operand(isolate()->factory()->home_object_symbol()));
+              __ Peek(StoreDescriptor::ValueRegister(), 0);
+              CallStoreIC();
+            }
           } else {
             VisitForEffect(value);
           }
@@ -1732,6 +1713,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
           __ Push(x0);
           VisitForStackValue(key);
           VisitForStackValue(value);
+          EmitSetHomeObjectIfNeeded(value, 2);
           __ Mov(x0, Smi::FromInt(SLOPPY));  // Language mode
           __ Push(x0);
           __ CallRuntime(Runtime::kSetProperty, 4);
@@ -1769,7 +1751,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
       __ Push(x10);
       VisitForStackValue(it->first);
       EmitAccessor(it->second->getter);
+      EmitSetHomeObjectIfNeeded(it->second->getter, 2);
       EmitAccessor(it->second->setter);
+      EmitSetHomeObjectIfNeeded(it->second->setter, 3);
       __ Mov(x10, Smi::FromInt(NONE));
       __ Push(x10);
       __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -2203,6 +2187,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
     __ Push(scratch);
     VisitForStackValue(key);
     VisitForStackValue(value);
+    EmitSetHomeObjectIfNeeded(value, 2);
 
     switch (property->kind()) {
       case ObjectLiteral::Property::CONSTANT:
@@ -2388,8 +2373,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
+  } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+    __ CallRuntime(Runtime::kThrowConstAssignError, 0);
   }
-  // Non-initializing assignments to consts are ignored.
 }
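
A simplified stand-in for the predicate used above (the real
IsSignallingAssignmentToConst lives in the shared full-codegen and also
distinguishes legacy const; this sketch only captures the strict-mode case):

    enum class Op { kInit, kAssign };
    enum class LanguageMode { kSloppy, kStrict };

    // A non-initializing write to a const now raises an error instead of
    // being silently ignored.
    bool IsSignallingAssignmentToConstSketch(bool is_const, Op op,
                                             LanguageMode mode) {
      return is_const && op == Op::kAssign && mode == LanguageMode::kStrict;
    }
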
 
 
@@ -4909,7 +4895,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
     Expression *value,
     JSGeneratorObject::ResumeMode resume_mode) {
   ASM_LOCATION("FullCodeGenerator::EmitGeneratorResume");
-  Register value_reg = x0;
   Register generator_object = x1;
   Register the_hole = x2;
   Register operand_stack_size = w3;
@@ -4923,15 +4908,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   VisitForAccumulatorValue(value);
   __ Pop(generator_object);
 
-  // Check generator state.
-  Label wrong_state, closed_state, done;
-  __ Ldr(x10, FieldMemOperand(generator_object,
-                              JSGeneratorObject::kContinuationOffset));
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
-  __ CompareAndBranch(x10, Smi::FromInt(0), eq, &closed_state);
-  __ CompareAndBranch(x10, Smi::FromInt(0), lt, &wrong_state);
-
   // Load suspended function and context.
   __ Ldr(cp, FieldMemOperand(generator_object,
                              JSGeneratorObject::kContextOffset));
@@ -4957,7 +4933,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
 
   // Enter a new JavaScript frame, and initialize its slots as they were when
   // the generator was suspended.
-  Label resume_frame;
+  Label resume_frame, done;
   __ Bl(&resume_frame);
   __ B(&done);
 
@@ -5002,26 +4978,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   // Not reached: the runtime call returns elsewhere.
   __ Unreachable();
 
-  // Reach here when generator is closed.
-  __ Bind(&closed_state);
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    // Return completed iterator result when generator is closed.
-    __ LoadRoot(x10, Heap::kUndefinedValueRootIndex);
-    __ Push(x10);
-    // Pop value from top-of-stack slot; box result into result register.
-    EmitCreateIteratorResult(true);
-  } else {
-    // Throw the provided value.
-    __ Push(value_reg);
-    __ CallRuntime(Runtime::kThrow, 1);
-  }
-  __ B(&done);
-
-  // Throw error if we attempt to operate on a running generator.
-  __ Bind(&wrong_state);
-  __ Push(generator_object);
-  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
   __ Bind(&done);
   context()->Plug(result_register());
 }
@@ -5111,7 +5067,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope() ||
+  if (declaration_scope->is_script_scope() ||
       declaration_scope->is_module_scope()) {
     // Contexts nested in the native context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
index 241bc4b..2d5f7f2 100644 (file)
@@ -1564,9 +1564,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
       UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
   LOperand* name_register =
       UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+    vector =
+        UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+  }
+
   // Not marked as call. It can't deoptimize, and it never returns.
   return new (zone()) LTailCallThroughMegamorphicCache(
-      context, receiver_register, name_register);
+      context, receiver_register, name_register, slot, vector);
 }
 
 
@@ -1675,7 +1683,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object =
       UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
@@ -1738,7 +1746,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
@@ -1760,7 +1768,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
@@ -2400,7 +2408,7 @@ LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
   LOperand* temp1 = NULL;
 
   if (instr->access().IsExternalMemory() ||
-      instr->field_representation().IsDouble()) {
+      (!FLAG_unbox_double_fields && instr->field_representation().IsDouble())) {
     value = UseRegister(instr->value());
   } else if (instr->NeedsWriteBarrier()) {
     value = UseRegisterAndClobber(instr->value());
index 6ead3fe..424ecba 100644 (file)
@@ -178,17 +178,13 @@ class LCodeGen;
   V(WrapReceiver)
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const FINAL OVERRIDE {                      \
-    return LInstruction::k##type;                                           \
-  }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;   \
-  virtual const char* Mnemonic() const FINAL OVERRIDE {               \
-    return mnemonic;                                                        \
-  }                                                                         \
-  static L##type* cast(LInstruction* instr) {                               \
-    DCHECK(instr->Is##type());                                              \
-    return reinterpret_cast<L##type*>(instr);                               \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const FINAL { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) FINAL;              \
+  const char* Mnemonic() const FINAL { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
   }
 
 
@@ -295,11 +291,9 @@ class LTemplateResultInstruction : public LInstruction {
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const FINAL OVERRIDE {
-    return (R != 0) && (result() != NULL);
-  }
+  bool HasResult() const FINAL { return (R != 0) && (result() != NULL); }
   void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() const { return results_[0]; }
+  LOperand* result() const OVERRIDE { return results_[0]; }
 
  protected:
   EmbeddedContainer<LOperand*, R> results_;
@@ -317,28 +311,32 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
 
  private:
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return I; }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
+  int TempCount() FINAL { return T; }
+  LOperand* TempAt(int i) FINAL { return temps_[i]; }
 };
 
 
 class LTailCallThroughMegamorphicCache FINAL
-    : public LTemplateInstruction<0, 3, 0> {
+    : public LTemplateInstruction<0, 5, 0> {
  public:
-  explicit LTailCallThroughMegamorphicCache(LOperand* context,
-                                            LOperand* receiver,
-                                            LOperand* name) {
+  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+                                   LOperand* name, LOperand* slot,
+                                   LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = receiver;
     inputs_[2] = name;
+    inputs_[3] = slot;
+    inputs_[4] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
   LOperand* name() { return inputs_[2]; }
+  LOperand* slot() { return inputs_[3]; }
+  LOperand* vector() { return inputs_[4]; }
 
   DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
                                "tail-call-through-megamorphic-cache")
@@ -348,9 +346,7 @@ class LTailCallThroughMegamorphicCache FINAL
 
 class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };
 
@@ -360,7 +356,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const FINAL OVERRIDE { return true; }
+  bool IsControl() const FINAL { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -410,8 +406,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsGap() const OVERRIDE { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
     DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
@@ -451,7 +447,7 @@ class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -492,10 +488,10 @@ class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
-  virtual bool IsControl() const OVERRIDE { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
 
@@ -525,12 +521,10 @@ class LLabel FINAL : public LGap {
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -550,9 +544,7 @@ class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry() {}
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
@@ -573,7 +565,7 @@ class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* length() { return inputs_[1]; }
   LOperand* index() { return inputs_[2]; }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -721,11 +713,9 @@ class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticD;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -749,11 +739,9 @@ class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* right() { return inputs_[2]; }
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticT;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -838,7 +826,7 @@ class LBranch FINAL : public LControlInstruction<1, 2> {
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -853,7 +841,7 @@ class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -889,7 +877,7 @@ class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -908,7 +896,7 @@ class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -925,7 +913,7 @@ class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -1097,7 +1085,7 @@ class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1215,7 +1203,7 @@ class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1313,7 +1301,7 @@ class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
 
 class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const OVERRIDE { return true; }
+  bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
@@ -1447,7 +1435,7 @@ class LHasCachedArrayIndexAndBranch FINAL
                                "has-cached-array-index-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1465,7 +1453,7 @@ class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1479,7 +1467,7 @@ class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
@@ -1555,11 +1543,12 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
 
   CallInterfaceDescriptor descriptor() { return descriptor_; }
 
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
@@ -1567,11 +1556,11 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return inputs_.length(); }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+  int TempCount() FINAL { return 0; }
+  LOperand* TempAt(int i) FINAL { return NULL; }
 };
 
 
@@ -1588,7 +1577,7 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1624,7 +1613,7 @@ class LIsObjectAndBranch FINAL : public LControlInstruction<1, 2> {
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1641,7 +1630,7 @@ class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1656,7 +1645,7 @@ class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1674,7 +1663,7 @@ class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1691,7 +1680,7 @@ class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
 
   int slot_index() const { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -2272,7 +2261,7 @@ class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 1> {
 
 class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
@@ -2331,11 +2320,11 @@ class LPushArguments FINAL : public LTemplateResultInstruction<0> {
 
  private:
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return inputs_.length(); }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+  int TempCount() FINAL { return 0; }
+  LOperand* TempAt(int i) FINAL { return NULL; }
 };
 
 
@@ -2585,7 +2574,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
@@ -2609,7 +2598,7 @@ class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Representation representation() const {
     return hydrogen()->field_representation();
@@ -2632,7 +2621,7 @@ class LStoreNamedGeneric FINAL: public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
@@ -2707,7 +2696,7 @@ class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
 
   Token::Value op() const { return hydrogen()->token(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -2786,7 +2775,7 @@ class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 1> {
   LOperand* code_object() { return inputs_[1]; }
   LOperand* temp() { return temps_[0]; }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
   DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@@ -2810,7 +2799,7 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -2916,7 +2905,7 @@ class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2991,7 +2980,7 @@ class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 2> {
 
   Handle<String> type_literal() const { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
index a285e7b..df9e7b5 100644 (file)
@@ -557,11 +557,6 @@ void LCodeGen::RecordSafepoint(LPointerMap* pointers,
       safepoint.DefinePointerRegister(ToRegister(pointer), zone());
     }
   }
-
-  if (kind & Safepoint::kWithRegisters) {
-    // Register cp always contains a pointer to the context.
-    safepoint.DefinePointerRegister(cp, zone());
-  }
 }
 
 void LCodeGen::RecordSafepoint(LPointerMap* pointers,
@@ -2047,23 +2042,33 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
   DCHECK(name.is(LoadDescriptor::NameRegister()));
   DCHECK(receiver.is(x1));
   DCHECK(name.is(x2));
-
-  Register scratch = x3;
-  Register extra = x4;
-  Register extra2 = x5;
-  Register extra3 = x6;
+  Register scratch = x4;
+  Register extra = x5;
+  Register extra2 = x6;
+  Register extra3 = x7;
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
+                     scratch, extra, extra2, extra3));
 
   // Important for the tail-call.
   bool must_teardown_frame = NeedsEagerFrame();
 
-  // The probe will tail call to a handler if found.
-  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
-                                         must_teardown_frame, receiver, name,
-                                         scratch, extra, extra2, extra3);
+  if (!instr->hydrogen()->is_just_miss()) {
+    DCHECK(!instr->hydrogen()->is_keyed_load());
+
+    // The probe will tail call to a handler if found.
+    isolate()->stub_cache()->GenerateProbe(
+        masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+        receiver, name, scratch, extra, extra2, extra3);
+  }
 
   // Tail call to miss if we ended up here.
   if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
-  LoadIC::GenerateMiss(masm());
+  if (instr->hydrogen()->is_keyed_load()) {
+    KeyedLoadIC::GenerateMiss(masm());
+  } else {
+    LoadIC::GenerateMiss(masm());
+  }
 }
 
 
@@ -2071,25 +2076,44 @@ void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   DCHECK(instr->IsMarkedAsCall());
   DCHECK(ToRegister(instr->result()).Is(x0));
 
-  LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
-
-  if (instr->target()->IsConstantOperand()) {
-    LConstantOperand* target = LConstantOperand::cast(instr->target());
-    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-    // TODO(all): on ARM we use a call descriptor to specify a storage mode
-    // but on ARM64 we only have one storage mode so it isn't necessary. Check
-    // this understanding is correct.
-    __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      // TODO(all): on ARM we use a call descriptor to specify a storage mode
+      // but on ARM64 we only have one storage mode so it isn't necessary. Check
+      // this understanding is correct.
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+      __ Br(target);
+    }
   } else {
-    DCHECK(instr->target()->IsRegister());
-    Register target = ToRegister(instr->target());
-    generator.BeforeCall(__ CallSize(target));
-    __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
-    __ Call(target);
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      // TODO(all): on ARM we use a call descriptor to specify a storage mode
+      // but on ARM64 we only have one storage mode so it isn't necessary. Check
+      // this understanding is correct.
+      __ Call(code, RelocInfo::CODE_TARGET, TypeFeedbackId::None());
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      __ Add(target, target, Code::kHeaderSize - kHeapObjectTag);
+      __ Call(target);
+    }
+    generator.AfterCall();
   }
-  generator.AfterCall();
+
   after_push_argument_ = false;
 }
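
A control-flow sketch of the split this hunk introduces (free functions stand
in for the emit helpers): a tail call tears down the frame and jumps, so no
safepoint is recorded, while a regular call stays in the frame and records one
for lazy deoptimization.

    void LeaveFrame() {}  // stand-ins for the real emitters
    void JumpToTarget() {}
    void BeforeCall() {}
    void CallTarget() {}
    void AfterCall() {}

    void EmitCallSketch(bool is_tail_call, bool needs_eager_frame) {
      if (is_tail_call) {
        if (needs_eager_frame) LeaveFrame();
        JumpToTarget();  // control does not return; no safepoint
      } else {
        BeforeCall();    // prepare safepoint bookkeeping
        CallTarget();
        AfterCall();     // record the safepoint for lazy deopt
      }
    }
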
 
@@ -3372,13 +3396,17 @@ template <class T>
 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   DCHECK(FLAG_vector_ics);
   Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = VectorLoadICDescriptor::SlotRegister();
   DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(x0));
+
+  AllowDeferredHandleDereference vector_structure_check;
   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   __ Mov(vector_register, vector);
   // No need to allocate this register.
-  DCHECK(VectorLoadICDescriptor::SlotRegister().is(x0));
-  int index = vector->GetIndex(instr->hydrogen()->slot());
-  __ Mov(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(index));
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ Mov(slot_register, Smi::FromInt(index));
 }
 
 
@@ -3665,6 +3693,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
   }
 
   if (instr->hydrogen()->representation().IsDouble()) {
+    DCHECK(access.IsInobject());
     FPRegister result = ToDoubleRegister(instr->result());
     __ Ldr(result, FieldMemOperand(object, offset));
     return;
@@ -4771,6 +4800,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     int parameter_count = ToInteger32(instr->constant_parameter_count());
     __ Drop(parameter_count + 1);
   } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
     Register parameter_count = ToRegister(instr->parameter_count());
     __ DropBySMI(parameter_count);
   }
@@ -5022,7 +5052,6 @@ void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
   Register scratch2 = x6;
   DCHECK(instr->IsMarkedAsCall());
 
-  ASM_UNIMPLEMENTED_BREAK("DoDeclareGlobals");
   // TODO(all): if Mov could handle objects in new space, it could be used
   // here.
   __ LoadHeapObject(scratch1, instr->hydrogen()->pairs());
@@ -5354,7 +5383,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
 
   __ AssertNotSmi(object);
 
-  if (representation.IsDouble()) {
+  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
     DCHECK(access.IsInobject());
     DCHECK(!instr->hydrogen()->has_transition());
     DCHECK(!instr->hydrogen()->NeedsWriteBarrier());
@@ -5363,8 +5392,6 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
     return;
   }
 
-  Register value = ToRegister(instr->value());
-
   DCHECK(!representation.IsSmi() ||
          !instr->value()->IsConstantOperand() ||
          IsInteger32Constant(LConstantOperand::cast(instr->value())));
@@ -5396,8 +5423,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
     destination = temp0;
   }
 
-  if (representation.IsSmi() &&
-     instr->hydrogen()->value()->representation().IsInteger32()) {
+  if (FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    FPRegister value = ToDoubleRegister(instr->value());
+    __ Str(value, FieldMemOperand(object, offset));
+  } else if (representation.IsSmi() &&
+             instr->hydrogen()->value()->representation().IsInteger32()) {
     DCHECK(instr->hydrogen()->store_mode() == STORE_TO_INITIALIZED_ENTRY);
 #ifdef DEBUG
     Register temp0 = ToRegister(instr->temp0());
@@ -5412,12 +5443,15 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
 #endif
     STATIC_ASSERT(static_cast<unsigned>(kSmiValueSize) == kWRegSizeInBits);
     STATIC_ASSERT(kSmiTag == 0);
+    Register value = ToRegister(instr->value());
     __ Store(value, UntagSmiFieldMemOperand(destination, offset),
              Representation::Integer32());
   } else {
+    Register value = ToRegister(instr->value());
     __ Store(value, FieldMemOperand(destination, offset), representation);
   }
   if (instr->hydrogen()->NeedsWriteBarrier()) {
+    Register value = ToRegister(instr->value());
     __ RecordWriteField(destination,
                         offset,
                         value,                        // Clobbered.
@@ -5985,10 +6019,11 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
           object_(object),
           index_(index) {
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LLoadFieldByIndex* instr_;
     Register result_;
index 4a4d644..b691e21 100644 (file)
@@ -1244,14 +1244,7 @@ void MacroAssembler::Uxtw(const Register& rd, const Register& rn) {
 void MacroAssembler::BumpSystemStackPointer(const Operand& space) {
   DCHECK(!csp.Is(sp_));
   if (!TmpList()->IsEmpty()) {
-    if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
-      UseScratchRegisterScope temps(this);
-      Register temp = temps.AcquireX();
-      Sub(temp, StackPointer(), space);
-      Bic(csp, temp, 0xf);
-    } else {
-      Sub(csp, StackPointer(), space);
-    }
+    Sub(csp, StackPointer(), space);
   } else {
     // TODO(jbramley): Several callers rely on this not using scratch
     // registers, so we use the assembler directly here. However, this means
@@ -1288,11 +1281,7 @@ void MacroAssembler::SyncSystemStackPointer() {
   DCHECK(emit_debug_code());
   DCHECK(!csp.Is(sp_));
   { InstructionAccurateScope scope(this);
-    if (CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
-      bic(csp, StackPointer(), 0xf);
-    } else {
-      mov(csp, StackPointer());
-    }
+    mov(csp, StackPointer());
   }
   AssertStackConsistency();
 }
index e0a2190..0253e7c 100644 (file)
@@ -1308,7 +1308,7 @@ void MacroAssembler::AssertStackConsistency() {
   // Avoid emitting code when !use_real_abort() since non-real aborts cause too
   // much code to be generated.
   if (emit_debug_code() && use_real_aborts()) {
-    if (csp.Is(StackPointer()) || CpuFeatures::IsSupported(ALWAYS_ALIGN_CSP)) {
+    if (csp.Is(StackPointer())) {
       // Check the alignment of csp.  We
       // can't check the alignment of csp without using a scratch register (or
       // clobbering the flags), but the processor (or simulator) will abort if
@@ -3788,23 +3788,38 @@ void MacroAssembler::CheckMap(Register obj_map,
 }
 
 
-void MacroAssembler::DispatchMap(Register obj,
-                                 Register scratch,
-                                 Handle<Map> map,
-                                 Handle<Code> success,
-                                 SmiCheckType smi_check_type) {
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+                                     Register scratch2, Handle<WeakCell> cell,
+                                     Handle<Code> success,
+                                     SmiCheckType smi_check_type) {
   Label fail;
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, &fail);
   }
-  Ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
-  Cmp(scratch, Operand(map));
+  Ldr(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
+  CmpWeakValue(scratch1, cell, scratch2);
   B(ne, &fail);
   Jump(success, RelocInfo::CODE_TARGET);
   Bind(&fail);
 }
 
 
+void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
+                                  Register scratch) {
+  Mov(scratch, Operand(cell));
+  Ldr(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
+  Cmp(value, scratch);
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+                                   Label* miss) {
+  Mov(value, Operand(cell));
+  Ldr(value, FieldMemOperand(value, WeakCell::kValueOffset));
+  JumpIfSmi(value, miss);
+}
+
+
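
A plain-C++ stand-in for the weak cell these helpers read: the GC overwrites
the payload with a small integer (a Smi, whose low tag bit is 0) when the
target object dies, which is why LoadWeakValue can detect a cleared cell with
JumpIfSmi.

    #include <cstdint>

    struct WeakCellSketch {
      uintptr_t value;  // tagged pointer, or a Smi once cleared
      bool IsCleared() const { return (value & 1) == 0; }  // Smi tag is 0
    };

    // Mirrors LoadWeakValue: yields the payload or signals a miss.
    bool LoadWeakValueSketch(const WeakCellSketch& cell, uintptr_t* out) {
      if (cell.IsCleared()) return false;  // branch to the miss label
      *out = cell.value;
      return true;
    }
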
 void MacroAssembler::TestMapBitfield(Register object, uint64_t mask) {
   UseScratchRegisterScope temps(this);
   Register temp = temps.AcquireX();
@@ -4087,7 +4102,7 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
 
   // Check the context is a native context.
   if (emit_debug_code()) {
-    // Read the first word and compare to the global_context_map.
+    // Read the first word and compare to the native_context_map.
     Ldr(scratch2, FieldMemOperand(scratch1, HeapObject::kMapOffset));
     CompareRoot(scratch2, Heap::kNativeContextMapRootIndex);
     Check(eq, kExpectedNativeContext);
@@ -4213,10 +4228,11 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   }
 
   Bind(&done);
-  // Check that the value is a normal property.
+  // Check that the value is a field property.
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
+  DCHECK_EQ(FIELD, 0);
   TestAndBranchIfAnySet(scratch1, PropertyDetails::TypeField::kMask, miss);
 
   // Get the value at the masked, scaled index and return.
@@ -4633,17 +4649,6 @@ void MacroAssembler::HasColor(Register object,
 }
 
 
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
-                                        Register scratch,
-                                        Label* if_deprecated) {
-  if (map->CanBeDeprecated()) {
-    Mov(scratch, Operand(map));
-    Ldrsw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
-    TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, if_deprecated);
-  }
-}
-
-
 void MacroAssembler::JumpIfBlack(Register object,
                                  Register scratch0,
                                  Register scratch1,
index cff42d7..ee43589 100644 (file)
@@ -761,9 +761,9 @@ class MacroAssembler : public Assembler {
   // it can be evidence of a potential bug because the ABI forbids accesses
   // below csp.
   //
-  // If StackPointer() is the system stack pointer (csp) or ALWAYS_ALIGN_CSP is
-  // enabled, then csp will be dereferenced to  cause the processor
-  // (or simulator) to abort if it is not properly aligned.
+  // If StackPointer() is the system stack pointer (csp), then csp will be
+  // dereferenced to cause the processor (or simulator) to abort if it is not
+  // properly aligned.
   //
   // If emit_debug_code() is false, this emits no code.
   void AssertStackConsistency();
@@ -831,9 +831,7 @@ class MacroAssembler : public Assembler {
   inline void BumpSystemStackPointer(const Operand& space);
 
   // Re-synchronizes the system stack pointer (csp) with the current stack
-  // pointer (according to StackPointer()).  This function will ensure the
-  // new value of the system stack pointer is remains aligned to 16 bytes, and
-  // is lower than or equal to the value of the current stack pointer.
+  // pointer (according to StackPointer()).
   //
   // This method asserts that StackPointer() is not csp, since the call does
   // not make sense in that context.
@@ -1480,14 +1478,19 @@ class MacroAssembler : public Assembler {
                 Label* fail,
                 SmiCheckType smi_check_type);
 
-  // Check if the map of an object is equal to a specified map and branch to a
-  // specified target if equal. Skip the smi check if not required (object is
-  // known to be a heap object)
-  void DispatchMap(Register obj,
-                   Register scratch,
-                   Handle<Map> map,
-                   Handle<Code> success,
-                   SmiCheckType smi_check_type);
+  // Check if the map of an object matches the map stored in the given weak
+  // cell and branch to a specified target if so. Skip the smi check if not
+  // required (the object is known to be a heap object).
+  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+                       Handle<WeakCell> cell, Handle<Code> success,
+                       SmiCheckType smi_check_type);
+
+  // Compare the given value with the value of the weak cell.
+  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+
+  // Load the value of the weak cell into the value register. Branch to the
+  // given miss label if the weak cell was cleared.
+  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
 
   // Test the bitfield of the heap object map with mask and set the condition
   // flags. The object register is preserved.
@@ -1777,10 +1780,6 @@ class MacroAssembler : public Assembler {
                           int mask,
                           Label* if_all_clear);
 
-  void CheckMapDeprecated(Handle<Map> map,
-                          Register scratch,
-                          Label* if_deprecated);
-
   // Check if object is in new space and jump accordingly.
   // Register 'object' is preserved.
   void JumpIfNotInNewSpace(Register object,
index 569be9c..bc524af 100644 (file)
@@ -413,7 +413,7 @@ void Simulator::ResetState() {
 
   // Reset debug helpers.
   breakpoints_.clear();
-  break_on_next_= false;
+  break_on_next_ = false;
 }
 
 
index 864d5c1..17d55f0 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-'use strict';
+"use strict";
 
 
 // This file relies on the fact that the following declaration has been made
index 29fa831..c702d8c 100644 (file)
@@ -238,7 +238,10 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
   // Bail out if no moving is necessary.
   if (num_additional_args === del_count) return;
   // Move data to new array.
-  var new_array = new InternalArray(len - del_count + num_additional_args);
+  var new_array = new InternalArray(
+      // Clamp array length to 2^32-1 to avoid early RangeError.
+      MathMin(len - del_count + num_additional_args, 0xffffffff));
+  var big_indices;
   var indices = %GetArrayKeys(array, len);
   if (IS_NUMBER(indices)) {
     var limit = indices;
@@ -267,7 +270,12 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
         } else if (key >= start_i + del_count) {
           var current = array[key];
           if (!IS_UNDEFINED(current) || key in array) {
-            new_array[key - del_count + num_additional_args] = current;
+            var new_key = key - del_count + num_additional_args;
+            new_array[new_key] = current;
+            if (new_key > 0xffffffff) {
+              big_indices = big_indices || new InternalArray();
+              big_indices.push(new_key);
+            }
           }
         }
       }
@@ -275,6 +283,14 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
   }
   // Move contents of new_array into this array
   %MoveArrayContents(new_array, array);
+  // Add any moved values that aren't elements anymore.
+  if (!IS_UNDEFINED(big_indices)) {
+    var length = big_indices.length;
+    for (var i = 0; i < length; ++i) {
+      var key = big_indices[i];
+      array[key] = new_array[key];
+    }
+  }
 }
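
A worked example of the clamp above: the scratch InternalArray cannot be
longer than 2^32 - 1, so the requested length is capped and any keys that land
beyond the cap are replayed through big_indices after the bulk move.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint64_t kMaxArrayLength = 0xffffffffull;  // 2^32 - 1
      uint64_t len = 0xfffffffeull, del_count = 0, num_additional = 5;
      uint64_t wanted = len - del_count + num_additional;
      uint64_t clamped = std::min(wanted, kMaxArrayLength);
      std::printf("wanted %llu, clamped %llu\n",
                  (unsigned long long)wanted, (unsigned long long)clamped);
      return 0;
    }
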
 
 
@@ -282,9 +298,10 @@ function SparseMove(array, start_i, del_count, len, num_additional_args) {
 // because the receiver is not an array (so we have no choice) or because we
 // know we are not deleting or moving a lot of elements.
 function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
+  var is_array = IS_ARRAY(array);
   for (var i = 0; i < del_count; i++) {
     var index = start_i + i;
-    if (index in array) {
+    if (HAS_INDEX(array, index, is_array)) {
       var current = array[index];
       // The spec requires [[DefineOwnProperty]] here, %AddElement is close
       // enough (in that it ignores the prototype).
@@ -295,6 +312,7 @@ function SimpleSlice(array, start_i, del_count, len, deleted_elements) {
 
 
 function SimpleMove(array, start_i, del_count, len, num_additional_args) {
+  var is_array = IS_ARRAY(array);
   if (num_additional_args !== del_count) {
     // Move the existing elements after the elements to be deleted
     // to the right position in the resulting array.
@@ -302,7 +320,7 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
       for (var i = len - del_count; i > start_i; i--) {
         var from_index = i + del_count - 1;
         var to_index = i + num_additional_args - 1;
-        if (from_index in array) {
+        if (HAS_INDEX(array, from_index, is_array)) {
           array[to_index] = array[from_index];
         } else {
           delete array[to_index];
@@ -312,7 +330,7 @@ function SimpleMove(array, start_i, del_count, len, num_additional_args) {
       for (var i = start_i; i < len - del_count; i++) {
         var from_index = i + del_count;
         var to_index = i + num_additional_args;
-        if (from_index in array) {
+        if (HAS_INDEX(array, from_index, is_array)) {
           array[to_index] = array[from_index];
         } else {
           delete array[to_index];
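
HAS_INDEX replaces the plain `in` test in these loops. Its definition is not
part of this hunk; the assumed idea is that for a packed (hole-free) array a
bounds check is enough, and the slower membership query is only needed
otherwise. A stand-in sketch of that shape:

    #include <cstddef>
    #include <unordered_set>

    bool HasIndexSketch(bool is_packed_array, size_t length, size_t index,
                        const std::unordered_set<size_t>& present_keys) {
      if (is_packed_array) return index < length;  // no holes: bounds check
      return present_keys.count(index) != 0;       // the old `index in array`
    }
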
@@ -966,7 +984,7 @@ function ArraySort(comparefn) {
   // of a prototype property.
   var CopyFromPrototype = function CopyFromPrototype(obj, length) {
     var max = 0;
-    for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) {
+    for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
       var indices = %GetArrayKeys(proto, length);
       if (IS_NUMBER(indices)) {
         // It's an interval.
@@ -995,7 +1013,7 @@ function ArraySort(comparefn) {
   // where a prototype of obj has an element. I.e., shadow all prototype
   // elements in that range.
   var ShadowPrototypeElements = function(obj, from, to) {
-    for (var proto = %GetPrototype(obj); proto; proto = %GetPrototype(proto)) {
+    for (var proto = %_GetPrototype(obj); proto; proto = %_GetPrototype(proto)) {
       var indices = %GetArrayKeys(proto, to);
       if (IS_NUMBER(indices)) {
         // It's an interval.
@@ -1063,7 +1081,7 @@ function ArraySort(comparefn) {
     }
     for (i = length - num_holes; i < length; i++) {
      // For compatibility with WebKit, do not expose elements in the prototype.
-      if (i in %GetPrototype(obj)) {
+      if (i in %_GetPrototype(obj)) {
         obj[i] = UNDEFINED;
       } else {
         delete obj[i];
@@ -1137,9 +1155,10 @@ function ArrayFilter(f, receiver) {
   var result = new $Array();
   var accumulator = new InternalArray();
   var accumulator_length = 0;
+  var is_array = IS_ARRAY(array);
   var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
   for (var i = 0; i < length; i++) {
-    if (i in array) {
+    if (HAS_INDEX(array, i, is_array)) {
       var element = array[i];
       // Prepare break slots for debugger step in.
       if (stepping) %DebugPrepareStepInIfStepping(f);
@@ -1172,9 +1191,10 @@ function ArrayForEach(f, receiver) {
     needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
   }
 
+  var is_array = IS_ARRAY(array);
   var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
   for (var i = 0; i < length; i++) {
-    if (i in array) {
+    if (HAS_INDEX(array, i, is_array)) {
       var element = array[i];
       // Prepare break slots for debugger step in.
       if (stepping) %DebugPrepareStepInIfStepping(f);
@@ -1205,9 +1225,10 @@ function ArraySome(f, receiver) {
     needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
   }
 
+  var is_array = IS_ARRAY(array);
   var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
   for (var i = 0; i < length; i++) {
-    if (i in array) {
+    if (HAS_INDEX(array, i, is_array)) {
       var element = array[i];
       // Prepare break slots for debugger step in.
       if (stepping) %DebugPrepareStepInIfStepping(f);
@@ -1237,9 +1258,10 @@ function ArrayEvery(f, receiver) {
     needs_wrapper = SHOULD_CREATE_WRAPPER(f, receiver);
   }
 
+  var is_array = IS_ARRAY(array);
   var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
   for (var i = 0; i < length; i++) {
-    if (i in array) {
+    if (HAS_INDEX(array, i, is_array)) {
       var element = array[i];
       // Prepare break slots for debugger step in.
       if (stepping) %DebugPrepareStepInIfStepping(f);
@@ -1270,9 +1292,10 @@ function ArrayMap(f, receiver) {
 
   var result = new $Array();
   var accumulator = new InternalArray(length);
+  var is_array = IS_ARRAY(array);
   var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(f);
   for (var i = 0; i < length; i++) {
-    if (i in array) {
+    if (HAS_INDEX(array, i, is_array)) {
       var element = array[i];
       // Prepare break slots for debugger step in.
       if (stepping) %DebugPrepareStepInIfStepping(f);
@@ -1407,10 +1430,11 @@ function ArrayReduce(callback, current) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
 
+  var is_array = IS_ARRAY(array);
   var i = 0;
   find_initial: if (%_ArgumentsLength() < 2) {
     for (; i < length; i++) {
-      if (i in array) {
+      if (HAS_INDEX(array, i, is_array)) {
         current = array[i++];
         break find_initial;
       }
@@ -1421,7 +1445,7 @@ function ArrayReduce(callback, current) {
   var receiver = %GetDefaultReceiver(callback);
   var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
   for (; i < length; i++) {
-    if (i in array) {
+    if (HAS_INDEX(array, i, is_array)) {
       var element = array[i];
       // Prepare break slots for debugger step in.
       if (stepping) %DebugPrepareStepInIfStepping(callback);
@@ -1443,10 +1467,11 @@ function ArrayReduceRight(callback, current) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
 
+  var is_array = IS_ARRAY(array);
   var i = length - 1;
   find_initial: if (%_ArgumentsLength() < 2) {
     for (; i >= 0; i--) {
-      if (i in array) {
+      if (HAS_INDEX(array, i, is_array)) {
         current = array[i--];
         break find_initial;
       }
@@ -1457,7 +1482,7 @@ function ArrayReduceRight(callback, current) {
   var receiver = %GetDefaultReceiver(callback);
   var stepping = DEBUG_IS_ACTIVE && %DebugCallbackSupportsStepping(callback);
   for (; i >= 0; i--) {
-    if (i in array) {
+    if (HAS_INDEX(array, i, is_array)) {
       var element = array[i];
       // Prepare break slots for debugger step in.
       if (stepping) %DebugPrepareStepInIfStepping(callback);
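Note: all of the iteration builtins above (filter, forEach, some, every,
map, reduce, reduceRight) follow the same pattern after this change:
compute is_array once outside the loop, then guard each element access
with HAS_INDEX. Observable behaviour is unchanged; holes are still
skipped:

    var a = [1, , 3];  // index 1 is a hole, not undefined
    console.log(a.reduce(function(acc, x) { return acc + x; }));  // 4
    a.forEach(function(x, i) { console.log(i, x); });  // logs "0 1" and "2 3"
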
index 8994ac8..c2dd70e 100644 (file)
@@ -16,11 +16,14 @@ namespace internal {
 class AstNumberingVisitor FINAL : public AstVisitor {
  public:
   explicit AstNumberingVisitor(Zone* zone)
-      : AstVisitor(), next_id_(BailoutId::FirstUsable().ToInt()) {
+      : AstVisitor(),
+        next_id_(BailoutId::FirstUsable().ToInt()),
+        dont_crankshaft_reason_(kNoReason),
+        dont_turbofan_reason_(kNoReason) {
     InitializeAstVisitor(zone);
   }
 
-  void Renumber(FunctionLiteral* node);
+  bool Renumber(FunctionLiteral* node);
 
  private:
 // AST node visitor interface.
@@ -28,6 +31,8 @@ class AstNumberingVisitor FINAL : public AstVisitor {
   AST_NODE_LIST(DEFINE_VISIT)
 #undef DEFINE_VISIT
 
+  bool Finish(FunctionLiteral* node);
+
   void VisitStatements(ZoneList<Statement*>* statements) OVERRIDE;
   void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
   void VisitArguments(ZoneList<Expression*>* arguments);
@@ -40,9 +45,56 @@ class AstNumberingVisitor FINAL : public AstVisitor {
   }
 
   void IncrementNodeCount() { properties_.add_node_count(1); }
+  void DisableCrankshaft(BailoutReason reason) {
+    dont_crankshaft_reason_ = reason;
+    properties_.flags()->Add(kDontSelfOptimize);
+  }
+  // TODO(turbofan): Remove the dont_turbofan_reason once no node types
+  // disable TurboFan.  That set of nodes must be kept in sync with
+  // Pipeline::GenerateCode.
+  void DisableTurbofan(BailoutReason reason) {
+    dont_crankshaft_reason_ = reason;
+    dont_turbofan_reason_ = reason;
+    DisableSelfOptimization();
+  }
+  void DisableSelfOptimization() {
+    properties_.flags()->Add(kDontSelfOptimize);
+  }
+  void DisableCaching(BailoutReason reason) {
+    dont_crankshaft_reason_ = reason;
+    DisableSelfOptimization();
+    properties_.flags()->Add(kDontCache);
+  }
+
+  template <typename Node>
+  void ReserveFeedbackSlots(Node* node) {
+    FeedbackVectorRequirements reqs =
+        node->ComputeFeedbackRequirements(isolate());
+    if (reqs.slots() > 0) {
+      node->SetFirstFeedbackSlot(FeedbackVectorSlot(properties_.slots()));
+      properties_.increase_slots(reqs.slots());
+    }
+    if (reqs.ic_slots() > 0) {
+      int ic_slots = properties_.ic_slots();
+      node->SetFirstFeedbackICSlot(FeedbackVectorICSlot(ic_slots));
+      properties_.increase_ic_slots(reqs.ic_slots());
+      if (FLAG_vector_ics) {
+        for (int i = 0; i < reqs.ic_slots(); i++) {
+          properties_.SetKind(ic_slots + i, node->FeedbackICSlotKind(i));
+        }
+      }
+    }
+  }
+
+  BailoutReason dont_optimize_reason() const {
+    return (dont_turbofan_reason_ != kNoReason) ? dont_turbofan_reason_
+                                                : dont_crankshaft_reason_;
+  }
 
   int next_id_;
   AstProperties properties_;
+  BailoutReason dont_crankshaft_reason_;
+  BailoutReason dont_turbofan_reason_;
 
   DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
   DISALLOW_COPY_AND_ASSIGN(AstNumberingVisitor);
@@ -57,12 +109,14 @@ void AstNumberingVisitor::VisitVariableDeclaration(VariableDeclaration* node) {
 
 void AstNumberingVisitor::VisitExportDeclaration(ExportDeclaration* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kExportDeclaration);
   VisitVariableProxy(node->proxy());
 }
 
 
 void AstNumberingVisitor::VisitModuleUrl(ModuleUrl* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kModuleUrl);
 }
 
 
@@ -83,6 +137,7 @@ void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
 
 void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kDebuggerStatement);
   node->set_base_id(ReserveIdRange(DebuggerStatement::num_ids()));
 }
 
@@ -90,6 +145,7 @@ void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
 void AstNumberingVisitor::VisitNativeFunctionLiteral(
     NativeFunctionLiteral* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kNativeFunctionLiteral);
   node->set_base_id(ReserveIdRange(NativeFunctionLiteral::num_ids()));
 }
 
@@ -108,6 +164,10 @@ void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
 
 void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node) {
   IncrementNodeCount();
+  if (node->var()->IsLookupSlot()) {
+    DisableCrankshaft(kReferenceToAVariableWhichRequiresDynamicLookup);
+  }
+  ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(VariableProxy::num_ids()));
 }
 
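Note: a lookup slot arises when a reference can only be resolved at
runtime; the classic trigger is a sloppy-mode direct eval that may
introduce bindings. An illustrative function that would hit the new
bailout:

    function f() {
      eval('var hidden = 1;');  // direct eval can create bindings dynamically
      return hidden;            // resolving 'hidden' needs a runtime LOOKUP
    }
    console.log(f());  // 1
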
@@ -120,6 +180,8 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
 
 void AstNumberingVisitor::VisitSuperReference(SuperReference* node) {
   IncrementNodeCount();
+  DisableTurbofan(kSuperReference);
+  ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(SuperReference::num_ids()));
   Visit(node->this_var());
 }
@@ -127,6 +189,7 @@ void AstNumberingVisitor::VisitSuperReference(SuperReference* node) {
 
 void AstNumberingVisitor::VisitModuleDeclaration(ModuleDeclaration* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kModuleDeclaration);
   VisitVariableProxy(node->proxy());
   Visit(node->module());
 }
@@ -134,6 +197,7 @@ void AstNumberingVisitor::VisitModuleDeclaration(ModuleDeclaration* node) {
 
 void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kImportDeclaration);
   VisitVariableProxy(node->proxy());
   Visit(node->module());
 }
@@ -141,18 +205,21 @@ void AstNumberingVisitor::VisitImportDeclaration(ImportDeclaration* node) {
 
 void AstNumberingVisitor::VisitModuleVariable(ModuleVariable* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kModuleVariable);
   Visit(node->proxy());
 }
 
 
 void AstNumberingVisitor::VisitModulePath(ModulePath* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kModulePath);
   Visit(node->module());
 }
 
 
 void AstNumberingVisitor::VisitModuleStatement(ModuleStatement* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kModuleStatement);
   Visit(node->body());
 }
 
@@ -171,6 +238,8 @@ void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
 
 void AstNumberingVisitor::VisitYield(Yield* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kYield);
+  ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(Yield::num_ids()));
   Visit(node->generator_object());
   Visit(node->expression());
@@ -215,12 +284,18 @@ void AstNumberingVisitor::VisitFunctionDeclaration(FunctionDeclaration* node) {
 
 void AstNumberingVisitor::VisitModuleLiteral(ModuleLiteral* node) {
   IncrementNodeCount();
+  DisableCaching(kModuleLiteral);
   VisitBlock(node->body());
 }
 
 
 void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
   IncrementNodeCount();
+  ReserveFeedbackSlots(node);
+  if (node->is_jsruntime()) {
+    // Don't try to optimize JS runtime calls because we bail out on them.
+    DisableCrankshaft(kCallToAJavaScriptRuntimeFunction);
+  }
   node->set_base_id(ReserveIdRange(CallRuntime::num_ids()));
   VisitArguments(node->arguments());
 }
@@ -228,6 +303,7 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
 
 void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
   IncrementNodeCount();
+  DisableCrankshaft(kWithStatement);
   Visit(node->expression());
   Visit(node->statement());
 }
@@ -235,6 +311,7 @@ void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
 
 void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
   IncrementNodeCount();
+  DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(DoWhileStatement::num_ids()));
   Visit(node->body());
   Visit(node->cond());
@@ -243,6 +320,7 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
 
 void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
   IncrementNodeCount();
+  DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(WhileStatement::num_ids()));
   Visit(node->cond());
   Visit(node->body());
@@ -251,6 +329,7 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
 
 void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
   IncrementNodeCount();
+  DisableTurbofan(kTryCatchStatement);
   Visit(node->try_block());
   Visit(node->catch_block());
 }
@@ -258,6 +337,7 @@ void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
 
 void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
   IncrementNodeCount();
+  DisableTurbofan(kTryFinallyStatement);
   Visit(node->try_block());
   Visit(node->finally_block());
 }
@@ -265,6 +345,7 @@ void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
 
 void AstNumberingVisitor::VisitProperty(Property* node) {
   IncrementNodeCount();
+  ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(Property::num_ids()));
   Visit(node->key());
   Visit(node->obj());
@@ -298,6 +379,8 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
 
 void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
   IncrementNodeCount();
+  DisableSelfOptimization();
+  ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(ForInStatement::num_ids()));
   Visit(node->each());
   Visit(node->enumerable());
@@ -307,6 +390,7 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
 
 void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
   IncrementNodeCount();
+  DisableTurbofan(kForOfStatement);
   node->set_base_id(ReserveIdRange(ForOfStatement::num_ids()));
   Visit(node->assign_iterator());
   Visit(node->next_result());
@@ -357,6 +441,7 @@ void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
 
 void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
   IncrementNodeCount();
+  DisableSelfOptimization();
   node->set_base_id(ReserveIdRange(ForStatement::num_ids()));
   if (node->init() != NULL) Visit(node->init());
   if (node->cond() != NULL) Visit(node->cond());
@@ -367,9 +452,13 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
 
 void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
   IncrementNodeCount();
+  DisableTurbofan(kClassLiteral);
   node->set_base_id(ReserveIdRange(ClassLiteral::num_ids()));
   if (node->extends()) Visit(node->extends());
   if (node->constructor()) Visit(node->constructor());
+  if (node->class_variable_proxy()) {
+    VisitVariableProxy(node->class_variable_proxy());
+  }
   for (int i = 0; i < node->properties()->length(); i++) {
     VisitObjectLiteralProperty(node->properties()->at(i));
   }
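Note: the extra visit covers a named class's own binding, which is
represented by a VariableProxy like any other reference and therefore
needs numbering (and any feedback slots) too. Illustrative source, noting
that class syntax was still behind a harmony flag in this V8 version:

    var C = class Named {
      size() { return Named.name.length; }  // 'Named' is the class's own binding
    };
    console.log(new C().size());  // 5
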
@@ -403,6 +492,7 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
 
 void AstNumberingVisitor::VisitCall(Call* node) {
   IncrementNodeCount();
+  ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(Call::num_ids()));
   Visit(node->expression());
   VisitArguments(node->arguments());
@@ -411,6 +501,7 @@ void AstNumberingVisitor::VisitCall(Call* node) {
 
 void AstNumberingVisitor::VisitCallNew(CallNew* node) {
   IncrementNodeCount();
+  ReserveFeedbackSlots(node);
   node->set_base_id(ReserveIdRange(CallNew::num_ids()));
   Visit(node->expression());
   VisitArguments(node->arguments());
@@ -448,17 +539,26 @@ void AstNumberingVisitor::VisitFunctionLiteral(FunctionLiteral* node) {
 }
 
 
-void AstNumberingVisitor::Renumber(FunctionLiteral* node) {
-  properties_.flags()->Add(*node->flags());
-  properties_.increase_feedback_slots(node->slot_count());
-  properties_.increase_ic_feedback_slots(node->ic_slot_count());
+bool AstNumberingVisitor::Finish(FunctionLiteral* node) {
+  node->set_ast_properties(&properties_);
+  node->set_dont_optimize_reason(dont_optimize_reason());
+  return !HasStackOverflow();
+}
 
-  if (node->scope()->HasIllegalRedeclaration()) {
-    node->scope()->VisitIllegalRedeclaration(this);
-    return;
-  }
 
+bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
   Scope* scope = node->scope();
+
+  if (scope->HasIllegalRedeclaration()) {
+    scope->VisitIllegalRedeclaration(this);
+    DisableCrankshaft(kFunctionWithIllegalRedeclaration);
+    return Finish(node);
+  }
+  if (scope->calls_eval()) DisableCrankshaft(kFunctionCallsEval);
+  if (scope->arguments() != NULL && !scope->arguments()->IsStackAllocated()) {
+    DisableCrankshaft(kContextAllocatedArguments);
+  }
+
   VisitDeclarations(scope->declarations());
   if (scope->is_function_scope() && scope->function() != NULL) {
     // Visit the name of the named function expression.
@@ -466,14 +566,13 @@ void AstNumberingVisitor::Renumber(FunctionLiteral* node) {
   }
   VisitStatements(node->body());
 
-  node->set_ast_properties(&properties_);
+  return Finish(node);
 }
 
 
 bool AstNumbering::Renumber(FunctionLiteral* function, Zone* zone) {
   AstNumberingVisitor visitor(zone);
-  visitor.Renumber(function);
-  return !visitor.HasStackOverflow();
+  return visitor.Renumber(function);
 }
 }
 }  // namespace v8::internal
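Note: in summary, the visitor counts every node and records a bailout
reason for one or both optimizing tiers when it sees specific constructs.
A hedged JavaScript rendering (whether a function actually optimizes also
depends on flags and heuristics in this V8 version):

    function usesWith(o) { with (o) { return x; } }      // DisableCrankshaft (kWithStatement)
    function* gen() { yield 1; }                         // DisableCrankshaft (kYield)
    function guarded(f) { try { return f(); } catch (e) { return e; } }  // DisableTurbofan
    function iterate(xs) { for (var x of xs) {} }        // DisableTurbofan (kForOfStatement)
    function spin(n) { for (var i = 0; i < n; i++) {} }  // DisableSelfOptimization only
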
diff --git a/deps/v8/src/ast-this-access-visitor.cc b/deps/v8/src/ast-this-access-visitor.cc
new file mode 100644 (file)
index 0000000..cf4a3de
--- /dev/null
@@ -0,0 +1,239 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/ast-this-access-visitor.h"
+#include "src/parser.h"
+
+namespace v8 {
+namespace internal {
+
+typedef class AstThisAccessVisitor ATAV;  // for brevity.
+
+ATAV::AstThisAccessVisitor(Zone* zone) : uses_this_(false) {
+  InitializeAstVisitor(zone);
+}
+
+
+void ATAV::VisitVariableProxy(VariableProxy* proxy) {
+  if (proxy->is_this()) {
+    uses_this_ = true;
+  }
+}
+
+
+void ATAV::VisitSuperReference(SuperReference* leaf) {
+  // super.method() and super(...) both count as uses of 'this'.
+  uses_this_ = true;
+}
+
+
+void ATAV::VisitCallNew(CallNew* e) {
+  // new super(..) does not use 'this'.
+  if (!e->expression()->IsSuperReference()) {
+    Visit(e->expression());
+  }
+  VisitExpressions(e->arguments());
+}
+
+
+// ---------------------------------------------------------------------------
+// -- Leaf nodes -------------------------------------------------------------
+// ---------------------------------------------------------------------------
+
+void ATAV::VisitVariableDeclaration(VariableDeclaration* leaf) {}
+void ATAV::VisitFunctionDeclaration(FunctionDeclaration* leaf) {}
+void ATAV::VisitModuleDeclaration(ModuleDeclaration* leaf) {}
+void ATAV::VisitImportDeclaration(ImportDeclaration* leaf) {}
+void ATAV::VisitExportDeclaration(ExportDeclaration* leaf) {}
+void ATAV::VisitModuleVariable(ModuleVariable* leaf) {}
+void ATAV::VisitModulePath(ModulePath* leaf) {}
+void ATAV::VisitModuleUrl(ModuleUrl* leaf) {}
+void ATAV::VisitEmptyStatement(EmptyStatement* leaf) {}
+void ATAV::VisitContinueStatement(ContinueStatement* leaf) {}
+void ATAV::VisitBreakStatement(BreakStatement* leaf) {}
+void ATAV::VisitDebuggerStatement(DebuggerStatement* leaf) {}
+void ATAV::VisitFunctionLiteral(FunctionLiteral* leaf) {}
+void ATAV::VisitNativeFunctionLiteral(NativeFunctionLiteral* leaf) {}
+void ATAV::VisitLiteral(Literal* leaf) {}
+void ATAV::VisitRegExpLiteral(RegExpLiteral* leaf) {}
+void ATAV::VisitThisFunction(ThisFunction* leaf) {}
+
+// ---------------------------------------------------------------------------
+// -- Pass-through nodes -----------------------------------------------------
+// ---------------------------------------------------------------------------
+void ATAV::VisitModuleLiteral(ModuleLiteral* e) { Visit(e->body()); }
+
+
+void ATAV::VisitBlock(Block* stmt) { VisitStatements(stmt->statements()); }
+
+
+void ATAV::VisitExpressionStatement(ExpressionStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void ATAV::VisitIfStatement(IfStatement* stmt) {
+  Visit(stmt->condition());
+  Visit(stmt->then_statement());
+  Visit(stmt->else_statement());
+}
+
+
+void ATAV::VisitReturnStatement(ReturnStatement* stmt) {
+  Visit(stmt->expression());
+}
+
+
+void ATAV::VisitWithStatement(WithStatement* stmt) {
+  Visit(stmt->expression());
+  Visit(stmt->statement());
+}
+
+
+void ATAV::VisitSwitchStatement(SwitchStatement* stmt) {
+  Visit(stmt->tag());
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  for (int i = 0; i < clauses->length(); i++) {
+    Visit(clauses->at(i));
+  }
+}
+
+
+void ATAV::VisitTryFinallyStatement(TryFinallyStatement* stmt) {
+  Visit(stmt->try_block());
+  Visit(stmt->finally_block());
+}
+
+
+void ATAV::VisitClassLiteral(ClassLiteral* e) {
+  VisitIfNotNull(e->extends());
+  Visit(e->constructor());
+  ZoneList<ObjectLiteralProperty*>* properties = e->properties();
+  for (int i = 0; i < properties->length(); i++) {
+    Visit(properties->at(i)->value());
+  }
+}
+
+
+void ATAV::VisitConditional(Conditional* e) {
+  Visit(e->condition());
+  Visit(e->then_expression());
+  Visit(e->else_expression());
+}
+
+
+void ATAV::VisitObjectLiteral(ObjectLiteral* e) {
+  ZoneList<ObjectLiteralProperty*>* properties = e->properties();
+  for (int i = 0; i < properties->length(); i++) {
+    Visit(properties->at(i)->value());
+  }
+}
+
+
+void ATAV::VisitArrayLiteral(ArrayLiteral* e) { VisitExpressions(e->values()); }
+
+
+void ATAV::VisitYield(Yield* stmt) {
+  Visit(stmt->generator_object());
+  Visit(stmt->expression());
+}
+
+
+void ATAV::VisitThrow(Throw* stmt) { Visit(stmt->exception()); }
+
+
+void ATAV::VisitProperty(Property* e) {
+  Visit(e->obj());
+  Visit(e->key());
+}
+
+
+void ATAV::VisitCall(Call* e) {
+  Visit(e->expression());
+  VisitExpressions(e->arguments());
+}
+
+
+void ATAV::VisitCallRuntime(CallRuntime* e) {
+  VisitExpressions(e->arguments());
+}
+
+
+void ATAV::VisitUnaryOperation(UnaryOperation* e) { Visit(e->expression()); }
+
+
+void ATAV::VisitBinaryOperation(BinaryOperation* e) {
+  Visit(e->left());
+  Visit(e->right());
+}
+
+
+void ATAV::VisitCompareOperation(CompareOperation* e) {
+  Visit(e->left());
+  Visit(e->right());
+}
+
+
+void ATAV::VisitCaseClause(CaseClause* cc) {
+  if (!cc->is_default()) Visit(cc->label());
+  VisitStatements(cc->statements());
+}
+
+
+void ATAV::VisitModuleStatement(ModuleStatement* stmt) { Visit(stmt->body()); }
+
+
+void ATAV::VisitTryCatchStatement(TryCatchStatement* stmt) {
+  Visit(stmt->try_block());
+  Visit(stmt->catch_block());
+}
+
+
+void ATAV::VisitDoWhileStatement(DoWhileStatement* loop) {
+  Visit(loop->body());
+  Visit(loop->cond());
+}
+
+
+void ATAV::VisitWhileStatement(WhileStatement* loop) {
+  Visit(loop->cond());
+  Visit(loop->body());
+}
+
+
+void ATAV::VisitForStatement(ForStatement* loop) {
+  VisitIfNotNull(loop->init());
+  VisitIfNotNull(loop->cond());
+  Visit(loop->body());
+  VisitIfNotNull(loop->next());
+}
+
+
+void ATAV::VisitForInStatement(ForInStatement* loop) {
+  Visit(loop->each());
+  Visit(loop->subject());
+  Visit(loop->body());
+}
+
+
+void ATAV::VisitForOfStatement(ForOfStatement* loop) {
+  Visit(loop->each());
+  Visit(loop->subject());
+  Visit(loop->body());
+}
+
+
+void ATAV::VisitAssignment(Assignment* stmt) {
+  Expression* l = stmt->target();
+  Visit(l);
+  Visit(stmt->value());
+}
+
+
+void ATAV::VisitCountOperation(CountOperation* e) {
+  Expression* l = e->expression();
+  Visit(l);
+}
+}
+}  // namespace v8::internal
diff --git a/deps/v8/src/ast-this-access-visitor.h b/deps/v8/src/ast-this-access-visitor.h
new file mode 100644 (file)
index 0000000..6030981
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_AST_THIS_ACCESS_VISITOR_H_
+#define V8_AST_THIS_ACCESS_VISITOR_H_
+#include "src/ast.h"
+
+namespace v8 {
+namespace internal {
+
+class AstThisAccessVisitor : public AstVisitor {
+ public:
+  explicit AstThisAccessVisitor(Zone* zone);
+
+  bool UsesThis() { return uses_this_; }
+
+#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
+  AST_NODE_LIST(DECLARE_VISIT)
+#undef DECLARE_VISIT
+
+ private:
+  bool uses_this_;
+
+  void VisitIfNotNull(AstNode* node) {
+    if (node != NULL) Visit(node);
+  }
+
+  DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
+  DISALLOW_COPY_AND_ASSIGN(AstThisAccessVisitor);
+};
+}
+}  // namespace v8::internal
+#endif  // V8_AST_THIS_ACCESS_VISITOR_H_
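Note: taken together, the new visitor answers one question: does a
function observe 'this', directly or through 'super'? In plain JavaScript
terms (illustrative; the visitor walks the AST, not source text):

    function plain() { return 1; }       // uses_this_ stays false
    function self() { return this.x; }   // true: a VariableProxy marked is_this
    class C extends Array {
      m() { return super.length; }       // true: SuperReference
      constructor() { super(); }         // true: super(...) reaches the
    }                                    //       SuperReference through VisitCall
    // Only 'new super(...)' is exempt: VisitCallNew skips the
    // SuperReference and visits just the arguments.
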
index 518be23..ff20100 100644 (file)
@@ -56,22 +56,20 @@ class AstRawStringInternalizationKey : public HashTableKey {
   explicit AstRawStringInternalizationKey(const AstRawString* string)
       : string_(string) {}
 
-  virtual bool IsMatch(Object* other) OVERRIDE {
+  bool IsMatch(Object* other) OVERRIDE {
     if (string_->is_one_byte_)
       return String::cast(other)->IsOneByteEqualTo(string_->literal_bytes_);
     return String::cast(other)->IsTwoByteEqualTo(
         Vector<const uint16_t>::cast(string_->literal_bytes_));
   }
 
-  virtual uint32_t Hash() OVERRIDE {
-    return string_->hash() >> Name::kHashShift;
-  }
+  uint32_t Hash() OVERRIDE { return string_->hash() >> Name::kHashShift; }
 
-  virtual uint32_t HashForObject(Object* key) OVERRIDE {
+  uint32_t HashForObject(Object* key) OVERRIDE {
     return String::cast(key)->Hash();
   }
 
-  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
+  Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
     if (string_->is_one_byte_)
       return isolate->factory()->NewOneByteInternalizedString(
           string_->literal_bytes_, string_->hash());
@@ -182,9 +180,8 @@ void AstValue::Internalize(Isolate* isolate) {
       DCHECK(!string_->string().is_null());
       break;
     case SYMBOL:
-      value_ = Object::GetProperty(
-                   isolate, handle(isolate->native_context()->builtins()),
-                   symbol_name_).ToHandleChecked();
+      DCHECK_EQ(0, strcmp(symbol_name_, "iterator_symbol"));
+      value_ = isolate->factory()->iterator_symbol();
       break;
     case NUMBER:
       value_ = isolate->factory()->NewNumber(number_, TENURED);
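Note: the SYMBOL case is now hard-wired, as the DCHECK shows: the only
symbol name the AST internalizes is "iterator_symbol", so it can be taken
straight from the factory instead of a property load off the builtins
object. At the language level this is the symbol behind the for-of
protocol:

    var obj = {};
    obj[Symbol.iterator] = function* () { yield 1; yield 2; };
    for (var v of obj) console.log(v);  // 1, then 2
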
index 09a4140..a189f17 100644 (file)
@@ -64,13 +64,13 @@ class AstString : public ZoneObject {
 
 class AstRawString : public AstString {
  public:
-  virtual int length() const OVERRIDE {
+  int length() const OVERRIDE {
     if (is_one_byte_)
       return literal_bytes_.length();
     return literal_bytes_.length() / 2;
   }
 
-  virtual void Internalize(Isolate* isolate) OVERRIDE;
+  void Internalize(Isolate* isolate) OVERRIDE;
 
   bool AsArrayIndex(uint32_t* index) const;
 
@@ -124,11 +124,9 @@ class AstConsString : public AstString {
       : left_(left),
         right_(right) {}
 
-  virtual int length() const OVERRIDE {
-    return left_->length() + right_->length();
-  }
+  int length() const OVERRIDE { return left_->length() + right_->length(); }
 
-  virtual void Internalize(Isolate* isolate) OVERRIDE;
+  void Internalize(Isolate* isolate) OVERRIDE;
 
  private:
   friend class AstValueFactory;
@@ -248,8 +246,10 @@ class AstValue : public ZoneObject {
   F(dot_result, ".result")                              \
   F(empty, "")                                          \
   F(eval, "eval")                                       \
+  F(get_template_callsite, "GetTemplateCallSite")       \
   F(initialize_const_global, "initializeConstGlobal")   \
   F(initialize_var_global, "initializeVarGlobal")       \
+  F(let, "let")                                         \
   F(make_reference_error, "MakeReferenceErrorEmbedded") \
   F(make_syntax_error, "MakeSyntaxErrorEmbedded")       \
   F(make_type_error, "MakeTypeErrorEmbedded")           \
index 26bc491..6329371 100644 (file)
@@ -151,6 +151,21 @@ StrictMode FunctionLiteral::strict_mode() const {
 }
 
 
+bool FunctionLiteral::uses_super_property() const {
+  DCHECK_NOT_NULL(scope());
+  return scope()->uses_super_property() || scope()->inner_uses_super_property();
+}
+
+
+bool FunctionLiteral::uses_super_constructor_call() const {
+  DCHECK_NOT_NULL(scope());
+  return scope()->uses_super_constructor_call() ||
+         scope()->inner_uses_super_constructor_call();
+}
+
+
+// Helper to find an existing shared function info in the baseline code for the
+// given function literal. Used to canonicalize SharedFunctionInfo objects.
 void FunctionLiteral::InitializeSharedInfo(
     Handle<Code> unoptimized_code) {
   for (RelocIterator it(*unoptimized_code); !it.done(); it.next()) {
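Note: both new predicates fold in inner scopes, so a use of super inside
a nested function attributes to the enclosing literal as well.
Illustrative source (arrow functions were still flag-gated in this V8
version):

    class D extends Array {
      m() { return () => super.length; }  // uses_super_property() -> true
      constructor() { super(); }          // uses_super_constructor_call() -> true
    }
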
@@ -567,6 +582,12 @@ bool Call::IsUsingCallFeedbackSlot(Isolate* isolate) const {
 }
 
 
+FeedbackVectorRequirements Call::ComputeFeedbackRequirements(Isolate* isolate) {
+  int ic_slots = IsUsingCallFeedbackSlot(isolate) ? 1 : 0;
+  return FeedbackVectorRequirements(0, ic_slots);
+}
+
+
 Call::CallType Call::GetCallType(Isolate* isolate) const {
   VariableProxy* proxy = expression()->AsVariableProxy();
   if (proxy != NULL) {
@@ -995,134 +1016,6 @@ CaseClause::CaseClause(Zone* zone, Expression* label,
       compare_type_(Type::None(zone)) {}
 
 
-#define REGULAR_NODE(NodeType)                                   \
-  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-  }
-#define REGULAR_NODE_WITH_FEEDBACK_SLOTS(NodeType)               \
-  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    add_slot_node(node);                                         \
-  }
-#define DONT_OPTIMIZE_NODE(NodeType)                             \
-  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    set_dont_crankshaft_reason(k##NodeType);                     \
-    add_flag(kDontSelfOptimize);                                 \
-  }
-#define DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType)         \
-  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    add_slot_node(node);                                         \
-    set_dont_crankshaft_reason(k##NodeType);                     \
-    add_flag(kDontSelfOptimize);                                 \
-  }
-#define DONT_TURBOFAN_NODE(NodeType)                             \
-  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    set_dont_crankshaft_reason(k##NodeType);                     \
-    set_dont_turbofan_reason(k##NodeType);                       \
-    add_flag(kDontSelfOptimize);                                 \
-  }
-#define DONT_TURBOFAN_NODE_WITH_FEEDBACK_SLOTS(NodeType)         \
-  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    add_slot_node(node);                                         \
-    set_dont_crankshaft_reason(k##NodeType);                     \
-    set_dont_turbofan_reason(k##NodeType);                       \
-    add_flag(kDontSelfOptimize);                                 \
-  }
-#define DONT_SELFOPTIMIZE_NODE(NodeType)                         \
-  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    add_flag(kDontSelfOptimize);                                 \
-  }
-#define DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(NodeType)     \
-  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    add_slot_node(node);                                         \
-    add_flag(kDontSelfOptimize);                                 \
-  }
-#define DONT_CACHE_NODE(NodeType)                                \
-  void AstConstructionVisitor::Visit##NodeType(NodeType* node) { \
-    set_dont_crankshaft_reason(k##NodeType);                     \
-    add_flag(kDontSelfOptimize);                                 \
-    add_flag(kDontCache);                                        \
-  }
-
-REGULAR_NODE(VariableDeclaration)
-REGULAR_NODE(FunctionDeclaration)
-REGULAR_NODE(Block)
-REGULAR_NODE(ExpressionStatement)
-REGULAR_NODE(EmptyStatement)
-REGULAR_NODE(IfStatement)
-REGULAR_NODE(ContinueStatement)
-REGULAR_NODE(BreakStatement)
-REGULAR_NODE(ReturnStatement)
-REGULAR_NODE(SwitchStatement)
-REGULAR_NODE(CaseClause)
-REGULAR_NODE(Conditional)
-REGULAR_NODE(Literal)
-REGULAR_NODE(ArrayLiteral)
-REGULAR_NODE(ObjectLiteral)
-REGULAR_NODE(RegExpLiteral)
-REGULAR_NODE(FunctionLiteral)
-REGULAR_NODE(Assignment)
-REGULAR_NODE(Throw)
-REGULAR_NODE(UnaryOperation)
-REGULAR_NODE(CountOperation)
-REGULAR_NODE(BinaryOperation)
-REGULAR_NODE(CompareOperation)
-REGULAR_NODE(ThisFunction)
-
-REGULAR_NODE_WITH_FEEDBACK_SLOTS(Call)
-REGULAR_NODE_WITH_FEEDBACK_SLOTS(CallNew)
-REGULAR_NODE_WITH_FEEDBACK_SLOTS(Property)
-// In theory, for VariableProxy we'd have to add:
-// if (node->var()->IsLookupSlot())
-//   set_dont_optimize_reason(kReferenceToAVariableWhichRequiresDynamicLookup);
-// But node->var() is usually not bound yet at VariableProxy creation time, and
-// LOOKUP variables only result from constructs that cannot be inlined anyway.
-REGULAR_NODE_WITH_FEEDBACK_SLOTS(VariableProxy)
-
-// We currently do not optimize any modules.
-DONT_OPTIMIZE_NODE(ModuleDeclaration)
-DONT_OPTIMIZE_NODE(ImportDeclaration)
-DONT_OPTIMIZE_NODE(ExportDeclaration)
-DONT_OPTIMIZE_NODE(ModuleVariable)
-DONT_OPTIMIZE_NODE(ModulePath)
-DONT_OPTIMIZE_NODE(ModuleUrl)
-DONT_OPTIMIZE_NODE(ModuleStatement)
-DONT_OPTIMIZE_NODE(WithStatement)
-DONT_OPTIMIZE_NODE(DebuggerStatement)
-DONT_OPTIMIZE_NODE(NativeFunctionLiteral)
-
-DONT_OPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(Yield)
-
-// TODO(turbofan): Remove the dont_turbofan_reason once this list is empty.
-// This list must be kept in sync with Pipeline::GenerateCode.
-DONT_TURBOFAN_NODE(ForOfStatement)
-DONT_TURBOFAN_NODE(TryCatchStatement)
-DONT_TURBOFAN_NODE(TryFinallyStatement)
-DONT_TURBOFAN_NODE(ClassLiteral)
-
-DONT_TURBOFAN_NODE_WITH_FEEDBACK_SLOTS(SuperReference)
-
-DONT_SELFOPTIMIZE_NODE(DoWhileStatement)
-DONT_SELFOPTIMIZE_NODE(WhileStatement)
-DONT_SELFOPTIMIZE_NODE(ForStatement)
-
-DONT_SELFOPTIMIZE_NODE_WITH_FEEDBACK_SLOTS(ForInStatement)
-
-DONT_CACHE_NODE(ModuleLiteral)
-
-
-void AstConstructionVisitor::VisitCallRuntime(CallRuntime* node) {
-  add_slot_node(node);
-  if (node->is_jsruntime()) {
-    // Don't try to optimize JS runtime calls because we bailout on them.
-    set_dont_crankshaft_reason(kCallToAJavaScriptRuntimeFunction);
-  }
-}
-
-#undef REGULAR_NODE
-#undef DONT_OPTIMIZE_NODE
-#undef DONT_SELFOPTIMIZE_NODE
-#undef DONT_CACHE_NODE
-
-
 uint32_t Literal::Hash() {
   return raw_value()->IsString()
              ? raw_value()->AsString()->hash()
index 749e579..43bde6a 100644 (file)
@@ -105,8 +105,7 @@ namespace internal {
   EXPRESSION_NODE_LIST(V)
 
 // Forward declarations
-class AstConstructionVisitor;
-template<class> class AstNodeFactory;
+class AstNodeFactory;
 class AstVisitor;
 class Declaration;
 class Module;
@@ -142,12 +141,10 @@ typedef ZoneList<Handle<String> > ZoneStringList;
 typedef ZoneList<Handle<Object> > ZoneObjectList;
 
 
-#define DECLARE_NODE_TYPE(type)                                 \
-  virtual void Accept(AstVisitor* v) OVERRIDE;                  \
-  virtual AstNode::NodeType node_type() const FINAL OVERRIDE {  \
-    return AstNode::k##type;                                    \
-  }                                                             \
-  template<class> friend class AstNodeFactory;
+#define DECLARE_NODE_TYPE(type)                                          \
+  void Accept(AstVisitor* v) OVERRIDE;                                   \
+  AstNode::NodeType node_type() const FINAL { return AstNode::k##type; } \
+  friend class AstNodeFactory;
 
 
 enum AstPropertiesFlag {
@@ -175,25 +172,24 @@ class AstProperties FINAL BASE_EMBEDDED {
  public:
   class Flags : public EnumSet<AstPropertiesFlag, int> {};
 
-  AstProperties() : node_count_(0), feedback_slots_(0), ic_feedback_slots_(0) {}
+  AstProperties() : node_count_(0) {}
 
   Flags* flags() { return &flags_; }
   int node_count() { return node_count_; }
   void add_node_count(int count) { node_count_ += count; }
 
-  int feedback_slots() const { return feedback_slots_; }
-  void increase_feedback_slots(int count) {
-    feedback_slots_ += count;
-  }
+  int slots() const { return spec_.slots(); }
+  void increase_slots(int count) { spec_.increase_slots(count); }
 
-  int ic_feedback_slots() const { return ic_feedback_slots_; }
-  void increase_ic_feedback_slots(int count) { ic_feedback_slots_ += count; }
+  int ic_slots() const { return spec_.ic_slots(); }
+  void increase_ic_slots(int count) { spec_.increase_ic_slots(count); }
+  void SetKind(int ic_slot, Code::Kind kind) { spec_.SetKind(ic_slot, kind); }
+  const FeedbackVectorSpec& get_spec() const { return spec_; }
 
  private:
   Flags flags_;
   int node_count_;
-  int feedback_slots_;
-  int ic_feedback_slots_;
+  FeedbackVectorSpec spec_;
 };
 
 
@@ -238,13 +234,19 @@ class AstNode: public ZoneObject {
   // node types which don't actually have this. Note that this is conceptually
   // not really nice, but multiple inheritance would introduce yet another
   // vtable entry per node, something we don't want for space reasons.
-  virtual FeedbackVectorRequirements ComputeFeedbackRequirements() {
+  virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
+      Isolate* isolate) {
     return FeedbackVectorRequirements(0, 0);
   }
   virtual void SetFirstFeedbackSlot(FeedbackVectorSlot slot) { UNREACHABLE(); }
   virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) {
     UNREACHABLE();
   }
+  // Each ICSlot stores a kind of IC which the participating node should know.
+  virtual Code::Kind FeedbackICSlotKind(int index) {
+    UNREACHABLE();
+    return Code::NUMBER_OF_KINDS;
+  }
 
  private:
   // Hidden to prevent accidental usage. It would have to load the
@@ -442,9 +444,7 @@ class BreakableStatement : public Statement {
   ZoneList<const AstRawString*>* labels() const { return labels_; }
 
   // Type testing & conversion.
-  virtual BreakableStatement* AsBreakableStatement() FINAL OVERRIDE {
-    return this;
-  }
+  BreakableStatement* AsBreakableStatement() FINAL { return this; }
 
   // Code generation
   Label* break_target() { return &break_target_; }
@@ -499,7 +499,7 @@ class Block FINAL : public BreakableStatement {
   static int num_ids() { return parent_num_ids() + 1; }
   BailoutId DeclsId() const { return BailoutId(local_id(0)); }
 
-  virtual bool IsJump() const OVERRIDE {
+  bool IsJump() const OVERRIDE {
     return !statements_.is_empty() && statements_.last()->IsJump()
         && labels() == NULL;  // Good enough as an approximation...
   }
@@ -553,7 +553,7 @@ class VariableDeclaration FINAL : public Declaration {
  public:
   DECLARE_NODE_TYPE(VariableDeclaration)
 
-  virtual InitializationFlag initialization() const OVERRIDE {
+  InitializationFlag initialization() const OVERRIDE {
     return mode() == VAR ? kCreatedInitialized : kNeedsInitialization;
   }
 
@@ -573,10 +573,10 @@ class FunctionDeclaration FINAL : public Declaration {
   DECLARE_NODE_TYPE(FunctionDeclaration)
 
   FunctionLiteral* fun() const { return fun_; }
-  virtual InitializationFlag initialization() const OVERRIDE {
+  InitializationFlag initialization() const OVERRIDE {
     return kCreatedInitialized;
   }
-  virtual bool IsInlineable() const OVERRIDE;
+  bool IsInlineable() const OVERRIDE;
 
  protected:
   FunctionDeclaration(Zone* zone,
@@ -602,7 +602,7 @@ class ModuleDeclaration FINAL : public Declaration {
   DECLARE_NODE_TYPE(ModuleDeclaration)
 
   Module* module() const { return module_; }
-  virtual InitializationFlag initialization() const OVERRIDE {
+  InitializationFlag initialization() const OVERRIDE {
     return kCreatedInitialized;
   }
 
@@ -626,7 +626,7 @@ class ImportDeclaration FINAL : public Declaration {
   DECLARE_NODE_TYPE(ImportDeclaration)
 
   Module* module() const { return module_; }
-  virtual InitializationFlag initialization() const OVERRIDE {
+  InitializationFlag initialization() const OVERRIDE {
     return kCreatedInitialized;
   }
 
@@ -649,7 +649,7 @@ class ExportDeclaration FINAL : public Declaration {
  public:
   DECLARE_NODE_TYPE(ExportDeclaration)
 
-  virtual InitializationFlag initialization() const OVERRIDE {
+  InitializationFlag initialization() const OVERRIDE {
     return kCreatedInitialized;
   }
 
@@ -760,9 +760,7 @@ class ModuleStatement FINAL : public Statement {
 class IterationStatement : public BreakableStatement {
  public:
   // Type testing & conversion.
-  virtual IterationStatement* AsIterationStatement() FINAL OVERRIDE {
-    return this;
-  }
+  IterationStatement* AsIterationStatement() FINAL { return this; }
 
   Statement* body() const { return body_; }
 
@@ -801,10 +799,8 @@ class DoWhileStatement FINAL : public IterationStatement {
   Expression* cond() const { return cond_; }
 
   static int num_ids() { return parent_num_ids() + 2; }
-  virtual BailoutId ContinueId() const OVERRIDE {
-    return BailoutId(local_id(0));
-  }
-  virtual BailoutId StackCheckId() const OVERRIDE { return BackEdgeId(); }
+  BailoutId ContinueId() const OVERRIDE { return BailoutId(local_id(0)); }
+  BailoutId StackCheckId() const OVERRIDE { return BackEdgeId(); }
   BailoutId BackEdgeId() const { return BailoutId(local_id(1)); }
 
  protected:
@@ -829,32 +825,21 @@ class WhileStatement FINAL : public IterationStatement {
   }
 
   Expression* cond() const { return cond_; }
-  bool may_have_function_literal() const {
-    return may_have_function_literal_;
-  }
-  void set_may_have_function_literal(bool value) {
-    may_have_function_literal_ = value;
-  }
 
   static int num_ids() { return parent_num_ids() + 1; }
-  virtual BailoutId ContinueId() const OVERRIDE { return EntryId(); }
-  virtual BailoutId StackCheckId() const OVERRIDE { return BodyId(); }
+  BailoutId ContinueId() const OVERRIDE { return EntryId(); }
+  BailoutId StackCheckId() const OVERRIDE { return BodyId(); }
   BailoutId BodyId() const { return BailoutId(local_id(0)); }
 
  protected:
   WhileStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
-      : IterationStatement(zone, labels, pos),
-        cond_(NULL),
-        may_have_function_literal_(true) {}
+      : IterationStatement(zone, labels, pos), cond_(NULL) {}
   static int parent_num_ids() { return IterationStatement::num_ids(); }
 
  private:
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
   Expression* cond_;
-
-  // True if there is a function literal subexpression in the condition.
-  bool may_have_function_literal_;
 };
 
 
@@ -876,32 +861,17 @@ class ForStatement FINAL : public IterationStatement {
   Expression* cond() const { return cond_; }
   Statement* next() const { return next_; }
 
-  bool may_have_function_literal() const {
-    return may_have_function_literal_;
-  }
-  void set_may_have_function_literal(bool value) {
-    may_have_function_literal_ = value;
-  }
-
   static int num_ids() { return parent_num_ids() + 2; }
-  virtual BailoutId ContinueId() const OVERRIDE {
-    return BailoutId(local_id(0));
-  }
-  virtual BailoutId StackCheckId() const OVERRIDE { return BodyId(); }
+  BailoutId ContinueId() const OVERRIDE { return BailoutId(local_id(0)); }
+  BailoutId StackCheckId() const OVERRIDE { return BodyId(); }
   BailoutId BodyId() const { return BailoutId(local_id(1)); }
 
-  bool is_fast_smi_loop() { return loop_variable_ != NULL; }
-  Variable* loop_variable() { return loop_variable_; }
-  void set_loop_variable(Variable* var) { loop_variable_ = var; }
-
  protected:
   ForStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
       : IterationStatement(zone, labels, pos),
         init_(NULL),
         cond_(NULL),
-        next_(NULL),
-        may_have_function_literal_(true),
-        loop_variable_(NULL) {}
+        next_(NULL) {}
   static int parent_num_ids() { return IterationStatement::num_ids(); }
 
  private:
@@ -910,10 +880,6 @@ class ForStatement FINAL : public IterationStatement {
   Statement* init_;
   Expression* cond_;
   Statement* next_;
-
-  // True if there is a function literal subexpression in the condition.
-  bool may_have_function_literal_;
-  Variable* loop_variable_;
 };
 
 
@@ -952,10 +918,11 @@ class ForInStatement FINAL : public ForEachStatement {
   }
 
   // Type feedback information.
-  virtual FeedbackVectorRequirements ComputeFeedbackRequirements() OVERRIDE {
+  virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
+      Isolate* isolate) OVERRIDE {
     return FeedbackVectorRequirements(1, 0);
   }
-  virtual void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
+  void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
     for_in_feedback_slot_ = slot;
   }
 
@@ -973,8 +940,8 @@ class ForInStatement FINAL : public ForEachStatement {
   BailoutId PrepareId() const { return BailoutId(local_id(1)); }
   BailoutId EnumId() const { return BailoutId(local_id(2)); }
   BailoutId ToObjectId() const { return BailoutId(local_id(3)); }
-  virtual BailoutId ContinueId() const OVERRIDE { return EntryId(); }
-  virtual BailoutId StackCheckId() const OVERRIDE { return BodyId(); }
+  BailoutId ContinueId() const OVERRIDE { return EntryId(); }
+  BailoutId StackCheckId() const OVERRIDE { return BodyId(); }
 
  protected:
   ForInStatement(Zone* zone, ZoneList<const AstRawString*>* labels, int pos)
@@ -1033,8 +1000,8 @@ class ForOfStatement FINAL : public ForEachStatement {
     return assign_each_;
   }
 
-  virtual BailoutId ContinueId() const OVERRIDE { return EntryId(); }
-  virtual BailoutId StackCheckId() const OVERRIDE { return BackEdgeId(); }
+  BailoutId ContinueId() const OVERRIDE { return EntryId(); }
+  BailoutId StackCheckId() const OVERRIDE { return BackEdgeId(); }
 
   static int num_ids() { return parent_num_ids() + 1; }
   BailoutId BackEdgeId() const { return BailoutId(local_id(0)); }
@@ -1064,7 +1031,7 @@ class ExpressionStatement FINAL : public Statement {
 
   void set_expression(Expression* e) { expression_ = e; }
   Expression* expression() const { return expression_; }
-  virtual bool IsJump() const OVERRIDE { return expression_->IsThrow(); }
+  bool IsJump() const OVERRIDE { return expression_->IsThrow(); }
 
  protected:
   ExpressionStatement(Zone* zone, Expression* expression, int pos)
@@ -1077,7 +1044,7 @@ class ExpressionStatement FINAL : public Statement {
 
 class JumpStatement : public Statement {
  public:
-  virtual bool IsJump() const FINAL OVERRIDE { return true; }
+  bool IsJump() const FINAL { return true; }
 
  protected:
   explicit JumpStatement(Zone* zone, int pos) : Statement(zone, pos) {}
@@ -1227,7 +1194,7 @@ class IfStatement FINAL : public Statement {
   Statement* then_statement() const { return then_statement_; }
   Statement* else_statement() const { return else_statement_; }
 
-  virtual bool IsJump() const OVERRIDE {
+  bool IsJump() const OVERRIDE {
     return HasThenStatement() && then_statement()->IsJump()
         && HasElseStatement() && else_statement()->IsJump();
   }
@@ -1276,9 +1243,9 @@ class TargetCollector FINAL : public AstNode {
   void AddTarget(Label* target, Zone* zone);
 
   // Virtual behaviour. TargetCollectors are never part of the AST.
-  virtual void Accept(AstVisitor* v) OVERRIDE { UNREACHABLE(); }
-  virtual NodeType node_type() const OVERRIDE { return kInvalid; }
-  virtual TargetCollector* AsTargetCollector() OVERRIDE { return this; }
+  void Accept(AstVisitor* v) OVERRIDE { UNREACHABLE(); }
+  NodeType node_type() const OVERRIDE { return kInvalid; }
+  TargetCollector* AsTargetCollector() OVERRIDE { return this; }
 
   ZoneList<Label*>* targets() { return &targets_; }
 
@@ -1397,9 +1364,7 @@ class Literal FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(Literal)
 
-  virtual bool IsPropertyName() const OVERRIDE {
-    return value_->IsPropertyName();
-  }
+  bool IsPropertyName() const OVERRIDE { return value_->IsPropertyName(); }
 
   Handle<String> AsPropertyName() {
     DCHECK(IsPropertyName());
@@ -1411,12 +1376,8 @@ class Literal FINAL : public Expression {
     return value_->AsString();
   }
 
-  virtual bool ToBooleanIsTrue() const OVERRIDE {
-    return value()->BooleanValue();
-  }
-  virtual bool ToBooleanIsFalse() const OVERRIDE {
-    return !value()->BooleanValue();
-  }
+  bool ToBooleanIsTrue() const OVERRIDE { return value()->BooleanValue(); }
+  bool ToBooleanIsFalse() const OVERRIDE { return !value()->BooleanValue(); }
 
   Handle<Object> value() const { return value_->value(); }
   const AstValue* raw_value() const { return value_; }
@@ -1526,7 +1487,7 @@ class ObjectLiteralProperty FINAL : public ZoneObject {
   bool is_static() const { return is_static_; }
 
  protected:
-  template<class> friend class AstNodeFactory;
+  friend class AstNodeFactory;
 
   ObjectLiteralProperty(Zone* zone, bool is_getter, FunctionLiteral* value,
                         bool is_static);
@@ -1687,7 +1648,7 @@ class VariableProxy FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(VariableProxy)
 
-  virtual bool IsValidReferenceExpression() const OVERRIDE {
+  bool IsValidReferenceExpression() const OVERRIDE {
     return !is_resolved() || var()->IsValidReference();
   }
 
@@ -1725,14 +1686,21 @@ class VariableProxy FINAL : public Expression {
   // Bind this proxy to the variable var. Interfaces must match.
   void BindTo(Variable* var);
 
-  virtual FeedbackVectorRequirements ComputeFeedbackRequirements() OVERRIDE {
-    return FeedbackVectorRequirements(0, FLAG_vector_ics ? 1 : 0);
+  bool UsesVariableFeedbackSlot() const {
+    return FLAG_vector_ics && (var()->IsUnallocated() || var()->IsLookupSlot());
   }
-  virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
-    variable_feedback_slot_ = slot;
+
+  virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
+      Isolate* isolate) OVERRIDE {
+    return FeedbackVectorRequirements(0, UsesVariableFeedbackSlot() ? 1 : 0);
   }
 
+  void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+    variable_feedback_slot_ = slot;
+  }
+  Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::LOAD_IC; }
   FeedbackVectorICSlot VariableFeedbackSlot() {
+    DCHECK(!UsesVariableFeedbackSlot() || !variable_feedback_slot_.IsInvalid());
     return variable_feedback_slot_;
   }
 
@@ -1762,7 +1730,7 @@ class Property FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(Property)
 
-  virtual bool IsValidReferenceExpression() const OVERRIDE { return true; }
+  bool IsValidReferenceExpression() const OVERRIDE { return true; }
 
   Expression* obj() const { return obj_; }
   Expression* key() const { return key_; }
@@ -1776,18 +1744,11 @@ class Property FINAL : public Expression {
   }
 
   // Type feedback information.
-  virtual bool IsMonomorphic() OVERRIDE {
-    return receiver_types_.length() == 1;
-  }
-  virtual SmallMapList* GetReceiverTypes() OVERRIDE {
-    return &receiver_types_;
-  }
-  virtual KeyedAccessStoreMode GetStoreMode() const OVERRIDE {
-    return STANDARD_STORE;
-  }
-  virtual IcCheckType GetKeyType() const OVERRIDE {
-    // PROPERTY key types currently aren't implemented for KeyedLoadICs.
-    return ELEMENT;
+  bool IsMonomorphic() OVERRIDE { return receiver_types_.length() == 1; }
+  SmallMapList* GetReceiverTypes() OVERRIDE { return &receiver_types_; }
+  KeyedAccessStoreMode GetStoreMode() const OVERRIDE { return STANDARD_STORE; }
+  IcCheckType GetKeyType() const OVERRIDE {
+    return KeyTypeField::decode(bit_field_);
   }
   bool IsUninitialized() const {
     return !is_for_call() && HasNoTypeInformation();
@@ -1801,6 +1762,9 @@ class Property FINAL : public Expression {
   void set_is_string_access(bool b) {
     bit_field_ = IsStringAccessField::update(bit_field_, b);
   }
+  void set_key_type(IcCheckType key_type) {
+    bit_field_ = KeyTypeField::update(bit_field_, key_type);
+  }
   void mark_for_call() {
     bit_field_ = IsForCallField::update(bit_field_, true);
   }
@@ -1810,14 +1774,19 @@ class Property FINAL : public Expression {
     return obj()->IsSuperReference();
   }
 
-  virtual FeedbackVectorRequirements ComputeFeedbackRequirements() OVERRIDE {
+  virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
+      Isolate* isolate) OVERRIDE {
     return FeedbackVectorRequirements(0, FLAG_vector_ics ? 1 : 0);
   }
-  virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+  void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
     property_feedback_slot_ = slot;
   }
+  Code::Kind FeedbackICSlotKind(int index) OVERRIDE {
+    return key()->IsPropertyName() ? Code::LOAD_IC : Code::KEYED_LOAD_IC;
+  }
 
   FeedbackVectorICSlot PropertyFeedbackSlot() const {
+    DCHECK(!FLAG_vector_ics || !property_feedback_slot_.IsInvalid());
     return property_feedback_slot_;
   }
 
@@ -1838,6 +1807,7 @@ class Property FINAL : public Expression {
   class IsForCallField : public BitField8<bool, 0, 1> {};
   class IsUninitializedField : public BitField8<bool, 1, 1> {};
   class IsStringAccessField : public BitField8<bool, 2, 1> {};
+  class KeyTypeField : public BitField8<IcCheckType, 3, 1> {};
   uint8_t bit_field_;
   FeedbackVectorICSlot property_feedback_slot_;
   Expression* obj_;
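Note: the new FeedbackICSlotKind override lets the numbering pass label
each IC slot with the load form it will feed, distinguishing the two
shapes of property access:

    function named(o) { return o.x; }      // key is a property name -> LOAD_IC
    function keyed(o, k) { return o[k]; }  // computed key -> KEYED_LOAD_IC
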
@@ -1854,24 +1824,27 @@ class Call FINAL : public Expression {
   ZoneList<Expression*>* arguments() const { return arguments_; }
 
   // Type feedback information.
-  virtual FeedbackVectorRequirements ComputeFeedbackRequirements() OVERRIDE {
-    return FeedbackVectorRequirements(0, 1);
-  }
-  virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+  virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
+      Isolate* isolate) OVERRIDE;
+  void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
     call_feedback_slot_ = slot;
   }
+  Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::CALL_IC; }
 
   bool HasCallFeedbackSlot() const { return !call_feedback_slot_.IsInvalid(); }
-  FeedbackVectorICSlot CallFeedbackSlot() const { return call_feedback_slot_; }
+  FeedbackVectorICSlot CallFeedbackSlot() const {
+    DCHECK(!call_feedback_slot_.IsInvalid());
+    return call_feedback_slot_;
+  }
 
-  virtual SmallMapList* GetReceiverTypes() OVERRIDE {
+  SmallMapList* GetReceiverTypes() OVERRIDE {
     if (expression()->IsProperty()) {
       return expression()->AsProperty()->GetReceiverTypes();
     }
     return NULL;
   }
 
-  virtual bool IsMonomorphic() OVERRIDE {
+  bool IsMonomorphic() OVERRIDE {
     if (expression()->IsProperty()) {
       return expression()->AsProperty()->IsMonomorphic();
     }
@@ -1903,6 +1876,13 @@ class Call FINAL : public Expression {
   BailoutId ReturnId() const { return BailoutId(local_id(0)); }
   BailoutId EvalOrLookupId() const { return BailoutId(local_id(1)); }
 
+  bool is_uninitialized() const {
+    return IsUninitializedField::decode(bit_field_);
+  }
+  void set_is_uninitialized(bool b) {
+    bit_field_ = IsUninitializedField::update(bit_field_, b);
+  }
+
   enum CallType {
     POSSIBLY_EVAL_CALL,
     GLOBAL_CALL,
@@ -1927,7 +1907,8 @@ class Call FINAL : public Expression {
       : Expression(zone, pos),
         call_feedback_slot_(FeedbackVectorICSlot::Invalid()),
         expression_(expression),
-        arguments_(arguments) {
+        arguments_(arguments),
+        bit_field_(IsUninitializedField::encode(false)) {
     if (expression->IsProperty()) {
       expression->AsProperty()->mark_for_call();
     }
@@ -1943,6 +1924,8 @@ class Call FINAL : public Expression {
   Handle<JSFunction> target_;
   Handle<Cell> cell_;
   Handle<AllocationSite> allocation_site_;
+  class IsUninitializedField : public BitField8<bool, 0, 1> {};
+  uint8_t bit_field_;
 };
 
 
@@ -1954,21 +1937,25 @@ class CallNew FINAL : public Expression {
   ZoneList<Expression*>* arguments() const { return arguments_; }
 
   // Type feedback information.
-  virtual FeedbackVectorRequirements ComputeFeedbackRequirements() OVERRIDE {
+  virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
+      Isolate* isolate) OVERRIDE {
     return FeedbackVectorRequirements(FLAG_pretenuring_call_new ? 2 : 1, 0);
   }
-  virtual void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
+  void SetFirstFeedbackSlot(FeedbackVectorSlot slot) OVERRIDE {
     callnew_feedback_slot_ = slot;
   }
 
-  FeedbackVectorSlot CallNewFeedbackSlot() { return callnew_feedback_slot_; }
+  FeedbackVectorSlot CallNewFeedbackSlot() {
+    DCHECK(!callnew_feedback_slot_.IsInvalid());
+    return callnew_feedback_slot_;
+  }
   FeedbackVectorSlot AllocationSiteFeedbackSlot() {
     DCHECK(FLAG_pretenuring_call_new);
     return CallNewFeedbackSlot().next();
   }
 
   void RecordTypeFeedback(TypeFeedbackOracle* oracle);
-  virtual bool IsMonomorphic() OVERRIDE { return is_monomorphic_; }
+  bool IsMonomorphic() OVERRIDE { return is_monomorphic_; }
   Handle<JSFunction> target() const { return target_; }
   Handle<AllocationSite> allocation_site() const {
     return allocation_site_;
@@ -2016,15 +2003,21 @@ class CallRuntime FINAL : public Expression {
   bool is_jsruntime() const { return function_ == NULL; }
 
   // Type feedback information.
-  virtual FeedbackVectorRequirements ComputeFeedbackRequirements() OVERRIDE {
-    return FeedbackVectorRequirements(
-        0, (FLAG_vector_ics && is_jsruntime()) ? 1 : 0);
+  bool HasCallRuntimeFeedbackSlot() const {
+    return FLAG_vector_ics && is_jsruntime();
   }
-  virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+  virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
+      Isolate* isolate) OVERRIDE {
+    return FeedbackVectorRequirements(0, HasCallRuntimeFeedbackSlot() ? 1 : 0);
+  }
+  void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
     callruntime_feedback_slot_ = slot;
   }
+  Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::LOAD_IC; }
 
   FeedbackVectorICSlot CallRuntimeFeedbackSlot() {
+    DCHECK(!HasCallRuntimeFeedbackSlot() ||
+           !callruntime_feedback_slot_.IsInvalid());
     return callruntime_feedback_slot_;
   }
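Every ComputeFeedbackRequirements override now returns a (slots, ic_slots) pair and takes the Isolate, so a node's requirements can depend on flags and isolate state. The pair's shape below is inferred from the reqs.slots()/reqs.ic_slots() calls in the visitor code removed further down; treat it as a sketch:

    class FeedbackVectorRequirements {
     public:
      FeedbackVectorRequirements(int slots, int ic_slots)
          : slots_(slots), ic_slots_(ic_slots) {}
      int slots() const { return slots_; }        // plain feedback slots
      int ic_slots() const { return ic_slots_; }  // inline-cache slots

     private:
      int slots_;
      int ic_slots_;
    };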
 
@@ -2089,7 +2082,7 @@ class BinaryOperation FINAL : public Expression {
  public:
   DECLARE_NODE_TYPE(BinaryOperation)
 
-  virtual bool ResultOverwriteAllowed() const OVERRIDE;
+  bool ResultOverwriteAllowed() const OVERRIDE;
 
   Token::Value op() const { return static_cast<Token::Value>(op_); }
   Expression* left() const { return left_; }
@@ -2160,16 +2153,12 @@ class CountOperation FINAL : public Expression {
 
   Expression* expression() const { return expression_; }
 
-  virtual bool IsMonomorphic() OVERRIDE {
-    return receiver_types_.length() == 1;
-  }
-  virtual SmallMapList* GetReceiverTypes() OVERRIDE {
-    return &receiver_types_;
-  }
-  virtual IcCheckType GetKeyType() const OVERRIDE {
+  bool IsMonomorphic() OVERRIDE { return receiver_types_.length() == 1; }
+  SmallMapList* GetReceiverTypes() OVERRIDE { return &receiver_types_; }
+  IcCheckType GetKeyType() const OVERRIDE {
     return KeyTypeField::decode(bit_field_);
   }
-  virtual KeyedAccessStoreMode GetStoreMode() const OVERRIDE {
+  KeyedAccessStoreMode GetStoreMode() const OVERRIDE {
     return StoreModeField::decode(bit_field_);
   }
   Type* type() const { return type_; }
@@ -2314,22 +2303,18 @@ class Assignment FINAL : public Expression {
 
   // Type feedback information.
   TypeFeedbackId AssignmentFeedbackId() { return TypeFeedbackId(local_id(1)); }
-  virtual bool IsMonomorphic() OVERRIDE {
-    return receiver_types_.length() == 1;
-  }
+  bool IsMonomorphic() OVERRIDE { return receiver_types_.length() == 1; }
   bool IsUninitialized() const {
     return IsUninitializedField::decode(bit_field_);
   }
   bool HasNoTypeInformation() {
     return IsUninitializedField::decode(bit_field_);
   }
-  virtual SmallMapList* GetReceiverTypes() OVERRIDE {
-    return &receiver_types_;
-  }
-  virtual IcCheckType GetKeyType() const OVERRIDE {
+  SmallMapList* GetReceiverTypes() OVERRIDE { return &receiver_types_; }
+  IcCheckType GetKeyType() const OVERRIDE {
     return KeyTypeField::decode(bit_field_);
   }
-  virtual KeyedAccessStoreMode GetStoreMode() const OVERRIDE {
+  KeyedAccessStoreMode GetStoreMode() const OVERRIDE {
     return StoreModeField::decode(bit_field_);
   }
   void set_is_uninitialized(bool b) {
@@ -2347,15 +2332,6 @@ class Assignment FINAL : public Expression {
              int pos);
   static int parent_num_ids() { return Expression::num_ids(); }
 
-  template <class Visitor>
-  void Init(AstNodeFactory<Visitor>* factory) {
-    DCHECK(Token::IsAssignmentOp(op()));
-    if (is_compound()) {
-      binary_operation_ = factory->NewBinaryOperation(
-          binary_op(), target_, value_, position() + 1);
-    }
-  }
-
  private:
   int local_id(int n) const { return base_id() + parent_num_ids() + n; }
 
@@ -2402,15 +2378,22 @@ class Yield FINAL : public Expression {
   }
 
   // Type feedback information.
-  virtual FeedbackVectorRequirements ComputeFeedbackRequirements() OVERRIDE {
-    return FeedbackVectorRequirements(
-        0, (FLAG_vector_ics && yield_kind() == kDelegating) ? 3 : 0);
+  bool HasFeedbackSlots() const {
+    return FLAG_vector_ics && (yield_kind() == kDelegating);
+  }
+  virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
+      Isolate* isolate) OVERRIDE {
+    return FeedbackVectorRequirements(0, HasFeedbackSlots() ? 3 : 0);
   }
-  virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+  void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
     yield_first_feedback_slot_ = slot;
   }
+  Code::Kind FeedbackICSlotKind(int index) OVERRIDE {
+    return index == 0 ? Code::KEYED_LOAD_IC : Code::LOAD_IC;
+  }
 
   FeedbackVectorICSlot KeyedLoadFeedbackSlot() {
+    DCHECK(!HasFeedbackSlots() || !yield_first_feedback_slot_.IsInvalid());
     return yield_first_feedback_slot_;
   }
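A delegating yield reserves three consecutive IC slots: a keyed load of the iterator result, then loads of its done and value properties, matching the KEYED_LOAD_IC-then-LOAD_IC kinds reported by FeedbackICSlotKind above. Only the first accessor appears in this hunk; the other two plausibly chain with next() in the style of CallNew::AllocationSiteFeedbackSlot above, though the names here are assumptions, not from the source:

    FeedbackVectorICSlot DoneFeedbackSlot() {    // assumed name
      return KeyedLoadFeedbackSlot().next();
    }
    FeedbackVectorICSlot ValueFeedbackSlot() {   // assumed name
      return DoneFeedbackSlot().next();
    }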
 
@@ -2497,6 +2480,13 @@ class FunctionLiteral FINAL : public Expression {
   bool is_expression() const { return IsExpression::decode(bitfield_); }
   bool is_anonymous() const { return IsAnonymous::decode(bitfield_); }
   StrictMode strict_mode() const;
+  bool uses_super_property() const;
+  bool uses_super_constructor_call() const;
+
+  static bool NeedsHomeObject(Expression* literal) {
+    return literal != NULL && literal->IsFunctionLiteral() &&
+           literal->AsFunctionLiteral()->uses_super_property();
+  }
 
   int materialized_literal_count() { return materialized_literal_count_; }
   int expected_property_count() { return expected_property_count_; }
@@ -2576,16 +2566,18 @@ class FunctionLiteral FINAL : public Expression {
   bool is_concise_method() {
     return IsConciseMethod(FunctionKindBits::decode(bitfield_));
   }
+  bool is_default_constructor() {
+    return IsDefaultConstructor(FunctionKindBits::decode(bitfield_));
+  }
 
   int ast_node_count() { return ast_properties_.node_count(); }
   AstProperties::Flags* flags() { return ast_properties_.flags(); }
   void set_ast_properties(AstProperties* ast_properties) {
     ast_properties_ = *ast_properties;
   }
-  int slot_count() {
-    return ast_properties_.feedback_slots();
+  const FeedbackVectorSpec& feedback_vector_spec() const {
+    return ast_properties_.get_spec();
   }
-  int ic_slot_count() { return ast_properties_.ic_feedback_slots(); }
   bool dont_optimize() { return dont_optimize_reason_ != kNoReason; }
   BailoutReason dont_optimize_reason() { return dont_optimize_reason_; }
   void set_dont_optimize_reason(BailoutReason reason) {
@@ -2647,7 +2639,7 @@ class FunctionLiteral FINAL : public Expression {
   class HasDuplicateParameters : public BitField<ParameterFlag, 3, 1> {};
   class IsFunction : public BitField<IsFunctionFlag, 4, 1> {};
   class IsParenthesized : public BitField<IsParenthesizedFlag, 5, 1> {};
-  class FunctionKindBits : public BitField<FunctionKind, 6, 3> {};
+  class FunctionKindBits : public BitField<FunctionKind, 6, 4> {};
 };
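FunctionKindBits grows from 3 to 4 bits because FunctionKind gained new values in this upgrade (is_default_constructor() above is one consumer), and 4 bits cover kinds 0 through 15. A compile-time guard of this form, hypothetical rather than from the source, is the usual way to keep such widths honest:

    static_assert(kMaxFunctionKind <= 15,  // kMaxFunctionKind is hypothetical
                  "FunctionKindBits must be widened when FunctionKind grows");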
 
 
@@ -2659,6 +2651,8 @@ class ClassLiteral FINAL : public Expression {
 
   Handle<String> name() const { return raw_name_->string(); }
   const AstRawString* raw_name() const { return raw_name_; }
+  Scope* scope() const { return scope_; }
+  VariableProxy* class_variable_proxy() const { return class_variable_proxy_; }
   Expression* extends() const { return extends_; }
   Expression* constructor() const { return constructor_; }
   ZoneList<Property*>* properties() const { return properties_; }
@@ -2666,11 +2660,14 @@ class ClassLiteral FINAL : public Expression {
   int end_position() const { return end_position_; }
 
  protected:
-  ClassLiteral(Zone* zone, const AstRawString* name, Expression* extends,
+  ClassLiteral(Zone* zone, const AstRawString* name, Scope* scope,
+               VariableProxy* class_variable_proxy, Expression* extends,
                Expression* constructor, ZoneList<Property*>* properties,
                int start_position, int end_position)
       : Expression(zone, start_position),
         raw_name_(name),
+        scope_(scope),
+        class_variable_proxy_(class_variable_proxy),
         extends_(extends),
         constructor_(constructor),
         properties_(properties),
@@ -2678,6 +2675,8 @@ class ClassLiteral FINAL : public Expression {
 
  private:
   const AstRawString* raw_name_;
+  Scope* scope_;
+  VariableProxy* class_variable_proxy_;
   Expression* extends_;
   Expression* constructor_;
   ZoneList<Property*>* properties_;
@@ -2722,12 +2721,14 @@ class SuperReference FINAL : public Expression {
   TypeFeedbackId HomeObjectFeedbackId() { return TypeFeedbackId(local_id(0)); }
 
   // Type feedback information.
-  virtual FeedbackVectorRequirements ComputeFeedbackRequirements() OVERRIDE {
+  virtual FeedbackVectorRequirements ComputeFeedbackRequirements(
+      Isolate* isolate) OVERRIDE {
     return FeedbackVectorRequirements(0, FLAG_vector_ics ? 1 : 0);
   }
-  virtual void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
+  void SetFirstFeedbackICSlot(FeedbackVectorICSlot slot) OVERRIDE {
     homeobject_feedback_slot_ = slot;
   }
+  Code::Kind FeedbackICSlotKind(int index) OVERRIDE { return Code::LOAD_IC; }
 
   FeedbackVectorICSlot HomeObjectFeedbackSlot() {
     DCHECK(!FLAG_vector_ics || !homeobject_feedback_slot_.IsInvalid());
@@ -2796,16 +2797,16 @@ class RegExpTree : public ZoneObject {
 class RegExpDisjunction FINAL : public RegExpTree {
  public:
   explicit RegExpDisjunction(ZoneList<RegExpTree*>* alternatives);
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
-  virtual RegExpDisjunction* AsDisjunction() OVERRIDE;
-  virtual Interval CaptureRegisters() OVERRIDE;
-  virtual bool IsDisjunction() OVERRIDE;
-  virtual bool IsAnchoredAtStart() OVERRIDE;
-  virtual bool IsAnchoredAtEnd() OVERRIDE;
-  virtual int min_match() OVERRIDE { return min_match_; }
-  virtual int max_match() OVERRIDE { return max_match_; }
+  RegExpDisjunction* AsDisjunction() OVERRIDE;
+  Interval CaptureRegisters() OVERRIDE;
+  bool IsDisjunction() OVERRIDE;
+  bool IsAnchoredAtStart() OVERRIDE;
+  bool IsAnchoredAtEnd() OVERRIDE;
+  int min_match() OVERRIDE { return min_match_; }
+  int max_match() OVERRIDE { return max_match_; }
   ZoneList<RegExpTree*>* alternatives() { return alternatives_; }
  private:
   ZoneList<RegExpTree*>* alternatives_;
@@ -2817,16 +2818,16 @@ class RegExpDisjunction FINAL : public RegExpTree {
 class RegExpAlternative FINAL : public RegExpTree {
  public:
   explicit RegExpAlternative(ZoneList<RegExpTree*>* nodes);
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
-  virtual RegExpAlternative* AsAlternative() OVERRIDE;
-  virtual Interval CaptureRegisters() OVERRIDE;
-  virtual bool IsAlternative() OVERRIDE;
-  virtual bool IsAnchoredAtStart() OVERRIDE;
-  virtual bool IsAnchoredAtEnd() OVERRIDE;
-  virtual int min_match() OVERRIDE { return min_match_; }
-  virtual int max_match() OVERRIDE { return max_match_; }
+  RegExpAlternative* AsAlternative() OVERRIDE;
+  Interval CaptureRegisters() OVERRIDE;
+  bool IsAlternative() OVERRIDE;
+  bool IsAnchoredAtStart() OVERRIDE;
+  bool IsAnchoredAtEnd() OVERRIDE;
+  int min_match() OVERRIDE { return min_match_; }
+  int max_match() OVERRIDE { return max_match_; }
   ZoneList<RegExpTree*>* nodes() { return nodes_; }
  private:
   ZoneList<RegExpTree*>* nodes_;
@@ -2846,15 +2847,15 @@ class RegExpAssertion FINAL : public RegExpTree {
     NON_BOUNDARY
   };
   explicit RegExpAssertion(AssertionType type) : assertion_type_(type) { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
-  virtual RegExpAssertion* AsAssertion() OVERRIDE;
-  virtual bool IsAssertion() OVERRIDE;
-  virtual bool IsAnchoredAtStart() OVERRIDE;
-  virtual bool IsAnchoredAtEnd() OVERRIDE;
-  virtual int min_match() OVERRIDE { return 0; }
-  virtual int max_match() OVERRIDE { return 0; }
+  RegExpAssertion* AsAssertion() OVERRIDE;
+  bool IsAssertion() OVERRIDE;
+  bool IsAnchoredAtStart() OVERRIDE;
+  bool IsAnchoredAtEnd() OVERRIDE;
+  int min_match() OVERRIDE { return 0; }
+  int max_match() OVERRIDE { return 0; }
   AssertionType assertion_type() { return assertion_type_; }
  private:
   AssertionType assertion_type_;
@@ -2892,15 +2893,15 @@ class RegExpCharacterClass FINAL : public RegExpTree {
   explicit RegExpCharacterClass(uc16 type)
       : set_(type),
         is_negated_(false) { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
-  virtual RegExpCharacterClass* AsCharacterClass() OVERRIDE;
-  virtual bool IsCharacterClass() OVERRIDE;
-  virtual bool IsTextElement() OVERRIDE { return true; }
-  virtual int min_match() OVERRIDE { return 1; }
-  virtual int max_match() OVERRIDE { return 1; }
-  virtual void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
+  RegExpCharacterClass* AsCharacterClass() OVERRIDE;
+  bool IsCharacterClass() OVERRIDE;
+  bool IsTextElement() OVERRIDE { return true; }
+  int min_match() OVERRIDE { return 1; }
+  int max_match() OVERRIDE { return 1; }
+  void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
   CharacterSet character_set() { return set_; }
   // TODO(lrn): Remove need for complex version if is_standard that
   // recognizes a mangled standard set and just do { return set_.is_special(); }
@@ -2929,15 +2930,15 @@ class RegExpCharacterClass FINAL : public RegExpTree {
 class RegExpAtom FINAL : public RegExpTree {
  public:
   explicit RegExpAtom(Vector<const uc16> data) : data_(data) { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
-  virtual RegExpAtom* AsAtom() OVERRIDE;
-  virtual bool IsAtom() OVERRIDE;
-  virtual bool IsTextElement() OVERRIDE { return true; }
-  virtual int min_match() OVERRIDE { return data_.length(); }
-  virtual int max_match() OVERRIDE { return data_.length(); }
-  virtual void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
+  RegExpAtom* AsAtom() OVERRIDE;
+  bool IsAtom() OVERRIDE;
+  bool IsTextElement() OVERRIDE { return true; }
+  int min_match() OVERRIDE { return data_.length(); }
+  int max_match() OVERRIDE { return data_.length(); }
+  void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
   Vector<const uc16> data() { return data_; }
   int length() { return data_.length(); }
  private:
@@ -2948,15 +2949,15 @@ class RegExpAtom FINAL : public RegExpTree {
 class RegExpText FINAL : public RegExpTree {
  public:
   explicit RegExpText(Zone* zone) : elements_(2, zone), length_(0) {}
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
-  virtual RegExpText* AsText() OVERRIDE;
-  virtual bool IsText() OVERRIDE;
-  virtual bool IsTextElement() OVERRIDE { return true; }
-  virtual int min_match() OVERRIDE { return length_; }
-  virtual int max_match() OVERRIDE { return length_; }
-  virtual void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
+  RegExpText* AsText() OVERRIDE;
+  bool IsText() OVERRIDE;
+  bool IsTextElement() OVERRIDE { return true; }
+  int min_match() OVERRIDE { return length_; }
+  int max_match() OVERRIDE { return length_; }
+  void AppendToText(RegExpText* text, Zone* zone) OVERRIDE;
   void AddElement(TextElement elm, Zone* zone)  {
     elements_.Add(elm, zone);
     length_ += elm.length();
@@ -2983,7 +2984,7 @@ class RegExpQuantifier FINAL : public RegExpTree {
       max_match_ = max * body->max_match();
     }
   }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
   static RegExpNode* ToNode(int min,
@@ -2993,11 +2994,11 @@ class RegExpQuantifier FINAL : public RegExpTree {
                             RegExpCompiler* compiler,
                             RegExpNode* on_success,
                             bool not_at_start = false);
-  virtual RegExpQuantifier* AsQuantifier() OVERRIDE;
-  virtual Interval CaptureRegisters() OVERRIDE;
-  virtual bool IsQuantifier() OVERRIDE;
-  virtual int min_match() OVERRIDE { return min_match_; }
-  virtual int max_match() OVERRIDE { return max_match_; }
+  RegExpQuantifier* AsQuantifier() OVERRIDE;
+  Interval CaptureRegisters() OVERRIDE;
+  bool IsQuantifier() OVERRIDE;
+  int min_match() OVERRIDE { return min_match_; }
+  int max_match() OVERRIDE { return max_match_; }
   int min() { return min_; }
   int max() { return max_; }
   bool is_possessive() { return quantifier_type_ == POSSESSIVE; }
@@ -3019,20 +3020,20 @@ class RegExpCapture FINAL : public RegExpTree {
  public:
   explicit RegExpCapture(RegExpTree* body, int index)
       : body_(body), index_(index) { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
   static RegExpNode* ToNode(RegExpTree* body,
                             int index,
                             RegExpCompiler* compiler,
                             RegExpNode* on_success);
-  virtual RegExpCapture* AsCapture() OVERRIDE;
-  virtual bool IsAnchoredAtStart() OVERRIDE;
-  virtual bool IsAnchoredAtEnd() OVERRIDE;
-  virtual Interval CaptureRegisters() OVERRIDE;
-  virtual bool IsCapture() OVERRIDE;
-  virtual int min_match() OVERRIDE { return body_->min_match(); }
-  virtual int max_match() OVERRIDE { return body_->max_match(); }
+  RegExpCapture* AsCapture() OVERRIDE;
+  bool IsAnchoredAtStart() OVERRIDE;
+  bool IsAnchoredAtEnd() OVERRIDE;
+  Interval CaptureRegisters() OVERRIDE;
+  bool IsCapture() OVERRIDE;
+  int min_match() OVERRIDE { return body_->min_match(); }
+  int max_match() OVERRIDE { return body_->max_match(); }
   RegExpTree* body() { return body_; }
   int index() { return index_; }
   static int StartRegister(int index) { return index * 2; }
@@ -3055,15 +3056,15 @@ class RegExpLookahead FINAL : public RegExpTree {
         capture_count_(capture_count),
         capture_from_(capture_from) { }
 
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
-  virtual RegExpLookahead* AsLookahead() OVERRIDE;
-  virtual Interval CaptureRegisters() OVERRIDE;
-  virtual bool IsLookahead() OVERRIDE;
-  virtual bool IsAnchoredAtStart() OVERRIDE;
-  virtual int min_match() OVERRIDE { return 0; }
-  virtual int max_match() OVERRIDE { return 0; }
+  RegExpLookahead* AsLookahead() OVERRIDE;
+  Interval CaptureRegisters() OVERRIDE;
+  bool IsLookahead() OVERRIDE;
+  bool IsAnchoredAtStart() OVERRIDE;
+  int min_match() OVERRIDE { return 0; }
+  int max_match() OVERRIDE { return 0; }
   RegExpTree* body() { return body_; }
   bool is_positive() { return is_positive_; }
   int capture_count() { return capture_count_; }
@@ -3081,13 +3082,13 @@ class RegExpBackReference FINAL : public RegExpTree {
  public:
   explicit RegExpBackReference(RegExpCapture* capture)
       : capture_(capture) { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
-  virtual RegExpBackReference* AsBackReference() OVERRIDE;
-  virtual bool IsBackReference() OVERRIDE;
-  virtual int min_match() OVERRIDE { return 0; }
-  virtual int max_match() OVERRIDE { return capture_->max_match(); }
+  RegExpBackReference* AsBackReference() OVERRIDE;
+  bool IsBackReference() OVERRIDE;
+  int min_match() OVERRIDE { return 0; }
+  int max_match() OVERRIDE { return capture_->max_match(); }
   int index() { return capture_->index(); }
   RegExpCapture* capture() { return capture_; }
  private:
@@ -3098,17 +3099,13 @@ class RegExpBackReference FINAL : public RegExpTree {
 class RegExpEmpty FINAL : public RegExpTree {
  public:
   RegExpEmpty() { }
-  virtual void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
+  void* Accept(RegExpVisitor* visitor, void* data) OVERRIDE;
   virtual RegExpNode* ToNode(RegExpCompiler* compiler,
                              RegExpNode* on_success) OVERRIDE;
-  virtual RegExpEmpty* AsEmpty() OVERRIDE;
-  virtual bool IsEmpty() OVERRIDE;
-  virtual int min_match() OVERRIDE { return 0; }
-  virtual int max_match() OVERRIDE { return 0; }
-  static RegExpEmpty* GetInstance() {
-    static RegExpEmpty* instance = ::new RegExpEmpty();
-    return instance;
-  }
+  RegExpEmpty* AsEmpty() OVERRIDE;
+  bool IsEmpty() OVERRIDE;
+  int min_match() OVERRIDE { return 0; }
+  int max_match() OVERRIDE { return 0; }
 };
 
 
@@ -3146,123 +3143,49 @@ class AstVisitor BASE_EMBEDDED {
 };
 
 
-#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS()                       \
-public:                                                             \
-  virtual void Visit(AstNode* node) FINAL OVERRIDE {          \
-    if (!CheckStackOverflow()) node->Accept(this);                  \
-  }                                                                 \
-                                                                    \
-  void SetStackOverflow() { stack_overflow_ = true; }               \
-  void ClearStackOverflow() { stack_overflow_ = false; }            \
-  bool HasStackOverflow() const { return stack_overflow_; }         \
-                                                                    \
-  bool CheckStackOverflow() {                                       \
-    if (stack_overflow_) return true;                               \
-    StackLimitCheck check(zone_->isolate());                        \
-    if (!check.HasOverflowed()) return false;                       \
-    return (stack_overflow_ = true);                                \
-  }                                                                 \
-                                                                    \
-private:                                                            \
-  void InitializeAstVisitor(Zone* zone) {                           \
-    zone_ = zone;                                                   \
-    stack_overflow_ = false;                                        \
-  }                                                                 \
-  Zone* zone() { return zone_; }                                    \
-  Isolate* isolate() { return zone_->isolate(); }                   \
-                                                                    \
-  Zone* zone_;                                                      \
+#define DEFINE_AST_VISITOR_SUBCLASS_MEMBERS()               \
+ public:                                                    \
+  void Visit(AstNode* node) FINAL {                         \
+    if (!CheckStackOverflow()) node->Accept(this);          \
+  }                                                         \
+                                                            \
+  void SetStackOverflow() { stack_overflow_ = true; }       \
+  void ClearStackOverflow() { stack_overflow_ = false; }    \
+  bool HasStackOverflow() const { return stack_overflow_; } \
+                                                            \
+  bool CheckStackOverflow() {                               \
+    if (stack_overflow_) return true;                       \
+    StackLimitCheck check(zone_->isolate());                \
+    if (!check.HasOverflowed()) return false;               \
+    return (stack_overflow_ = true);                        \
+  }                                                         \
+                                                            \
+ private:                                                   \
+  void InitializeAstVisitor(Zone* zone) {                   \
+    zone_ = zone;                                           \
+    stack_overflow_ = false;                                \
+  }                                                         \
+  Zone* zone() { return zone_; }                            \
+  Isolate* isolate() { return zone_->isolate(); }           \
+                                                            \
+  Zone* zone_;                                              \
   bool stack_overflow_
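The reformatted macro is unchanged in behavior: a subclass pastes it at the end of its body to get a stack-overflow-guarded Visit() plus the zone plumbing, and calls InitializeAstVisitor from its constructor. A hypothetical subclass for illustration:

    class ExampleVisitor FINAL : public AstVisitor {  // name is hypothetical
     public:
      explicit ExampleVisitor(Zone* zone) { InitializeAstVisitor(zone); }

      // Visit##NodeType overrides for each AST_NODE_LIST entry go here.

      DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
    };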
 
 
 // ----------------------------------------------------------------------------
-// Construction time visitor.
-
-class AstConstructionVisitor BASE_EMBEDDED {
- public:
-  AstConstructionVisitor()
-      : dont_crankshaft_reason_(kNoReason), dont_turbofan_reason_(kNoReason) {}
-
-  AstProperties* ast_properties() { return &properties_; }
-  BailoutReason dont_optimize_reason() {
-    if (dont_turbofan_reason_ != kNoReason) {
-      return dont_turbofan_reason_;
-    } else {
-      return dont_crankshaft_reason_;
-    }
-  }
-
- private:
-  template<class> friend class AstNodeFactory;
-
-  // Node visitors.
-#define DEF_VISIT(type) \
-  void Visit##type(type* node);
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-
-  void add_flag(AstPropertiesFlag flag) { properties_.flags()->Add(flag); }
-  void set_dont_crankshaft_reason(BailoutReason reason) {
-    dont_crankshaft_reason_ = reason;
-  }
-  void set_dont_turbofan_reason(BailoutReason reason) {
-    dont_turbofan_reason_ = reason;
-  }
-
-  void add_slot_node(AstNode* slot_node) {
-    FeedbackVectorRequirements reqs = slot_node->ComputeFeedbackRequirements();
-    if (reqs.slots() > 0) {
-      slot_node->SetFirstFeedbackSlot(
-          FeedbackVectorSlot(properties_.feedback_slots()));
-      properties_.increase_feedback_slots(reqs.slots());
-    }
-    if (reqs.ic_slots() > 0) {
-      slot_node->SetFirstFeedbackICSlot(
-          FeedbackVectorICSlot(properties_.ic_feedback_slots()));
-      properties_.increase_ic_feedback_slots(reqs.ic_slots());
-    }
-  }
-
-  AstProperties properties_;
-  BailoutReason dont_crankshaft_reason_;
-  BailoutReason dont_turbofan_reason_;
-};
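With AstConstructionVisitor gone, slot layout moves out of node construction into a separate pass (src/ast-numbering.cc is touched in this same commit). A hedged sketch of that pass's core step, transcribed from the removed add_slot_node above with the new Isolate* parameter; the function name and the AstProperties counters are assumed to survive the move:

    void AllocateFeedbackSlots(AstNode* node, Isolate* isolate,
                               AstProperties* properties) {
      FeedbackVectorRequirements reqs =
          node->ComputeFeedbackRequirements(isolate);
      if (reqs.slots() > 0) {
        node->SetFirstFeedbackSlot(
            FeedbackVectorSlot(properties->feedback_slots()));
        properties->increase_feedback_slots(reqs.slots());
      }
      if (reqs.ic_slots() > 0) {
        node->SetFirstFeedbackICSlot(
            FeedbackVectorICSlot(properties->ic_feedback_slots()));
        properties->increase_ic_feedback_slots(reqs.ic_slots());
      }
    }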
-
-
-class AstNullVisitor BASE_EMBEDDED {
- public:
-  // Node visitors.
-#define DEF_VISIT(type) \
-  void Visit##type(type* node) {}
-  AST_NODE_LIST(DEF_VISIT)
-#undef DEF_VISIT
-};
-
-
-
-// ----------------------------------------------------------------------------
 // AstNode factory
 
-template<class Visitor>
 class AstNodeFactory FINAL BASE_EMBEDDED {
  public:
   explicit AstNodeFactory(AstValueFactory* ast_value_factory)
       : zone_(ast_value_factory->zone()),
         ast_value_factory_(ast_value_factory) {}
 
-  Visitor* visitor() { return &visitor_; }
-
-#define VISIT_AND_RETURN(NodeType, node) \
-  visitor_.Visit##NodeType((node)); \
-  return node;
-
   VariableDeclaration* NewVariableDeclaration(VariableProxy* proxy,
                                               VariableMode mode,
                                               Scope* scope,
                                               int pos) {
-    VariableDeclaration* decl =
-        new(zone_) VariableDeclaration(zone_, proxy, mode, scope, pos);
-    VISIT_AND_RETURN(VariableDeclaration, decl)
+    return new (zone_) VariableDeclaration(zone_, proxy, mode, scope, pos);
   }
 
   FunctionDeclaration* NewFunctionDeclaration(VariableProxy* proxy,
@@ -3270,71 +3193,56 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
                                               FunctionLiteral* fun,
                                               Scope* scope,
                                               int pos) {
-    FunctionDeclaration* decl =
-        new(zone_) FunctionDeclaration(zone_, proxy, mode, fun, scope, pos);
-    VISIT_AND_RETURN(FunctionDeclaration, decl)
+    return new (zone_) FunctionDeclaration(zone_, proxy, mode, fun, scope, pos);
   }
 
   ModuleDeclaration* NewModuleDeclaration(VariableProxy* proxy,
                                           Module* module,
                                           Scope* scope,
                                           int pos) {
-    ModuleDeclaration* decl =
-        new(zone_) ModuleDeclaration(zone_, proxy, module, scope, pos);
-    VISIT_AND_RETURN(ModuleDeclaration, decl)
+    return new (zone_) ModuleDeclaration(zone_, proxy, module, scope, pos);
   }
 
   ImportDeclaration* NewImportDeclaration(VariableProxy* proxy,
                                           Module* module,
                                           Scope* scope,
                                           int pos) {
-    ImportDeclaration* decl =
-        new(zone_) ImportDeclaration(zone_, proxy, module, scope, pos);
-    VISIT_AND_RETURN(ImportDeclaration, decl)
+    return new (zone_) ImportDeclaration(zone_, proxy, module, scope, pos);
   }
 
   ExportDeclaration* NewExportDeclaration(VariableProxy* proxy,
                                           Scope* scope,
                                           int pos) {
-    ExportDeclaration* decl =
-        new(zone_) ExportDeclaration(zone_, proxy, scope, pos);
-    VISIT_AND_RETURN(ExportDeclaration, decl)
+    return new (zone_) ExportDeclaration(zone_, proxy, scope, pos);
   }
 
   ModuleLiteral* NewModuleLiteral(Block* body, Interface* interface, int pos) {
-    ModuleLiteral* module =
-        new(zone_) ModuleLiteral(zone_, body, interface, pos);
-    VISIT_AND_RETURN(ModuleLiteral, module)
+    return new (zone_) ModuleLiteral(zone_, body, interface, pos);
   }
 
   ModuleVariable* NewModuleVariable(VariableProxy* proxy, int pos) {
-    ModuleVariable* module = new(zone_) ModuleVariable(zone_, proxy, pos);
-    VISIT_AND_RETURN(ModuleVariable, module)
+    return new (zone_) ModuleVariable(zone_, proxy, pos);
   }
 
   ModulePath* NewModulePath(Module* origin, const AstRawString* name, int pos) {
-    ModulePath* module = new (zone_) ModulePath(zone_, origin, name, pos);
-    VISIT_AND_RETURN(ModulePath, module)
+    return new (zone_) ModulePath(zone_, origin, name, pos);
   }
 
   ModuleUrl* NewModuleUrl(Handle<String> url, int pos) {
-    ModuleUrl* module = new(zone_) ModuleUrl(zone_, url, pos);
-    VISIT_AND_RETURN(ModuleUrl, module)
+    return new (zone_) ModuleUrl(zone_, url, pos);
   }
 
   Block* NewBlock(ZoneList<const AstRawString*>* labels,
                   int capacity,
                   bool is_initializer_block,
                   int pos) {
-    Block* block =
-        new (zone_) Block(zone_, labels, capacity, is_initializer_block, pos);
-    VISIT_AND_RETURN(Block, block)
+    return new (zone_)
+        Block(zone_, labels, capacity, is_initializer_block, pos);
   }
 
 #define STATEMENT_WITH_LABELS(NodeType)                                     \
   NodeType* New##NodeType(ZoneList<const AstRawString*>* labels, int pos) { \
-    NodeType* stmt = new (zone_) NodeType(zone_, labels, pos);              \
-    VISIT_AND_RETURN(NodeType, stmt);                                       \
+    return new (zone_) NodeType(zone_, labels, pos);                        \
   }
   STATEMENT_WITH_LABELS(DoWhileStatement)
   STATEMENT_WITH_LABELS(WhileStatement)
@@ -3347,12 +3255,10 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
                                         int pos) {
     switch (visit_mode) {
       case ForEachStatement::ENUMERATE: {
-        ForInStatement* stmt = new (zone_) ForInStatement(zone_, labels, pos);
-        VISIT_AND_RETURN(ForInStatement, stmt);
+        return new (zone_) ForInStatement(zone_, labels, pos);
       }
       case ForEachStatement::ITERATE: {
-        ForOfStatement* stmt = new (zone_) ForOfStatement(zone_, labels, pos);
-        VISIT_AND_RETURN(ForOfStatement, stmt);
+        return new (zone_) ForOfStatement(zone_, labels, pos);
       }
     }
     UNREACHABLE();
@@ -3361,47 +3267,38 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
 
   ModuleStatement* NewModuleStatement(
       VariableProxy* proxy, Block* body, int pos) {
-    ModuleStatement* stmt = new(zone_) ModuleStatement(zone_, proxy, body, pos);
-    VISIT_AND_RETURN(ModuleStatement, stmt)
+    return new (zone_) ModuleStatement(zone_, proxy, body, pos);
   }
 
   ExpressionStatement* NewExpressionStatement(Expression* expression, int pos) {
-    ExpressionStatement* stmt =
-        new(zone_) ExpressionStatement(zone_, expression, pos);
-    VISIT_AND_RETURN(ExpressionStatement, stmt)
+    return new (zone_) ExpressionStatement(zone_, expression, pos);
   }
 
   ContinueStatement* NewContinueStatement(IterationStatement* target, int pos) {
-    ContinueStatement* stmt = new(zone_) ContinueStatement(zone_, target, pos);
-    VISIT_AND_RETURN(ContinueStatement, stmt)
+    return new (zone_) ContinueStatement(zone_, target, pos);
   }
 
   BreakStatement* NewBreakStatement(BreakableStatement* target, int pos) {
-    BreakStatement* stmt = new(zone_) BreakStatement(zone_, target, pos);
-    VISIT_AND_RETURN(BreakStatement, stmt)
+    return new (zone_) BreakStatement(zone_, target, pos);
   }
 
   ReturnStatement* NewReturnStatement(Expression* expression, int pos) {
-    ReturnStatement* stmt = new(zone_) ReturnStatement(zone_, expression, pos);
-    VISIT_AND_RETURN(ReturnStatement, stmt)
+    return new (zone_) ReturnStatement(zone_, expression, pos);
   }
 
   WithStatement* NewWithStatement(Scope* scope,
                                   Expression* expression,
                                   Statement* statement,
                                   int pos) {
-    WithStatement* stmt = new(zone_) WithStatement(
-        zone_, scope, expression, statement, pos);
-    VISIT_AND_RETURN(WithStatement, stmt)
+    return new (zone_) WithStatement(zone_, scope, expression, statement, pos);
   }
 
   IfStatement* NewIfStatement(Expression* condition,
                               Statement* then_statement,
                               Statement* else_statement,
                               int pos) {
-    IfStatement* stmt = new (zone_)
+    return new (zone_)
         IfStatement(zone_, condition, then_statement, else_statement, pos);
-    VISIT_AND_RETURN(IfStatement, stmt)
   }
 
   TryCatchStatement* NewTryCatchStatement(int index,
@@ -3410,23 +3307,20 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
                                           Variable* variable,
                                           Block* catch_block,
                                           int pos) {
-    TryCatchStatement* stmt = new(zone_) TryCatchStatement(
-        zone_, index, try_block, scope, variable, catch_block, pos);
-    VISIT_AND_RETURN(TryCatchStatement, stmt)
+    return new (zone_) TryCatchStatement(zone_, index, try_block, scope,
+                                         variable, catch_block, pos);
   }
 
   TryFinallyStatement* NewTryFinallyStatement(int index,
                                               Block* try_block,
                                               Block* finally_block,
                                               int pos) {
-    TryFinallyStatement* stmt = new(zone_) TryFinallyStatement(
-        zone_, index, try_block, finally_block, pos);
-    VISIT_AND_RETURN(TryFinallyStatement, stmt)
+    return new (zone_)
+        TryFinallyStatement(zone_, index, try_block, finally_block, pos);
   }
 
   DebuggerStatement* NewDebuggerStatement(int pos) {
-    DebuggerStatement* stmt = new (zone_) DebuggerStatement(zone_, pos);
-    VISIT_AND_RETURN(DebuggerStatement, stmt)
+    return new (zone_) DebuggerStatement(zone_, pos);
   }
 
   EmptyStatement* NewEmptyStatement(int pos) {
@@ -3435,57 +3329,42 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
 
   CaseClause* NewCaseClause(
       Expression* label, ZoneList<Statement*>* statements, int pos) {
-    CaseClause* clause = new (zone_) CaseClause(zone_, label, statements, pos);
-    VISIT_AND_RETURN(CaseClause, clause)
+    return new (zone_) CaseClause(zone_, label, statements, pos);
   }
 
   Literal* NewStringLiteral(const AstRawString* string, int pos) {
-    Literal* lit =
-        new (zone_) Literal(zone_, ast_value_factory_->NewString(string), pos);
-    VISIT_AND_RETURN(Literal, lit)
+    return new (zone_)
+        Literal(zone_, ast_value_factory_->NewString(string), pos);
   }
 
   // A JavaScript symbol (ECMA-262 edition 6).
   Literal* NewSymbolLiteral(const char* name, int pos) {
-    Literal* lit =
-        new (zone_) Literal(zone_, ast_value_factory_->NewSymbol(name), pos);
-    VISIT_AND_RETURN(Literal, lit)
+    return new (zone_) Literal(zone_, ast_value_factory_->NewSymbol(name), pos);
   }
 
   Literal* NewNumberLiteral(double number, int pos) {
-    Literal* lit =
-        new (zone_) Literal(zone_, ast_value_factory_->NewNumber(number), pos);
-    VISIT_AND_RETURN(Literal, lit)
+    return new (zone_)
+        Literal(zone_, ast_value_factory_->NewNumber(number), pos);
   }
 
   Literal* NewSmiLiteral(int number, int pos) {
-    Literal* lit =
-        new (zone_) Literal(zone_, ast_value_factory_->NewSmi(number), pos);
-    VISIT_AND_RETURN(Literal, lit)
+    return new (zone_) Literal(zone_, ast_value_factory_->NewSmi(number), pos);
   }
 
   Literal* NewBooleanLiteral(bool b, int pos) {
-    Literal* lit =
-        new (zone_) Literal(zone_, ast_value_factory_->NewBoolean(b), pos);
-    VISIT_AND_RETURN(Literal, lit)
+    return new (zone_) Literal(zone_, ast_value_factory_->NewBoolean(b), pos);
   }
 
   Literal* NewNullLiteral(int pos) {
-    Literal* lit =
-        new (zone_) Literal(zone_, ast_value_factory_->NewNull(), pos);
-    VISIT_AND_RETURN(Literal, lit)
+    return new (zone_) Literal(zone_, ast_value_factory_->NewNull(), pos);
   }
 
   Literal* NewUndefinedLiteral(int pos) {
-    Literal* lit =
-        new (zone_) Literal(zone_, ast_value_factory_->NewUndefined(), pos);
-    VISIT_AND_RETURN(Literal, lit)
+    return new (zone_) Literal(zone_, ast_value_factory_->NewUndefined(), pos);
   }
 
   Literal* NewTheHoleLiteral(int pos) {
-    Literal* lit =
-        new (zone_) Literal(zone_, ast_value_factory_->NewTheHole(), pos);
-    VISIT_AND_RETURN(Literal, lit)
+    return new (zone_) Literal(zone_, ast_value_factory_->NewTheHole(), pos);
   }
 
   ObjectLiteral* NewObjectLiteral(
@@ -3494,10 +3373,8 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
       int boilerplate_properties,
       bool has_function,
       int pos) {
-    ObjectLiteral* lit =
-        new (zone_) ObjectLiteral(zone_, properties, literal_index,
-                                  boilerplate_properties, has_function, pos);
-    VISIT_AND_RETURN(ObjectLiteral, lit)
+    return new (zone_) ObjectLiteral(zone_, properties, literal_index,
+                                     boilerplate_properties, has_function, pos);
   }
 
   ObjectLiteral::Property* NewObjectLiteralProperty(Literal* key,
@@ -3513,120 +3390,104 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
     ObjectLiteral::Property* prop =
         new (zone_) ObjectLiteral::Property(zone_, is_getter, value, is_static);
     prop->set_key(NewStringLiteral(value->raw_name(), pos));
-    return prop;  // Not an AST node, will not be visited.
+    return prop;
   }
 
   RegExpLiteral* NewRegExpLiteral(const AstRawString* pattern,
                                   const AstRawString* flags,
                                   int literal_index,
                                   int pos) {
-    RegExpLiteral* lit =
-        new (zone_) RegExpLiteral(zone_, pattern, flags, literal_index, pos);
-    VISIT_AND_RETURN(RegExpLiteral, lit);
+    return new (zone_) RegExpLiteral(zone_, pattern, flags, literal_index, pos);
   }
 
   ArrayLiteral* NewArrayLiteral(ZoneList<Expression*>* values,
                                 int literal_index,
                                 int pos) {
-    ArrayLiteral* lit =
-        new (zone_) ArrayLiteral(zone_, values, literal_index, pos);
-    VISIT_AND_RETURN(ArrayLiteral, lit)
+    return new (zone_) ArrayLiteral(zone_, values, literal_index, pos);
   }
 
   VariableProxy* NewVariableProxy(Variable* var,
                                   int pos = RelocInfo::kNoPosition) {
-    VariableProxy* proxy = new (zone_) VariableProxy(zone_, var, pos);
-    VISIT_AND_RETURN(VariableProxy, proxy)
+    return new (zone_) VariableProxy(zone_, var, pos);
   }
 
   VariableProxy* NewVariableProxy(const AstRawString* name,
                                   bool is_this,
                                   Interface* interface = Interface::NewValue(),
                                   int position = RelocInfo::kNoPosition) {
-    VariableProxy* proxy =
-        new (zone_) VariableProxy(zone_, name, is_this, interface, position);
-    VISIT_AND_RETURN(VariableProxy, proxy)
+    return new (zone_) VariableProxy(zone_, name, is_this, interface, position);
   }
 
   Property* NewProperty(Expression* obj, Expression* key, int pos) {
-    Property* prop = new (zone_) Property(zone_, obj, key, pos);
-    VISIT_AND_RETURN(Property, prop)
+    return new (zone_) Property(zone_, obj, key, pos);
   }
 
   Call* NewCall(Expression* expression,
                 ZoneList<Expression*>* arguments,
                 int pos) {
-    Call* call = new (zone_) Call(zone_, expression, arguments, pos);
-    VISIT_AND_RETURN(Call, call)
+    return new (zone_) Call(zone_, expression, arguments, pos);
   }
 
   CallNew* NewCallNew(Expression* expression,
                       ZoneList<Expression*>* arguments,
                       int pos) {
-    CallNew* call = new (zone_) CallNew(zone_, expression, arguments, pos);
-    VISIT_AND_RETURN(CallNew, call)
+    return new (zone_) CallNew(zone_, expression, arguments, pos);
   }
 
   CallRuntime* NewCallRuntime(const AstRawString* name,
                               const Runtime::Function* function,
                               ZoneList<Expression*>* arguments,
                               int pos) {
-    CallRuntime* call =
-        new (zone_) CallRuntime(zone_, name, function, arguments, pos);
-    VISIT_AND_RETURN(CallRuntime, call)
+    return new (zone_) CallRuntime(zone_, name, function, arguments, pos);
   }
 
   UnaryOperation* NewUnaryOperation(Token::Value op,
                                     Expression* expression,
                                     int pos) {
-    UnaryOperation* node =
-        new (zone_) UnaryOperation(zone_, op, expression, pos);
-    VISIT_AND_RETURN(UnaryOperation, node)
+    return new (zone_) UnaryOperation(zone_, op, expression, pos);
   }
 
   BinaryOperation* NewBinaryOperation(Token::Value op,
                                       Expression* left,
                                       Expression* right,
                                       int pos) {
-    BinaryOperation* node =
-        new (zone_) BinaryOperation(zone_, op, left, right, pos);
-    VISIT_AND_RETURN(BinaryOperation, node)
+    return new (zone_) BinaryOperation(zone_, op, left, right, pos);
   }
 
   CountOperation* NewCountOperation(Token::Value op,
                                     bool is_prefix,
                                     Expression* expr,
                                     int pos) {
-    CountOperation* node =
-        new (zone_) CountOperation(zone_, op, is_prefix, expr, pos);
-    VISIT_AND_RETURN(CountOperation, node)
+    return new (zone_) CountOperation(zone_, op, is_prefix, expr, pos);
   }
 
   CompareOperation* NewCompareOperation(Token::Value op,
                                         Expression* left,
                                         Expression* right,
                                         int pos) {
-    CompareOperation* node =
-        new (zone_) CompareOperation(zone_, op, left, right, pos);
-    VISIT_AND_RETURN(CompareOperation, node)
+    return new (zone_) CompareOperation(zone_, op, left, right, pos);
   }
 
   Conditional* NewConditional(Expression* condition,
                               Expression* then_expression,
                               Expression* else_expression,
                               int position) {
-    Conditional* cond = new (zone_) Conditional(
-        zone_, condition, then_expression, else_expression, position);
-    VISIT_AND_RETURN(Conditional, cond)
+    return new (zone_) Conditional(zone_, condition, then_expression,
+                                   else_expression, position);
   }
 
   Assignment* NewAssignment(Token::Value op,
                             Expression* target,
                             Expression* value,
                             int pos) {
+    DCHECK(Token::IsAssignmentOp(op));
     Assignment* assign = new (zone_) Assignment(zone_, op, target, value, pos);
-    assign->Init(this);
-    VISIT_AND_RETURN(Assignment, assign)
+    if (assign->is_compound()) {
+      DCHECK(Token::IsAssignmentOp(op));
+      assign->binary_operation_ =
+          NewBinaryOperation(assign->binary_op(), target, value, pos + 1);
+    }
+    return assign;
   }
 
   Yield* NewYield(Expression *generator_object,
@@ -3634,14 +3495,12 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
                   Yield::Kind yield_kind,
                   int pos) {
     if (!expression) expression = NewUndefinedLiteral(pos);
-    Yield* yield =
-        new (zone_) Yield(zone_, generator_object, expression, yield_kind, pos);
-    VISIT_AND_RETURN(Yield, yield)
+    return new (zone_)
+        Yield(zone_, generator_object, expression, yield_kind, pos);
   }
 
   Throw* NewThrow(Expression* exception, int pos) {
-    Throw* t = new (zone_) Throw(zone_, exception, pos);
-    VISIT_AND_RETURN(Throw, t)
+    return new (zone_) Throw(zone_, exception, pos);
   }
 
   FunctionLiteral* NewFunctionLiteral(
@@ -3653,51 +3512,39 @@ class AstNodeFactory FINAL BASE_EMBEDDED {
       FunctionLiteral::IsFunctionFlag is_function,
       FunctionLiteral::IsParenthesizedFlag is_parenthesized, FunctionKind kind,
       int position) {
-    FunctionLiteral* lit = new (zone_) FunctionLiteral(
+    return new (zone_) FunctionLiteral(
         zone_, name, ast_value_factory, scope, body, materialized_literal_count,
         expected_property_count, handler_count, parameter_count, function_type,
         has_duplicate_parameters, is_function, is_parenthesized, kind,
         position);
-    // Top-level literal doesn't count for the AST's properties.
-    if (is_function == FunctionLiteral::kIsFunction) {
-      visitor_.VisitFunctionLiteral(lit);
-    }
-    return lit;
   }
 
-  ClassLiteral* NewClassLiteral(const AstRawString* name, Expression* extends,
+  ClassLiteral* NewClassLiteral(const AstRawString* name, Scope* scope,
+                                VariableProxy* proxy, Expression* extends,
                                 Expression* constructor,
                                 ZoneList<ObjectLiteral::Property*>* properties,
                                 int start_position, int end_position) {
-    ClassLiteral* lit =
-        new (zone_) ClassLiteral(zone_, name, extends, constructor, properties,
-                                 start_position, end_position);
-    VISIT_AND_RETURN(ClassLiteral, lit)
+    return new (zone_)
+        ClassLiteral(zone_, name, scope, proxy, extends, constructor,
+                     properties, start_position, end_position);
   }
 
   NativeFunctionLiteral* NewNativeFunctionLiteral(const AstRawString* name,
                                                   v8::Extension* extension,
                                                   int pos) {
-    NativeFunctionLiteral* lit =
-        new (zone_) NativeFunctionLiteral(zone_, name, extension, pos);
-    VISIT_AND_RETURN(NativeFunctionLiteral, lit)
+    return new (zone_) NativeFunctionLiteral(zone_, name, extension, pos);
   }
 
   ThisFunction* NewThisFunction(int pos) {
-    ThisFunction* fun = new (zone_) ThisFunction(zone_, pos);
-    VISIT_AND_RETURN(ThisFunction, fun)
+    return new (zone_) ThisFunction(zone_, pos);
   }
 
   SuperReference* NewSuperReference(VariableProxy* this_var, int pos) {
-    SuperReference* super = new (zone_) SuperReference(zone_, this_var, pos);
-    VISIT_AND_RETURN(SuperReference, super);
+    return new (zone_) SuperReference(zone_, this_var, pos);
   }
 
-#undef VISIT_AND_RETURN
-
  private:
   Zone* zone_;
-  Visitor visitor_;
   AstValueFactory* ast_value_factory_;
 };
 
diff --git a/deps/v8/src/base/cpu.cc b/deps/v8/src/base/cpu.cc
index 5bc8b13..56e1c46 100644
@@ -291,32 +291,36 @@ static bool HasListItem(const char* list, const char* item) {
 
 #endif  // V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64
 
-CPU::CPU() : stepping_(0),
-             model_(0),
-             ext_model_(0),
-             family_(0),
-             ext_family_(0),
-             type_(0),
-             implementer_(0),
-             architecture_(0),
-             part_(0),
-             has_fpu_(false),
-             has_cmov_(false),
-             has_sahf_(false),
-             has_mmx_(false),
-             has_sse_(false),
-             has_sse2_(false),
-             has_sse3_(false),
-             has_ssse3_(false),
-             has_sse41_(false),
-             has_sse42_(false),
-             has_idiva_(false),
-             has_neon_(false),
-             has_thumb2_(false),
-             has_vfp_(false),
-             has_vfp3_(false),
-             has_vfp3_d32_(false),
-             is_fp64_mode_(false) {
+CPU::CPU()
+    : stepping_(0),
+      model_(0),
+      ext_model_(0),
+      family_(0),
+      ext_family_(0),
+      type_(0),
+      implementer_(0),
+      architecture_(0),
+      variant_(-1),
+      part_(0),
+      has_fpu_(false),
+      has_cmov_(false),
+      has_sahf_(false),
+      has_mmx_(false),
+      has_sse_(false),
+      has_sse2_(false),
+      has_sse3_(false),
+      has_ssse3_(false),
+      has_sse41_(false),
+      has_sse42_(false),
+      has_avx_(false),
+      has_fma3_(false),
+      has_idiva_(false),
+      has_neon_(false),
+      has_thumb2_(false),
+      has_vfp_(false),
+      has_vfp3_(false),
+      has_vfp3_d32_(false),
+      is_fp64_mode_(false) {
   memcpy(vendor_, "Unknown", 8);
 #if V8_OS_NACL
 // Portable host shouldn't do feature detection.
@@ -356,6 +360,8 @@ CPU::CPU() : stepping_(0),
     has_ssse3_ = (cpu_info[2] & 0x00000200) != 0;
     has_sse41_ = (cpu_info[2] & 0x00080000) != 0;
     has_sse42_ = (cpu_info[2] & 0x00100000) != 0;
+    has_avx_ = (cpu_info[2] & 0x10000000) != 0;
+    if (has_avx_) has_fma3_ = (cpu_info[2] & 0x00001000) != 0;
   }
 
 #if V8_HOST_ARCH_IA32
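The cpu_info[2] word tested above is ECX from CPUID leaf 1: bit 28 (0x10000000) reports AVX, and bit 12 (0x00001000) reports FMA3, which is only meaningful when AVX is present. A standalone sketch using GCC/Clang's __get_cpuid (V8 uses its own __cpuid wrapper in this file). The raw bits alone do not prove AVX is usable; a complete check also verifies OSXSAVE and the XGETBV/XCR0 state, which both this sketch and the hunk above omit:

    #include <cpuid.h>
    #include <cstdio>

    int main() {
      unsigned int eax, ebx, ecx, edx;
      if (__get_cpuid(1, &eax, &ebx, &ecx, &edx)) {
        bool avx = (ecx & 0x10000000) != 0;          // ECX bit 28: AVX
        bool fma3 = avx && (ecx & 0x00001000) != 0;  // ECX bit 12: FMA
        std::printf("avx=%d fma3=%d\n", avx, fma3);
      }
      return 0;
    }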
@@ -383,7 +389,7 @@ CPU::CPU() : stepping_(0),
   // Extract implementor from the "CPU implementer" field.
   char* implementer = cpu_info.ExtractField("CPU implementer");
   if (implementer != NULL) {
-    char* end ;
+    char* end;
     implementer_ = strtol(implementer, &end, 0);
     if (end == implementer) {
       implementer_ = 0;
@@ -391,10 +397,20 @@ CPU::CPU() : stepping_(0),
     delete[] implementer;
   }
 
+  char* variant = cpu_info.ExtractField("CPU variant");
+  if (variant != NULL) {
+    char* end;
+    variant_ = strtol(variant, &end, 0);
+    if (end == variant) {
+      variant_ = -1;
+    }
+    delete[] variant;
+  }
+
   // Extract part number from the "CPU part" field.
   char* part = cpu_info.ExtractField("CPU part");
   if (part != NULL) {
-    char* end ;
+    char* end;
     part_ = strtol(part, &end, 0);
     if (end == part) {
       part_ = 0;
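Each /proc/cpuinfo field ("CPU implementer", "CPU variant", "CPU part") is parsed with the same strtol end-pointer idiom: base 0 accepts the hex spellings these fields use, and end == input means no digits were consumed, so the code falls back to a sentinel (-1 for the variant, 0 otherwise). The idiom in isolation:

    #include <cstdio>
    #include <cstdlib>

    int ParseFieldOrDefault(const char* text, int fallback) {
      char* end;
      long value = std::strtol(text, &end, 0);  // base 0 also accepts "0x.."
      return end == text ? fallback : static_cast<int>(value);
    }

    int main() {
      std::printf("%d %d\n",
                  ParseFieldOrDefault("0x41", -1),      // prints 65
                  ParseFieldOrDefault("garbage", -1));  // prints -1
      return 0;
    }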
@@ -535,7 +551,7 @@ CPU::CPU() : stepping_(0),
   // Extract implementor from the "CPU implementer" field.
   char* implementer = cpu_info.ExtractField("CPU implementer");
   if (implementer != NULL) {
-    char* end ;
+    char* end;
     implementer_ = strtol(implementer, &end, 0);
     if (end == implementer) {
       implementer_ = 0;
@@ -543,10 +559,20 @@ CPU::CPU() : stepping_(0),
     delete[] implementer;
   }
 
+  char* variant = cpu_info.ExtractField("CPU variant");
+  if (variant != NULL) {
+    char* end;
+    variant_ = strtol(variant, &end, 0);
+    if (end == variant) {
+      variant_ = -1;
+    }
+    delete[] variant;
+  }
+
   // Extract part number from the "CPU part" field.
   char* part = cpu_info.ExtractField("CPU part");
   if (part != NULL) {
-    char* end ;
+    char* end;
     part_ = strtol(part, &end, 0);
     if (end == part) {
       part_ = 0;
diff --git a/deps/v8/src/base/cpu.h b/deps/v8/src/base/cpu.h
index dc0eaf4..8c41f9d 100644
@@ -47,6 +47,8 @@ class CPU FINAL {
   static const int NVIDIA = 0x4e;
   static const int QUALCOMM = 0x51;
   int architecture() const { return architecture_; }
+  int variant() const { return variant_; }
+  static const int NVIDIA_DENVER = 0x0;
   int part() const { return part_; }
   static const int ARM_CORTEX_A5 = 0xc05;
   static const int ARM_CORTEX_A7 = 0xc07;
@@ -68,6 +70,8 @@ class CPU FINAL {
   bool has_ssse3() const { return has_ssse3_; }
   bool has_sse41() const { return has_sse41_; }
   bool has_sse42() const { return has_sse42_; }
+  bool has_avx() const { return has_avx_; }
+  bool has_fma3() const { return has_fma3_; }
 
   // arm features
   bool has_idiva() const { return has_idiva_; }
@@ -90,6 +94,7 @@ class CPU FINAL {
   int type_;
   int implementer_;
   int architecture_;
+  int variant_;
   int part_;
   bool has_fpu_;
   bool has_cmov_;
@@ -101,6 +106,8 @@ class CPU FINAL {
   bool has_ssse3_;
   bool has_sse41_;
   bool has_sse42_;
+  bool has_avx_;
+  bool has_fma3_;
   bool has_idiva_;
   bool has_neon_;
   bool has_thumb2_;
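The new variant() accessor and NVIDIA_DENVER constant let callers single out Denver cores, whose variant field decodes to 0x0. A sketch of the expected call pattern; it assumes the existing implementer() accessor alongside the accessors shown in this hunk:

    bool IsNvidiaDenver(const v8::base::CPU& cpu) {
      return cpu.implementer() == v8::base::CPU::NVIDIA &&
             cpu.variant() == v8::base::CPU::NVIDIA_DENVER;
    }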
diff --git a/deps/v8/src/base/iterator.h b/deps/v8/src/base/iterator.h
new file mode 100644
index 0000000..e380dc3
--- /dev/null
@@ -0,0 +1,56 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_BASE_ITERATOR_H_
+#define V8_BASE_ITERATOR_H_
+
+#include <iterator>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace base {
+
+// The intention of the base::iterator_range class is to encapsulate two
+// iterators so that the range defined by the iterators can be used like
+// a regular STL container (actually only a subset of the full container
+// functionality is available usually).
+template <typename ForwardIterator>
+class iterator_range {
+ public:
+  typedef ForwardIterator iterator;
+  typedef ForwardIterator const_iterator;
+  typedef typename std::iterator_traits<iterator>::pointer pointer;
+  typedef typename std::iterator_traits<iterator>::reference reference;
+  typedef typename std::iterator_traits<iterator>::value_type value_type;
+  typedef
+      typename std::iterator_traits<iterator>::difference_type difference_type;
+
+  iterator_range() : begin_(), end_() {}
+  template <typename ForwardIterator2>
+  iterator_range(ForwardIterator2 const& begin, ForwardIterator2 const& end)
+      : begin_(begin), end_(end) {}
+
+  iterator begin() { return begin_; }
+  iterator end() { return end_; }
+  const_iterator begin() const { return begin_; }
+  const_iterator end() const { return end_; }
+  const_iterator cbegin() const { return begin_; }
+  const_iterator cend() const { return end_; }
+
+  bool empty() const { return cbegin() == cend(); }
+
+  // Random Access iterators only.
+  reference operator[](difference_type n) { return begin()[n]; }
+  difference_type size() const { return cend() - cbegin(); }
+
+ private:
+  const_iterator const begin_;
+  const_iterator const end_;
+};
+
+}  // namespace base
+}  // namespace v8
+
+#endif  // V8_BASE_ITERATOR_H_
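
As the header comment says, iterator_range only packages a begin/end pair so the range can be consumed like a container. A minimal usage sketch (assumes it is compiled inside the V8 tree so the include path resolves):

#include <cstdio>
#include <vector>

#include "src/base/iterator.h"

int main() {
  std::vector<int> v = {1, 2, 3};
  // Wrap the whole vector; the range then works in a range-based for loop.
  v8::base::iterator_range<std::vector<int>::iterator> r(v.begin(), v.end());
  for (int x : r) printf("%d ", x);
  // size() and operator[] are only valid for random-access iterators,
  // as the comment in the header notes.
  printf("\nsize=%td\n", r.size());
}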
index 80a8949..371d7da 100644 (file)
@@ -20,9 +20,9 @@
 // corresponds to 'offsetof' (in stddef.h), except that it doesn't
 // use 0 or NULL, which causes a problem with the compiler warnings
 // we have enabled (which is also why 'offsetof' doesn't seem to work).
-// Here we simply use the non-zero value 4, which seems to work.
-#define OFFSET_OF(type, field)                                          \
-  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(4)->field)) - 4)
+// Here we simply use the aligned, non-zero value 16.
+#define OFFSET_OF(type, field) \
+  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(16)->field)) - 16)
 
 
 #if V8_OS_NACL
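
The only change to OFFSET_OF is the fake base address, 4 to 16, so that the cast pointer is suitably aligned for any field type the macro is applied to; the arithmetic is untouched. A sketch of what the expansion computes (Sample is illustrative):

#include <cstdint>
#include <cstdio>

#define OFFSET_OF(type, field) \
  (reinterpret_cast<intptr_t>(&(reinterpret_cast<type*>(16)->field)) - 16)

struct Sample {
  int32_t a;  // offset 0
  int64_t b;  // offset 8 on typical 64-bit ABIs (after alignment padding)
};

int main() {
  // &(((Sample*)16)->b) evaluates to 16 + offset-of-b without dereferencing
  // anything; subtracting 16 leaves just the field offset.
  printf("%ld\n", static_cast<long>(OFFSET_OF(Sample, b)));  // typically 8
}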
index b13a1e8..36857e6 100644 (file)
@@ -8,10 +8,10 @@
 #include <pthread.h>
 #include <semaphore.h>
 #include <signal.h>
+#include <stdio.h>
 #include <stdlib.h>
 #include <sys/resource.h>
 #include <sys/time.h>
-#include <sys/types.h>
 
 // Ubuntu Dapper requires memory pages to be marked as
 // executable. Otherwise, OS raises an exception when executing code
index 0a222a6..c2fa26a 100644 (file)
@@ -13,6 +13,7 @@
 #include <pthread_np.h>  // for pthread_set_name_np
 #endif
 #include <sched.h>  // for sched_yield
+#include <stdio.h>
 #include <time.h>
 #include <unistd.h>
 
@@ -253,7 +254,7 @@ int OS::GetCurrentProcessId() {
 
 
 int OS::GetCurrentThreadId() {
-#if V8_OS_MACOSX
+#if V8_OS_MACOSX || (V8_OS_ANDROID && defined(__APPLE__))
   return static_cast<int>(pthread_mach_thread_np(pthread_self()));
 #elif V8_OS_LINUX
   return static_cast<int>(syscall(__NR_gettid));
index 1c46cf6..d68e861 100644 (file)
@@ -346,26 +346,41 @@ void Win32Time::SetToCurrentTime() {
 }
 
 
-int64_t FileTimeToInt64(FILETIME ft) {
-  ULARGE_INTEGER result;
-  result.LowPart = ft.dwLowDateTime;
-  result.HighPart = ft.dwHighDateTime;
-  return static_cast<int64_t>(result.QuadPart);
-}
-
-
 // Return the local timezone offset in milliseconds east of UTC. This
 // takes into account whether daylight saving is in effect at the time.
 // Only times in the 32-bit Unix range may be passed to this function.
 // Also, adding the time-zone offset to the input must not overflow.
 // The function EquivalentTime() in date.js guarantees this.
 int64_t Win32Time::LocalOffset(TimezoneCache* cache) {
-  FILETIME local;
-  SYSTEMTIME system_utc, system_local;
-  FileTimeToSystemTime(&time_.ft_, &system_utc);
-  SystemTimeToTzSpecificLocalTime(NULL, &system_utc, &system_local);
-  SystemTimeToFileTime(&system_local, &local);
-  return (FileTimeToInt64(local) - FileTimeToInt64(time_.ft_)) / kTimeScaler;
+  cache->InitializeIfNeeded();
+
+  Win32Time rounded_to_second(*this);
+  rounded_to_second.t() =
+      rounded_to_second.t() / 1000 / kTimeScaler * 1000 * kTimeScaler;
+  // Convert to local time using POSIX localtime function.
+  // Windows XP Service Pack 3 made SystemTimeToTzSpecificLocalTime()
+  // very slow.  Other browsers use localtime().
+
+  // Convert from JavaScript milliseconds past 1/1/1970 0:00:00 to
+  // POSIX seconds past 1/1/1970 0:00:00.
+  double unchecked_posix_time = rounded_to_second.ToJSTime() / 1000;
+  if (unchecked_posix_time > INT_MAX || unchecked_posix_time < 0) {
+    return 0;
+  }
+  // Because _USE_32BIT_TIME_T is defined, time_t is a 32-bit int.
+  time_t posix_time = static_cast<time_t>(unchecked_posix_time);
+
+  // Convert to local time, as struct with fields for day, hour, year, etc.
+  tm posix_local_time_struct;
+  if (localtime_s(&posix_local_time_struct, &posix_time)) return 0;
+
+  if (posix_local_time_struct.tm_isdst > 0) {
+    return (cache->tzinfo_.Bias + cache->tzinfo_.DaylightBias) * -kMsPerMinute;
+  } else if (posix_local_time_struct.tm_isdst == 0) {
+    return (cache->tzinfo_.Bias + cache->tzinfo_.StandardBias) * -kMsPerMinute;
+  } else {
+    return cache->tzinfo_.Bias * -kMsPerMinute;
+  }
 }
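
The returned value is minutes west of UTC (the Windows convention) negated into milliseconds east of UTC. As a worked example under the documented TIME_ZONE_INFORMATION semantics, Pacific time has Bias = 480 and DaylightBias = -60, so during DST the function yields (480 - 60) * -60000 = -25,200,000 ms, i.e. UTC-7. A sketch of just that arithmetic (StandardBias is usually 0 and is folded into the non-DST case here):

#include <cstdio>

static const long kMsPerMinute = 60000;

// Windows reports bias in minutes west of UTC; LocalOffset() wants
// milliseconds east of UTC, hence the negated multiply.
static long OffsetEastOfUtcMs(long bias, long daylight_bias, bool in_dst) {
  return (bias + (in_dst ? daylight_bias : 0)) * -kMsPerMinute;
}

int main() {
  printf("%ld\n", OffsetEastOfUtcMs(480, -60, true));   // -25200000 (UTC-7)
  printf("%ld\n", OffsetEastOfUtcMs(480, -60, false));  // -28800000 (UTC-8)
}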
 
 
index d47ccaf..40dd188 100644 (file)
@@ -401,7 +401,7 @@ class HighResolutionTickClock FINAL : public TickClock {
   }
   virtual ~HighResolutionTickClock() {}
 
-  virtual int64_t Now() OVERRIDE {
+  int64_t Now() OVERRIDE {
     LARGE_INTEGER now;
     BOOL result = QueryPerformanceCounter(&now);
     DCHECK(result);
@@ -419,9 +419,7 @@ class HighResolutionTickClock FINAL : public TickClock {
     return ticks + 1;
   }
 
-  virtual bool IsHighResolution() OVERRIDE {
-    return true;
-  }
+  bool IsHighResolution() OVERRIDE { return true; }
 
  private:
   int64_t ticks_per_second_;
@@ -435,7 +433,7 @@ class RolloverProtectedTickClock FINAL : public TickClock {
   RolloverProtectedTickClock() : last_seen_now_(0), rollover_ms_(1) {}
   virtual ~RolloverProtectedTickClock() {}
 
-  virtual int64_t Now() OVERRIDE {
+  int64_t Now() OVERRIDE {
     LockGuard<Mutex> lock_guard(&mutex_);
     // We use timeGetTime() to implement TimeTicks::Now(), which rolls over
     // every ~49.7 days. We try to track rollover ourselves, which works if
@@ -454,9 +452,7 @@ class RolloverProtectedTickClock FINAL : public TickClock {
     return (now + rollover_ms_) * Time::kMicrosecondsPerMillisecond;
   }
 
-  virtual bool IsHighResolution() OVERRIDE {
-    return false;
-  }
+  bool IsHighResolution() OVERRIDE { return false; }
 
  private:
   Mutex mutex_;
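
The comment above is cut off by the hunk, but the scheme only works if Now() is called at least once per ~49.7-day wrap period, since a missed wrap is indistinguishable from no wrap. A standalone sketch of the rollover bookkeeping (simplified, without V8's locking or its exact rollover_ms_ encoding):

#include <cstdint>
#include <cstdio>

// Turns a 32-bit millisecond counter that wraps every ~49.7 days into a
// monotonically increasing 64-bit value.
class RolloverGuard {
 public:
  uint64_t Now(uint32_t raw_ms) {  // raw_ms stands in for timeGetTime()
    if (raw_ms < last_raw_) rollovers_++;  // counter wrapped since last call
    last_raw_ = raw_ms;
    return (static_cast<uint64_t>(rollovers_) << 32) + raw_ms;
  }

 private:
  uint32_t last_raw_ = 0;
  uint64_t rollovers_ = 0;
};

int main() {
  RolloverGuard clock;
  printf("%llu\n", (unsigned long long)clock.Now(0xFFFFFFF0u));
  printf("%llu\n", (unsigned long long)clock.Now(0x10u));  // past the wrap
}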
index 06c4f24..c665771 100644 (file)
@@ -34,14 +34,12 @@ int SysInfo::NumberOfProcessors() {
   int ncpu = 0;
   size_t len = sizeof(ncpu);
   if (sysctl(mib, arraysize(mib), &ncpu, &len, NULL, 0) != 0) {
-    UNREACHABLE();
     return 1;
   }
   return ncpu;
 #elif V8_OS_POSIX
   long result = sysconf(_SC_NPROCESSORS_ONLN);  // NOLINT(runtime/int)
   if (result == -1) {
-    UNREACHABLE();
     return 1;
   }
   return static_cast<int>(result);
@@ -60,7 +58,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
   int64_t memsize = 0;
   size_t len = sizeof(memsize);
   if (sysctl(mib, arraysize(mib), &memsize, &len, NULL, 0) != 0) {
-    UNREACHABLE();
     return 0;
   }
   return memsize;
@@ -70,7 +67,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
   sysctlbyname("vm.stats.vm.v_page_count", &pages, &size, NULL, 0);
   sysctlbyname("vm.stats.vm.v_page_size", &page_size, &size, NULL, 0);
   if (pages == -1 || page_size == -1) {
-    UNREACHABLE();
     return 0;
   }
   return static_cast<int64_t>(pages) * page_size;
@@ -78,7 +74,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
   MEMORYSTATUSEX memory_info;
   memory_info.dwLength = sizeof(memory_info);
   if (!GlobalMemoryStatusEx(&memory_info)) {
-    UNREACHABLE();
     return 0;
   }
   int64_t result = static_cast<int64_t>(memory_info.ullTotalPhys);
@@ -87,7 +82,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
 #elif V8_OS_QNX
   struct stat stat_buf;
   if (stat("/proc", &stat_buf) != 0) {
-    UNREACHABLE();
     return 0;
   }
   return static_cast<int64_t>(stat_buf.st_size);
@@ -98,7 +92,6 @@ int64_t SysInfo::AmountOfPhysicalMemory() {
   long pages = sysconf(_SC_PHYS_PAGES);    // NOLINT(runtime/int)
   long page_size = sysconf(_SC_PAGESIZE);  // NOLINT(runtime/int)
   if (pages == -1 || page_size == -1) {
-    UNREACHABLE();
     return 0;
   }
   return static_cast<int64_t>(pages) * page_size;
@@ -114,7 +107,6 @@ int64_t SysInfo::AmountOfVirtualMemory() {
   struct rlimit rlim;
   int result = getrlimit(RLIMIT_DATA, &rlim);
   if (result != 0) {
-    UNREACHABLE();
     return 0;
   }
   return (rlim.rlim_cur == RLIM_INFINITY) ? 0 : rlim.rlim_cur;
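
The pattern of these removals is uniform: a failed probe now degrades to a conservative default (1 processor, 0 bytes) instead of hitting UNREACHABLE() in debug builds. The POSIX processor-count branch, reduced to a standalone sketch:

#include <unistd.h>

#include <cstdio>

// A failing sysconf() probe now yields a safe default instead of aborting.
static int NumberOfProcessorsOrOne() {
  long result = sysconf(_SC_NPROCESSORS_ONLN);
  return (result == -1) ? 1 : static_cast<int>(result);
}

int main() { printf("%d\n", NumberOfProcessorsOrOne()); }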
index 264d1c6..7105eb2 100644 (file)
@@ -48,7 +48,7 @@ Handle<String> Bootstrapper::NativesSourceLookup(int index) {
   Heap* heap = isolate_->heap();
   if (heap->natives_source_cache()->get(index)->IsUndefined()) {
     // We can use external strings for the natives.
-    Vector<const char> source = Natives::GetRawScriptSource(index);
+    Vector<const char> source = Natives::GetScriptSource(index);
     NativesExternalStringResource* resource =
         new NativesExternalStringResource(this,
                                           source.start(),
@@ -361,8 +361,8 @@ Handle<Context> Bootstrapper::CreateEnvironment(
 static void SetObjectPrototype(Handle<JSObject> object, Handle<Object> proto) {
   // object.__proto__ = proto;
   Handle<Map> old_map = Handle<Map>(object->map());
-  Handle<Map> new_map = Map::Copy(old_map);
-  new_map->set_prototype(*proto);
+  Handle<Map> new_map = Map::Copy(old_map, "SetObjectPrototype");
+  new_map->SetPrototype(proto, FAST_PROTOTYPE);
   JSObject::MigrateToMap(object, new_map);
 }
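
Map::Copy calls throughout this file gain a string argument naming the reason for the copy; judging from the TRACE_MAPS block added near the end of this file, it presumably labels transitions in --trace-maps output. A hypothetical reduction of the pattern (Map and Copy here are mock stand-ins, not V8's types):

#include <cstdio>

struct Map { int id; };

// Threading a human-readable tag through the copy lets tracing explain
// why each new map was created.
static Map Copy(const Map& m, const char* reason) {
  Map copy{m.id + 1};
  printf("[TraceMap: copy %d -> %d reason=%s]\n", m.id, copy.id, reason);
  return copy;
}

int main() {
  Map root{0};
  Copy(root, "SetObjectPrototype");
}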
 
@@ -493,6 +493,8 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
 
   Handle<String> object_name = factory->Object_string();
 
+  Handle<JSObject> object_function_prototype;
+
   {  // --- O b j e c t ---
     Handle<JSFunction> object_fun = factory->NewFunction(object_name);
     int unused = JSObject::kInitialGlobalObjectUnusedPropertiesCount;
@@ -507,19 +509,20 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
     native_context()->set_object_function(*object_fun);
 
     // Allocate a new prototype for the object function.
-    Handle<JSObject> prototype = factory->NewJSObject(
-        isolate->object_function(),
-        TENURED);
-    Handle<Map> map = Map::Copy(handle(prototype->map()));
+    object_function_prototype =
+        factory->NewJSObject(isolate->object_function(), TENURED);
+    Handle<Map> map = Map::Copy(handle(object_function_prototype->map()),
+                                "EmptyObjectPrototype");
     map->set_is_prototype_map(true);
-    prototype->set_map(*map);
+    object_function_prototype->set_map(*map);
 
-    native_context()->set_initial_object_prototype(*prototype);
+    native_context()->set_initial_object_prototype(*object_function_prototype);
     // For bootstrapping set the array prototype to be the same as the object
     // prototype, otherwise the missing initial_array_prototype will cause
     // assertions during startup.
-    native_context()->set_initial_array_prototype(*prototype);
-    Accessors::FunctionSetPrototype(object_fun, prototype).Assert();
+    native_context()->set_initial_array_prototype(*object_function_prototype);
+    Accessors::FunctionSetPrototype(object_fun, object_function_prototype)
+        .Assert();
   }
 
   // Allocate the empty function as the prototype for function ECMAScript
@@ -534,8 +537,7 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
   Handle<Map> empty_function_map =
       CreateFunctionMap(FUNCTION_WITHOUT_PROTOTYPE);
   DCHECK(!empty_function_map->is_dictionary_map());
-  empty_function_map->set_prototype(
-      native_context()->object_function()->prototype());
+  empty_function_map->SetPrototype(object_function_prototype);
   empty_function_map->set_is_prototype_map(true);
   empty_function->set_map(*empty_function_map);
 
@@ -549,10 +551,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
   empty_function->shared()->DontAdaptArguments();
 
   // Set prototypes for the function maps.
-  native_context()->sloppy_function_map()->set_prototype(*empty_function);
-  native_context()->sloppy_function_without_prototype_map()->
-      set_prototype(*empty_function);
-  sloppy_function_map_writable_prototype_->set_prototype(*empty_function);
+  native_context()->sloppy_function_map()->SetPrototype(empty_function);
+  native_context()->sloppy_function_without_prototype_map()->SetPrototype(
+      empty_function);
+  sloppy_function_map_writable_prototype_->SetPrototype(empty_function);
   return empty_function;
 }
 
@@ -654,7 +656,7 @@ Handle<Map> Genesis::CreateStrictFunctionMap(
   Handle<Map> map = factory()->NewMap(JS_FUNCTION_TYPE, JSFunction::kSize);
   SetStrictFunctionInstanceDescriptor(map, function_mode);
   map->set_function_with_prototype(IsFunctionModeWithPrototype(function_mode));
-  map->set_prototype(*empty_function);
+  map->SetPrototype(empty_function);
   return map;
 }
 
@@ -863,7 +865,6 @@ void Genesis::HookUpGlobalProxy(Handle<GlobalObject> global_object,
                                 Handle<JSGlobalProxy> global_proxy) {
   // Set the native context for the global object.
   global_object->set_native_context(*native_context());
-  global_object->set_global_context(*native_context());
   global_object->set_global_proxy(*global_proxy);
   global_proxy->set_native_context(*native_context());
   native_context()->set_global_proxy(*global_proxy);
@@ -908,6 +909,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
   Factory* factory = isolate->factory();
   Heap* heap = isolate->heap();
 
+  Handle<ScriptContextTable> script_context_table =
+      factory->NewScriptContextTable();
+  native_context()->set_script_context_table(*script_context_table);
+
   Handle<String> object_name = factory->Object_string();
   JSObject::AddProperty(
       global_object, object_name, isolate->object_function(), DONT_ENUM);
@@ -946,7 +951,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
       CallbacksDescriptor d(
           Handle<Name>(Name::cast(array_length->name())),
           array_length, attribs);
-      array_function->initial_map()->AppendDescriptor(&d);
+      initial_map->AppendDescriptor(&d);
     }
 
     // array_function is used internally. JS code creating array object should
@@ -1040,11 +1045,10 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
 
     {
       // ECMA-262, section 15.10.7.1.
-      FieldDescriptor field(factory->source_string(),
-                            JSRegExp::kSourceFieldIndex,
-                            final,
-                            Representation::Tagged());
-      initial_map->AppendDescriptor(&field);
+      Handle<AccessorInfo> regexp_source(
+          Accessors::RegExpSourceInfo(isolate, final));
+      CallbacksDescriptor d(factory->source_string(), regexp_source, final);
+      initial_map->AppendDescriptor(&d);
     }
     {
       // ECMA-262, section 15.10.7.2.
@@ -1081,19 +1085,17 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
       initial_map->AppendDescriptor(&field);
     }
 
-    initial_map->set_inobject_properties(5);
-    initial_map->set_pre_allocated_property_fields(5);
+    static const int num_fields = JSRegExp::kInObjectFieldCount;
+    initial_map->set_inobject_properties(num_fields);
+    initial_map->set_pre_allocated_property_fields(num_fields);
     initial_map->set_unused_property_fields(0);
-    initial_map->set_instance_size(
-        initial_map->instance_size() + 5 * kPointerSize);
-    initial_map->set_visitor_id(StaticVisitorBase::GetVisitorId(*initial_map));
+    initial_map->set_instance_size(initial_map->instance_size() +
+                                   num_fields * kPointerSize);
 
     // RegExp prototype object is itself a RegExp.
-    Handle<Map> proto_map = Map::Copy(initial_map);
-    proto_map->set_prototype(native_context()->initial_object_prototype());
+    Handle<Map> proto_map = Map::Copy(initial_map, "RegExpPrototype");
+    DCHECK(proto_map->prototype() == *isolate->initial_object_prototype());
     Handle<JSObject> proto = factory->NewJSObjectFromMap(proto_map);
-    proto->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex,
-                                 heap->query_colon_string());
     proto->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex,
                                  heap->false_value());
     proto->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex,
@@ -1104,7 +1106,7 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
                                  Smi::FromInt(0),
                                  SKIP_WRITE_BARRIER);  // It's a Smi.
     proto_map->set_is_prototype_map(true);
-    initial_map->set_prototype(*proto);
+    initial_map->SetPrototype(proto);
     factory->SetRegExpIrregexpData(Handle<JSRegExp>::cast(proto),
                                    JSRegExp::IRREGEXP, factory->empty_string(),
                                    JSRegExp::Flags(0), 0);
@@ -1244,7 +1246,8 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
   }
 
   {  // --- aliased arguments map
-    Handle<Map> map = Map::Copy(isolate->sloppy_arguments_map());
+    Handle<Map> map =
+        Map::Copy(isolate->sloppy_arguments_map(), "AliasedArguments");
     map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
     DCHECK_EQ(2, map->pre_allocated_property_fields());
     native_context()->set_aliased_arguments_map(*map);
@@ -1288,7 +1291,9 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
     // @@iterator method is added later.
 
     map->set_function_with_prototype(true);
-    map->set_prototype(native_context()->object_function()->prototype());
+    DCHECK_EQ(native_context()->object_function()->prototype(),
+              *isolate->initial_object_prototype());
+    map->SetPrototype(isolate->initial_object_prototype());
     map->set_pre_allocated_property_fields(1);
     map->set_inobject_properties(1);
 
@@ -1340,11 +1345,6 @@ void Genesis::InitializeGlobal(Handle<GlobalObject> global_object,
     delegate->shared()->DontAdaptArguments();
   }
 
-#define FEATURE_INITIALIZE_GLOBAL(id, descr) InitializeGlobal_##id();
-
-  HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
-#undef FEATURE_INITIALIZE_GLOBAL
-
   // Initialize the embedder data slot.
   Handle<FixedArray> embedder_data = factory->NewFixedArray(3);
   native_context()->set_embedder_data(*embedder_data);
@@ -1379,6 +1379,7 @@ void Genesis::InitializeExperimentalGlobal() {
 
   HARMONY_INPROGRESS(FEATURE_INITIALIZE_GLOBAL)
   HARMONY_STAGED(FEATURE_INITIALIZE_GLOBAL)
+  HARMONY_SHIPPING(FEATURE_INITIALIZE_GLOBAL)
 #undef FEATURE_INITIALIZE_GLOBAL
 }
 
@@ -1396,8 +1397,8 @@ bool Genesis::CompileExperimentalBuiltin(Isolate* isolate, int index) {
   Factory* factory = isolate->factory();
   Handle<String> source_code;
   ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate, source_code, factory->NewStringFromAscii(
-                                ExperimentalNatives::GetRawScriptSource(index)),
+      isolate, source_code,
+      factory->NewStringFromAscii(ExperimentalNatives::GetScriptSource(index)),
       false);
   return CompileNative(isolate, name, source_code);
 }
@@ -1527,6 +1528,7 @@ void Genesis::InstallNativeFunctions() {
   INSTALL_NATIVE(JSFunction, "ToInteger", to_integer_fun);
   INSTALL_NATIVE(JSFunction, "ToUint32", to_uint32_fun);
   INSTALL_NATIVE(JSFunction, "ToInt32", to_int32_fun);
+  INSTALL_NATIVE(JSFunction, "ToLength", to_length_fun);
 
   INSTALL_NATIVE(JSFunction, "GlobalEval", global_eval_fun);
   INSTALL_NATIVE(JSFunction, "Instantiate", instantiate_fun);
@@ -1557,14 +1559,7 @@ void Genesis::InstallNativeFunctions() {
                  native_object_get_notifier);
   INSTALL_NATIVE(JSFunction, "NativeObjectNotifierPerformChange",
                  native_object_notifier_perform_change);
-
-  INSTALL_NATIVE(Symbol, "symbolIterator", iterator_symbol);
-  INSTALL_NATIVE(Symbol, "symbolUnscopables", unscopables_symbol);
   INSTALL_NATIVE(JSFunction, "ArrayValues", array_values_iterator);
-
-#define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) InstallNativeFunctions_##id();
-  HARMONY_SHIPPING(INSTALL_NATIVE_FUNCTIONS_FOR)
-#undef INSTALL_NATIVE_FUNCTIONS_FOR
 }
 
 
@@ -1579,6 +1574,7 @@ void Genesis::InstallExperimentalNativeFunctions() {
 #define INSTALL_NATIVE_FUNCTIONS_FOR(id, descr) InstallNativeFunctions_##id();
   HARMONY_INPROGRESS(INSTALL_NATIVE_FUNCTIONS_FOR)
   HARMONY_STAGED(INSTALL_NATIVE_FUNCTIONS_FOR)
+  HARMONY_SHIPPING(INSTALL_NATIVE_FUNCTIONS_FOR)
 #undef INSTALL_NATIVE_FUNCTIONS_FOR
 }
 
@@ -1590,12 +1586,16 @@ EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_scoping)
 EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_modules)
 EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_strings)
 EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrays)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_array_includes)
 EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_classes)
 EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_object_literals)
 EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_regexps)
 EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_arrow_functions)
 EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_numeric_literals)
 EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_tostring)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_templates)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_sloppy)
+EMPTY_NATIVE_FUNCTIONS_FOR_FEATURE(harmony_unicode)
 
 
 void Genesis::InstallNativeFunctions_harmony_proxies() {
@@ -1616,12 +1616,16 @@ EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_scoping)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_modules)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_strings)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrays)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_array_includes)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_classes)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_object_literals)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_arrow_functions)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_numeric_literals)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_tostring)
 EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_proxies)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_templates)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_sloppy)
+EMPTY_INITIALIZE_GLOBAL_FOR_FEATURE(harmony_unicode)
 
 void Genesis::InitializeGlobal_harmony_regexps() {
   Handle<JSObject> builtins(native_context()->builtins());
@@ -1657,7 +1661,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(
   array_function->shared()->DontAdaptArguments();
 
   Handle<Map> original_map(array_function->initial_map());
-  Handle<Map> initial_map = Map::Copy(original_map);
+  Handle<Map> initial_map = Map::Copy(original_map, "InternalArray");
   initial_map->set_elements_kind(elements_kind);
   JSFunction::SetInitialMap(array_function, initial_map, prototype);
 
@@ -1672,7 +1676,7 @@ Handle<JSFunction> Genesis::InstallInternalArray(
   {  // Add length.
     CallbacksDescriptor d(
         Handle<Name>(Name::cast(array_length->name())), array_length, attribs);
-    array_function->initial_map()->AppendDescriptor(&d);
+    initial_map->AppendDescriptor(&d);
   }
 
   return array_function;
@@ -1702,7 +1706,6 @@ bool Genesis::InstallNatives() {
       Handle<JSBuiltinsObject>::cast(factory()->NewGlobalObject(builtins_fun));
   builtins->set_builtins(*builtins);
   builtins->set_native_context(*native_context());
-  builtins->set_global_context(*native_context());
   builtins->set_global_proxy(native_context()->global_proxy());
 
 
@@ -1935,8 +1938,8 @@ bool Genesis::InstallNatives() {
     // Create maps for generator functions and their prototypes.  Store those
     // maps in the native context.
     Handle<Map> generator_function_map =
-        Map::Copy(sloppy_function_map_writable_prototype_);
-    generator_function_map->set_prototype(*generator_function_prototype);
+        Map::Copy(sloppy_function_map_writable_prototype_, "GeneratorFunction");
+    generator_function_map->SetPrototype(generator_function_prototype);
     native_context()->set_sloppy_generator_function_map(
         *generator_function_map);
 
@@ -1966,15 +1969,16 @@ bool Genesis::InstallNatives() {
                      rw_attribs, poison_pair);
 
     Handle<Map> strict_function_map(native_context()->strict_function_map());
-    Handle<Map> strict_generator_function_map = Map::Copy(strict_function_map);
+    Handle<Map> strict_generator_function_map =
+        Map::Copy(strict_function_map, "StrictGeneratorFunction");
     // "arguments" and "caller" already poisoned.
-    strict_generator_function_map->set_prototype(*generator_function_prototype);
+    strict_generator_function_map->SetPrototype(generator_function_prototype);
     native_context()->set_strict_generator_function_map(
         *strict_generator_function_map);
 
     Handle<JSFunction> object_function(native_context()->object_function());
     Handle<Map> generator_object_prototype_map = Map::Create(isolate(), 0);
-    generator_object_prototype_map->set_prototype(*generator_object_prototype);
+    generator_object_prototype_map->SetPrototype(generator_object_prototype);
     native_context()->set_generator_object_prototype_map(
         *generator_object_prototype_map);
   }
@@ -1984,6 +1988,17 @@ bool Genesis::InstallNatives() {
     return true;
   }
 
+  // Install public symbols.
+  {
+    static const PropertyAttributes attributes =
+        static_cast<PropertyAttributes>(READ_ONLY | DONT_DELETE);
+#define INSTALL_PUBLIC_SYMBOL(name, varname, description)                 \
+  Handle<String> varname = factory()->NewStringFromStaticChars(#varname); \
+  JSObject::AddProperty(builtins, varname, factory()->name(), attributes);
+    PUBLIC_SYMBOL_LIST(INSTALL_PUBLIC_SYMBOL)
+#undef INSTALL_PUBLIC_SYMBOL
+  }
+
   // Install natives.
   for (int i = Natives::GetDebuggerCount();
        i < Natives::GetBuiltinsCount();
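
INSTALL_PUBLIC_SYMBOL above is the usual X-macro idiom: PUBLIC_SYMBOL_LIST expands its argument once per (name, varname, description) row, stamping out one property installation per public symbol. A reduced sketch of the idiom (the list below is invented and uses two columns instead of V8's three):

#include <cstdio>

// Each V(...) row expands the macro passed in once per symbol.
#define DEMO_SYMBOL_LIST(V)                   \
  V(iterator_symbol, "Symbol.iterator")       \
  V(unscopables_symbol, "Symbol.unscopables")

int main() {
#define PRINT_SYMBOL(varname, description) \
  printf("install %s as %s\n", description, #varname);
  DEMO_SYMBOL_LIST(PRINT_SYMBOL)
#undef PRINT_SYMBOL
}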
@@ -2024,8 +2039,10 @@ bool Genesis::InstallNatives() {
     if (FLAG_vector_ics) {
       // Apply embeds an IC, so we need a type vector of size 1 in the shared
       // function info.
+      FeedbackVectorSpec spec(0, 1);
+      spec.SetKind(0, Code::CALL_IC);
       Handle<TypeFeedbackVector> feedback_vector =
-          factory()->NewTypeFeedbackVector(0, 1);
+          factory()->NewTypeFeedbackVector(spec);
       apply->shared()->set_feedback_vector(*feedback_vector);
     }
 
@@ -2062,7 +2079,7 @@ bool Genesis::InstallNatives() {
 
     // Set prototype on map.
     initial_map->set_non_instance_prototype(false);
-    initial_map->set_prototype(*array_prototype);
+    initial_map->SetPrototype(array_prototype);
 
     // Update map with length accessor from Array and add "index" and "input".
     Map::EnsureDescriptorSlack(initial_map, 3);
@@ -2110,22 +2127,22 @@ bool Genesis::InstallNatives() {
     Handle<AccessorInfo> arguments_iterator =
         Accessors::ArgumentsIteratorInfo(isolate(), attribs);
     {
-      CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
-                            arguments_iterator, attribs);
+      CallbacksDescriptor d(factory()->iterator_symbol(), arguments_iterator,
+                            attribs);
       Handle<Map> map(native_context()->sloppy_arguments_map());
       Map::EnsureDescriptorSlack(map, 1);
       map->AppendDescriptor(&d);
     }
     {
-      CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
-                            arguments_iterator, attribs);
+      CallbacksDescriptor d(factory()->iterator_symbol(), arguments_iterator,
+                            attribs);
       Handle<Map> map(native_context()->aliased_arguments_map());
       Map::EnsureDescriptorSlack(map, 1);
       map->AppendDescriptor(&d);
     }
     {
-      CallbacksDescriptor d(Handle<Name>(native_context()->iterator_symbol()),
-                            arguments_iterator, attribs);
+      CallbacksDescriptor d(factory()->iterator_symbol(), arguments_iterator,
+                            attribs);
       Handle<Map> map(native_context()->strict_arguments_map());
       Map::EnsureDescriptorSlack(map, 1);
       map->AppendDescriptor(&d);
@@ -2140,17 +2157,11 @@ bool Genesis::InstallNatives() {
 }
 
 
-#define INSTALL_EXPERIMENTAL_NATIVE(i, flag, file)                             \
-  if (FLAG_##flag &&                                                           \
-      strcmp(ExperimentalNatives::GetScriptName(i).start(), "native " file) == \
-          0) {                                                                 \
-    if (!CompileExperimentalBuiltin(isolate(), i)) return false;               \
-  }
-
-
 bool Genesis::InstallExperimentalNatives() {
   static const char* harmony_arrays_natives[] = {
       "native harmony-array.js", "native harmony-typedarray.js", NULL};
+  static const char* harmony_array_includes_natives[] = {
+      "native harmony-array-includes.js", NULL};
   static const char* harmony_proxies_natives[] = {"native proxy.js", NULL};
   static const char* harmony_strings_natives[] = {"native harmony-string.js",
                                                   NULL};
@@ -2159,33 +2170,35 @@ bool Genesis::InstallExperimentalNatives() {
   static const char* harmony_modules_natives[] = {NULL};
   static const char* harmony_scoping_natives[] = {NULL};
   static const char* harmony_object_literals_natives[] = {NULL};
-  static const char* harmony_regexps_natives[] = {NULL};
+  static const char* harmony_regexps_natives[] = {
+      "native harmony-regexp.js", NULL};
   static const char* harmony_arrow_functions_natives[] = {NULL};
   static const char* harmony_numeric_literals_natives[] = {NULL};
   static const char* harmony_tostring_natives[] = {"native harmony-tostring.js",
                                                    NULL};
+  static const char* harmony_templates_natives[] = {
+      "native harmony-templates.js", NULL};
+  static const char* harmony_sloppy_natives[] = {NULL};
+  static const char* harmony_unicode_natives[] = {NULL};
 
   for (int i = ExperimentalNatives::GetDebuggerCount();
        i < ExperimentalNatives::GetBuiltinsCount(); i++) {
-#define INSTALL_EXPERIMENTAL_NATIVES(id, desc)                       \
-  if (FLAG_##id) {                                                   \
-    for (size_t j = 0; id##_natives[j] != NULL; j++) {               \
-      if (strcmp(ExperimentalNatives::GetScriptName(i).start(),      \
-                 id##_natives[j]) == 0) {                            \
-        if (!CompileExperimentalBuiltin(isolate(), i)) return false; \
-      }                                                              \
-    }                                                                \
-  }
-    // Iterate over flags that are not enabled by default.
+#define INSTALL_EXPERIMENTAL_NATIVES(id, desc)                                \
+  if (FLAG_##id) {                                                            \
+    for (size_t j = 0; id##_natives[j] != NULL; j++) {                        \
+      Vector<const char> script_name = ExperimentalNatives::GetScriptName(i); \
+      if (strncmp(script_name.start(), id##_natives[j],                       \
+                  script_name.length()) == 0) {                               \
+        if (!CompileExperimentalBuiltin(isolate(), i)) return false;          \
+      }                                                                       \
+    }                                                                         \
+  }
     HARMONY_INPROGRESS(INSTALL_EXPERIMENTAL_NATIVES);
     HARMONY_STAGED(INSTALL_EXPERIMENTAL_NATIVES);
+    HARMONY_SHIPPING(INSTALL_EXPERIMENTAL_NATIVES);
 #undef INSTALL_EXPERIMENTAL_NATIVES
   }
 
-#define USE_NATIVES_FOR_FEATURE(id, descr) USE(id##_natives);
-  HARMONY_SHIPPING(USE_NATIVES_FOR_FEATURE)
-#undef USE_NATIVES_FOR_FEATURE
-
   InstallExperimentalNativeFunctions();
   return true;
 }
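
The rewritten matcher switches from strcmp to strncmp bounded by the script name's length, presumably because Vector<const char> carries an explicit length and need not be NUL-terminated. The comparison in isolation:

#include <cstdio>
#include <cstring>

int main() {
  // A (pointer, length) name with no trailing NUL, as a Vector would hold it:
  const char raw[] = {'n', 'a', 't', 'i', 'v', 'e', ' ',
                      'p', 'r', 'o', 'x', 'y', '.', 'j', 's'};
  // Bounding the comparison by the vector's length avoids reading past the
  // buffer, which strcmp on a non-NUL-terminated name could do.
  if (strncmp(raw, "native proxy.js", sizeof(raw)) == 0) printf("match\n");
}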
@@ -2568,6 +2581,8 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
           JSObject::AddProperty(to, key, constant, details.attributes());
           break;
         }
+        case ACCESSOR_FIELD:
+          UNREACHABLE();
         case CALLBACKS: {
           Handle<Name> key(descs->GetKey(i));
           LookupIterator it(to, key, LookupIterator::OWN_SKIP_INTERCEPTOR);
@@ -2578,15 +2593,10 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
           DCHECK(!to->HasFastProperties());
           // Add to dictionary.
           Handle<Object> callbacks(descs->GetCallbacksObject(i), isolate());
-          PropertyDetails d = PropertyDetails(
-              details.attributes(), CALLBACKS, i + 1);
+          PropertyDetails d(details.attributes(), CALLBACKS, i + 1);
           JSObject::SetNormalizedProperty(to, key, callbacks, d);
           break;
         }
-        // Do not occur since the from object has fast properties.
-        case NORMAL:
-          UNREACHABLE();
-          break;
       }
     }
   } else {
@@ -2611,6 +2621,7 @@ void Genesis::TransferNamedProperties(Handle<JSObject> from,
                                  isolate());
         }
         PropertyDetails details = properties->DetailsAt(i);
+        DCHECK_EQ(DATA, details.kind());
         JSObject::AddProperty(to, key, value, details.attributes());
       }
     }
@@ -2712,6 +2723,15 @@ Genesis::Genesis(Isolate* isolate,
     AddToWeakNativeContextList(*native_context());
     isolate->set_context(*native_context());
     isolate->counters()->contexts_created_by_snapshot()->Increment();
+#if TRACE_MAPS
+    if (FLAG_trace_maps) {
+      Handle<JSFunction> object_fun = isolate->object_function();
+      PrintF("[TraceMap: InitialMap map= %p SFI= %d_Object ]\n",
+             reinterpret_cast<void*>(object_fun->initial_map()),
+             object_fun->shared()->unique_id());
+      Map::TraceAllTransitions(object_fun->initial_map());
+    }
+#endif
     Handle<GlobalObject> global_object;
     Handle<JSGlobalProxy> global_proxy = CreateNewGlobals(
         global_proxy_template, maybe_global_proxy, &global_object);
index 0cc8486..9d4f270 100644 (file)
@@ -158,8 +158,8 @@ class NativesExternalStringResource FINAL
   NativesExternalStringResource(Bootstrapper* bootstrapper,
                                 const char* source,
                                 size_t length);
-  virtual const char* data() const OVERRIDE { return data_; }
-  virtual size_t length() const OVERRIDE { return length_; }
+  const char* data() const OVERRIDE { return data_; }
+  size_t length() const OVERRIDE { return length_; }
 
  private:
   const char* data_;
index 5eefd27..b8d0b42 100644 (file)
@@ -197,7 +197,6 @@ static bool ArrayPrototypeHasNoElements(Heap* heap, PrototypeIterator* iter) {
 
 static inline bool IsJSArrayFastElementMovingAllowed(Heap* heap,
                                                      JSArray* receiver) {
-  if (!FLAG_clever_optimizations) return false;
   DisallowHeapAllocation no_gc;
   PrototypeIterator iter(heap->isolate(), receiver);
   return ArrayPrototypeHasNoElements(heap, &iter);
@@ -420,6 +419,10 @@ BUILTIN(ArrayPop) {
   int len = Smi::cast(array->length())->value();
   if (len == 0) return isolate->heap()->undefined_value();
 
+  if (JSArray::HasReadOnlyLength(array)) {
+    return CallJsBuiltin(isolate, "ArrayPop", args);
+  }
+
   ElementsAccessor* accessor = array->GetElementsAccessor();
   int new_length = len - 1;
   Handle<Object> element =
@@ -451,6 +454,10 @@ BUILTIN(ArrayShift) {
   int len = Smi::cast(array->length())->value();
   if (len == 0) return heap->undefined_value();
 
+  if (JSArray::HasReadOnlyLength(array)) {
+    return CallJsBuiltin(isolate, "ArrayShift", args);
+  }
+
   // Get first element
   ElementsAccessor* accessor = array->GetElementsAccessor();
   Handle<Object> first =
@@ -756,6 +763,11 @@ BUILTIN(ArraySplice) {
     return CallJsBuiltin(isolate, "ArraySplice", args);
   }
 
+  if (new_length != len && JSArray::HasReadOnlyLength(array)) {
+    AllowHeapAllocation allow_allocation;
+    return CallJsBuiltin(isolate, "ArraySplice", args);
+  }
+
   if (new_length == 0) {
     Handle<JSArray> result = isolate->factory()->NewJSArrayWithElements(
         elms_obj, elements_kind, actual_delete_count);
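
All three hunks in this file share one shape: before mutating the array, the fast C++ builtin now checks HasReadOnlyLength() and, if the length is frozen, bails out to the JS builtin, which implements the spec'd error behavior. The guard pattern, mocked up (Array and TryFastPop are illustrative stand-ins, not V8's types):

#include <cstdio>

struct Array {
  int length;
  bool length_read_only;
};

static bool TryFastPop(Array* a, int* out) {
  if (a->length_read_only) return false;  // caller falls back to CallJsBuiltin
  if (a->length == 0) return false;
  *out = --a->length;  // stand-in for the real element read plus shrink
  return true;
}

int main() {
  Array a{3, true};
  int v = 0;
  printf(TryFastPop(&a, &v) ? "fast path\n" : "fall back to JS builtin\n");
}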
index 6303855..6ba64c1 100644 (file)
@@ -7,12 +7,6 @@
 
 #include "src/base/logging.h"
 
-#ifdef DEBUG
-#ifndef OPTIMIZED_DEBUG
-#define ENABLE_SLOW_DCHECKS    1
-#endif
-#endif
-
 namespace v8 {
 
 class Value;
index dc527d7..800a09d 100644 (file)
@@ -8,6 +8,7 @@
 #include "src/code-stubs.h"
 #include "src/field-index.h"
 #include "src/hydrogen.h"
+#include "src/ic/ic.h"
 #include "src/lithium.h"
 
 namespace v8 {
@@ -34,11 +35,11 @@ static LChunk* OptimizeGraph(HGraph* graph) {
 
 class CodeStubGraphBuilderBase : public HGraphBuilder {
  public:
-  CodeStubGraphBuilderBase(Isolate* isolate, HydrogenCodeStub* stub)
-      : HGraphBuilder(&info_),
+  explicit CodeStubGraphBuilderBase(CompilationInfoWithZone* info)
+      : HGraphBuilder(info),
         arguments_length_(NULL),
-        info_(stub, isolate),
-        descriptor_(stub),
+        info_(info),
+        descriptor_(info->code_stub()),
         context_(NULL) {
     int parameter_count = descriptor_.GetEnvironmentParameterCount();
     parameters_.Reset(new HParameter*[parameter_count]);
@@ -56,10 +57,10 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
     DCHECK(arguments_length_ != NULL);
     return arguments_length_;
   }
-  CompilationInfo* info() { return &info_; }
-  HydrogenCodeStub* stub() { return info_.code_stub(); }
+  CompilationInfo* info() { return info_; }
+  HydrogenCodeStub* stub() { return info_->code_stub(); }
   HContext* context() { return context_; }
-  Isolate* isolate() { return info_.isolate(); }
+  Isolate* isolate() { return info_->isolate(); }
 
   HLoadNamedField* BuildLoadNamedField(HValue* object,
                                        FieldIndex index);
@@ -99,6 +100,21 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
                                         HValue* shared_info,
                                         HValue* native_context);
 
+  // Tail calls handler found at array[map_index + 1].
+  void TailCallHandler(HValue* receiver, HValue* name, HValue* array,
+                       HValue* map_index, HValue* slot, HValue* vector);
+
+  // Tail calls handler_code.
+  void TailCallHandler(HValue* receiver, HValue* name, HValue* slot,
+                       HValue* vector, HValue* handler_code);
+
+  void TailCallMiss(HValue* receiver, HValue* name, HValue* slot,
+                    HValue* vector, bool keyed_load);
+
+  // Handle MONOMORPHIC and POLYMORPHIC LoadIC and KeyedLoadIC cases.
+  void HandleArrayCases(HValue* array, HValue* receiver, HValue* name,
+                        HValue* slot, HValue* vector, bool keyed_load);
+
  private:
   HValue* BuildArraySingleArgumentConstructor(JSArrayBuilder* builder);
   HValue* BuildArrayNArgumentsConstructor(JSArrayBuilder* builder,
@@ -106,7 +122,7 @@ class CodeStubGraphBuilderBase : public HGraphBuilder {
 
   SmartArrayPointer<HParameter*> parameters_;
   HValue* arguments_length_;
-  CompilationInfoWithZone info_;
+  CompilationInfoWithZone* info_;
   CodeStubDescriptor descriptor_;
   HContext* context_;
 };
@@ -120,7 +136,7 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
     const char* name = CodeStub::MajorName(stub()->MajorKey(), false);
     PrintF("-----------------------------------------------------------\n");
     PrintF("Compiling stub %s using hydrogen\n", name);
-    isolate()->GetHTracer()->TraceCompilation(&info_);
+    isolate()->GetHTracer()->TraceCompilation(info());
   }
 
   int param_count = descriptor_.GetEnvironmentParameterCount();
@@ -189,8 +205,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
 template <class Stub>
 class CodeStubGraphBuilder: public CodeStubGraphBuilderBase {
  public:
-  CodeStubGraphBuilder(Isolate* isolate, Stub* stub)
-      : CodeStubGraphBuilderBase(isolate, stub) {}
+  explicit CodeStubGraphBuilder(CompilationInfoWithZone* info)
+      : CodeStubGraphBuilderBase(info) {}
 
  protected:
   virtual HValue* BuildCodeStub() {
@@ -271,7 +287,8 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
   if (FLAG_profile_hydrogen_code_stub_compilation) {
     timer.Start();
   }
-  CodeStubGraphBuilder<Stub> builder(isolate, stub);
+  CompilationInfoWithZone info(stub, isolate);
+  CodeStubGraphBuilder<Stub> builder(&info);
   LChunk* chunk = OptimizeGraph(builder.CreateGraph());
   Handle<Code> code = chunk->Codegen();
   if (FLAG_profile_hydrogen_code_stub_compilation) {
@@ -306,10 +323,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
   // so that it doesn't build an eager frame.
   info()->MarkMustNotHaveEagerFrame();
 
-  HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
-                                                  GetParameter(1),
-                                                  static_cast<HValue*>(NULL),
-                                                  FAST_ELEMENTS);
+  HInstruction* allocation_site =
+      Add<HLoadKeyed>(GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS);
   IfBuilder checker(this);
   checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
                                                     undefined);
@@ -317,8 +332,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
 
   HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
       AllocationSite::kTransitionInfoOffset);
-  HInstruction* boilerplate = Add<HLoadNamedField>(
-      allocation_site, static_cast<HValue*>(NULL), access);
+  HInstruction* boilerplate =
+      Add<HLoadNamedField>(allocation_site, nullptr, access);
   HValue* elements = AddLoadElements(boilerplate);
   HValue* capacity = AddLoadFixedArrayLength(elements);
   IfBuilder zero_capacity(this);
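
The static_cast<HValue*>(NULL) to nullptr rewrites that dominate the rest of this file are not just cosmetic: with an overloaded or templated parameter, a bare NULL is an integer constant and can resolve to the wrong type, so pre-C++11 code had to spell the pointer type out. A small illustration (the overload set is invented, not V8's actual Add<> helpers):

#include <cstdio>

struct HValue;

static void Add(HValue* dependency) { printf("pointer overload\n"); }
static void Add(int flags) { printf("int overload\n"); }

int main() {
  Add(nullptr);  // unambiguously the pointer overload since C++11
  Add(0);        // silently the int overload, the old hazard NULL shared
}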
@@ -370,10 +385,8 @@ template <>
 HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
   HValue* undefined = graph()->GetConstantUndefined();
 
-  HInstruction* allocation_site = Add<HLoadKeyed>(GetParameter(0),
-                                                  GetParameter(1),
-                                                  static_cast<HValue*>(NULL),
-                                                  FAST_ELEMENTS);
+  HInstruction* allocation_site =
+      Add<HLoadKeyed>(GetParameter(0), GetParameter(1), nullptr, FAST_ELEMENTS);
 
   IfBuilder checker(this);
   checker.IfNot<HCompareObjectEqAndBranch, HValue*>(allocation_site,
@@ -382,8 +395,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
 
   HObjectAccess access = HObjectAccess::ForAllocationSiteOffset(
       AllocationSite::kTransitionInfoOffset);
-  HInstruction* boilerplate = Add<HLoadNamedField>(
-      allocation_site, static_cast<HValue*>(NULL), access);
+  HInstruction* boilerplate =
+      Add<HLoadNamedField>(allocation_site, nullptr, access);
 
   int length = casted_stub()->length();
   if (length == 0) {
@@ -396,12 +409,10 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
     size += AllocationMemento::kSize;
   }
 
-  HValue* boilerplate_map = Add<HLoadNamedField>(
-      boilerplate, static_cast<HValue*>(NULL),
-      HObjectAccess::ForMap());
+  HValue* boilerplate_map =
+      Add<HLoadNamedField>(boilerplate, nullptr, HObjectAccess::ForMap());
   HValue* boilerplate_size = Add<HLoadNamedField>(
-      boilerplate_map, static_cast<HValue*>(NULL),
-      HObjectAccess::ForMapInstanceSize());
+      boilerplate_map, nullptr, HObjectAccess::ForMapInstanceSize());
   HValue* size_in_words = Add<HConstant>(object_size >> kPointerSizeLog2);
   checker.If<HCompareNumericAndBranch>(boilerplate_size,
                                        size_in_words, Token::EQ);
@@ -414,9 +425,8 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
 
   for (int i = 0; i < object_size; i += kPointerSize) {
     HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(i);
-    Add<HStoreNamedField>(
-        object, access, Add<HLoadNamedField>(
-            boilerplate, static_cast<HValue*>(NULL), access));
+    Add<HStoreNamedField>(object, access,
+                          Add<HLoadNamedField>(boilerplate, nullptr, access));
   }
 
   DCHECK(FLAG_allocation_site_pretenuring || (size == object_size));
@@ -485,9 +495,8 @@ HValue* CodeStubGraphBuilder<CreateAllocationSiteStub>::BuildCodeStub() {
   // Link the object to the allocation site list
   HValue* site_list = Add<HConstant>(
       ExternalReference::allocation_sites_list_address(isolate()));
-  HValue* site = Add<HLoadNamedField>(
-      site_list, static_cast<HValue*>(NULL),
-      HObjectAccess::ForAllocationSiteList());
+  HValue* site = Add<HLoadNamedField>(site_list, nullptr,
+                                      HObjectAccess::ForAllocationSiteList());
   // TODO(mvstanton): This is a store to a weak pointer, which we may want to
   // mark as such in order to skip the write barrier, once we have a unified
   // system for weakness. For now we decided to keep it like this because having
@@ -514,6 +523,40 @@ Handle<Code> CreateAllocationSiteStub::GenerateCode() {
 
 
 template <>
+HValue* CodeStubGraphBuilder<LoadScriptContextFieldStub>::BuildCodeStub() {
+  int context_index = casted_stub()->context_index();
+  int slot_index = casted_stub()->slot_index();
+
+  HValue* script_context = BuildGetScriptContext(context_index);
+  return Add<HLoadNamedField>(script_context, nullptr,
+                              HObjectAccess::ForContextSlot(slot_index));
+}
+
+
+Handle<Code> LoadScriptContextFieldStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
+
+template <>
+HValue* CodeStubGraphBuilder<StoreScriptContextFieldStub>::BuildCodeStub() {
+  int context_index = casted_stub()->context_index();
+  int slot_index = casted_stub()->slot_index();
+
+  HValue* script_context = BuildGetScriptContext(context_index);
+  Add<HStoreNamedField>(script_context,
+                        HObjectAccess::ForContextSlot(slot_index),
+                        GetParameter(2), STORE_TO_INITIALIZED_ENTRY);
+  return GetParameter(2);
+}
+
+
+Handle<Code> StoreScriptContextFieldStub::GenerateCode() {
+  return DoGenerateCode(this);
+}
+
+
+template <>
 HValue* CodeStubGraphBuilder<LoadFastElementStub>::BuildCodeStub() {
   HInstruction* load = BuildUncheckedMonomorphicElementAccess(
       GetParameter(LoadDescriptor::kReceiverIndex),
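
The two new stubs above are symmetric: resolve the script context at context_index, then load or store slot_index within it. The addressing scheme, mocked with plain vectors (the indices are the ones the stubs carry; everything else is illustrative):

#include <cstdio>
#include <vector>

using Context = std::vector<int>;  // one slot array per script context

static int LoadScriptContextField(const std::vector<Context>& table,
                                  int context_index, int slot_index) {
  return table[context_index][slot_index];
}

static void StoreScriptContextField(std::vector<Context>* table,
                                    int context_index, int slot_index,
                                    int value) {
  (*table)[context_index][slot_index] = value;
}

int main() {
  std::vector<Context> table = {{0, 0, 0}};
  StoreScriptContextField(&table, 0, 2, 42);
  printf("%d\n", LoadScriptContextField(table, 0, 2));  // prints 42
}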
@@ -538,15 +581,15 @@ HLoadNamedField* CodeStubGraphBuilderBase::BuildLoadNamedField(
   HObjectAccess access = index.is_inobject()
       ? HObjectAccess::ForObservableJSObjectOffset(offset, representation)
       : HObjectAccess::ForBackingStoreOffset(offset, representation);
-  if (index.is_double()) {
+  if (index.is_double() &&
+      (!FLAG_unbox_double_fields || !index.is_inobject())) {
     // Load the heap number.
     object = Add<HLoadNamedField>(
-        object, static_cast<HValue*>(NULL),
-        access.WithRepresentation(Representation::Tagged()));
+        object, nullptr, access.WithRepresentation(Representation::Tagged()));
     // Load the double value from it.
     access = HObjectAccess::ForHeapNumberValue();
   }
-  return Add<HLoadNamedField>(object, static_cast<HValue*>(NULL), access);
+  return Add<HLoadNamedField>(object, nullptr, access);
 }
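
The new condition means the two-step load (fetch the HeapNumber box, then its payload) only remains for doubles that are still boxed: either FLAG_unbox_double_fields is off or the field lives out-of-object. Unboxed in-object doubles are read directly. The two layouts side by side (mock structs, not V8's object model):

#include <cstdio>

struct HeapNumber { double value; };
struct Boxed { HeapNumber* field; };  // field holds a pointer to the box
struct Unboxed { double field; };     // field holds the raw double bits

int main() {
  HeapNumber box{1.5};
  Boxed b{&box};
  Unboxed u{2.5};
  printf("%g\n", b.field->value);  // boxed: two dependent loads
  printf("%g\n", u.field);         // unboxed in-object: one direct load
}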
 
 
@@ -566,12 +609,10 @@ HValue* CodeStubGraphBuilder<LoadConstantStub>::BuildCodeStub() {
   HValue* map = AddLoadMap(GetParameter(0), NULL);
   HObjectAccess descriptors_access = HObjectAccess::ForObservableJSObjectOffset(
       Map::kDescriptorsOffset, Representation::Tagged());
-  HValue* descriptors =
-      Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), descriptors_access);
+  HValue* descriptors = Add<HLoadNamedField>(map, nullptr, descriptors_access);
   HObjectAccess value_access = HObjectAccess::ForObservableJSObjectOffset(
       DescriptorArray::GetValueOffset(casted_stub()->constant_index()));
-  return Add<HLoadNamedField>(descriptors, static_cast<HValue*>(NULL),
-                              value_access);
+  return Add<HLoadNamedField>(descriptors, nullptr, value_access);
 }
 
 
@@ -580,20 +621,19 @@ Handle<Code> LoadConstantStub::GenerateCode() { return DoGenerateCode(this); }
 
 HValue* CodeStubGraphBuilderBase::UnmappedCase(HValue* elements, HValue* key) {
   HValue* result;
-  HInstruction* backing_store = Add<HLoadKeyed>(
-      elements, graph()->GetConstant1(), static_cast<HValue*>(NULL),
-      FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+  HInstruction* backing_store =
+      Add<HLoadKeyed>(elements, graph()->GetConstant1(), nullptr, FAST_ELEMENTS,
+                      ALLOW_RETURN_HOLE);
   Add<HCheckMaps>(backing_store, isolate()->factory()->fixed_array_map());
-  HValue* backing_store_length =
-      Add<HLoadNamedField>(backing_store, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForFixedArrayLength());
+  HValue* backing_store_length = Add<HLoadNamedField>(
+      backing_store, nullptr, HObjectAccess::ForFixedArrayLength());
   IfBuilder in_unmapped_range(this);
   in_unmapped_range.If<HCompareNumericAndBranch>(key, backing_store_length,
                                                  Token::LT);
   in_unmapped_range.Then();
   {
-    result = Add<HLoadKeyed>(backing_store, key, static_cast<HValue*>(NULL),
-                             FAST_HOLEY_ELEMENTS, NEVER_RETURN_HOLE);
+    result = Add<HLoadKeyed>(backing_store, key, nullptr, FAST_HOLEY_ELEMENTS,
+                             NEVER_RETURN_HOLE);
   }
   in_unmapped_range.ElseDeopt("Outside of range");
   in_unmapped_range.End();
@@ -640,19 +680,17 @@ HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
   positive_smi.End();
 
   HValue* constant_two = Add<HConstant>(2);
-  HValue* elements = AddLoadElements(receiver, static_cast<HValue*>(NULL));
-  HValue* elements_length =
-      Add<HLoadNamedField>(elements, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForFixedArrayLength());
+  HValue* elements = AddLoadElements(receiver, nullptr);
+  HValue* elements_length = Add<HLoadNamedField>(
+      elements, nullptr, HObjectAccess::ForFixedArrayLength());
   HValue* adjusted_length = AddUncasted<HSub>(elements_length, constant_two);
   IfBuilder in_range(this);
   in_range.If<HCompareNumericAndBranch>(key, adjusted_length, Token::LT);
   in_range.Then();
   {
     HValue* index = AddUncasted<HAdd>(key, constant_two);
-    HInstruction* mapped_index =
-        Add<HLoadKeyed>(elements, index, static_cast<HValue*>(NULL),
-                        FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
+    HInstruction* mapped_index = Add<HLoadKeyed>(
+        elements, index, nullptr, FAST_HOLEY_ELEMENTS, ALLOW_RETURN_HOLE);
 
     IfBuilder is_valid(this);
     is_valid.IfNot<HCompareObjectEqAndBranch>(mapped_index,
@@ -662,13 +700,11 @@ HValue* CodeStubGraphBuilder<KeyedLoadSloppyArgumentsStub>::BuildCodeStub() {
       // TODO(mvstanton): I'd like to assert from this point, that if the
       // mapped_index is not the hole that it is indeed, a smi. An unnecessary
       // smi check is being emitted.
-      HValue* the_context =
-          Add<HLoadKeyed>(elements, graph()->GetConstant0(),
-                          static_cast<HValue*>(NULL), FAST_ELEMENTS);
+      HValue* the_context = Add<HLoadKeyed>(elements, graph()->GetConstant0(),
+                                            nullptr, FAST_ELEMENTS);
       DCHECK(Context::kHeaderSize == FixedArray::kHeaderSize);
-      HValue* result =
-          Add<HLoadKeyed>(the_context, mapped_index, static_cast<HValue*>(NULL),
-                          FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+      HValue* result = Add<HLoadKeyed>(the_context, mapped_index, nullptr,
+                                       FAST_ELEMENTS, ALLOW_RETURN_HOLE);
       environment()->Push(result);
     }
     is_valid.Else();
@@ -705,30 +741,31 @@ void CodeStubGraphBuilderBase::BuildStoreNamedField(
           : HObjectAccess::ForBackingStoreOffset(offset, representation);
 
   if (representation.IsDouble()) {
-    HObjectAccess heap_number_access =
-        access.WithRepresentation(Representation::Tagged());
-    if (transition_to_field) {
-      // The store requires a mutable HeapNumber to be allocated.
-      NoObservableSideEffectsScope no_side_effects(this);
-      HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
-
-      // TODO(hpayer): Allocation site pretenuring support.
-      HInstruction* heap_number =
-          Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
-                         MUTABLE_HEAP_NUMBER_TYPE);
-      AddStoreMapConstant(heap_number,
-                          isolate()->factory()->mutable_heap_number_map());
-      Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
-                            value);
-      // Store the new mutable heap number into the object.
-      access = heap_number_access;
-      value = heap_number;
-    } else {
-      // Load the heap number.
-      object = Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
-                                    heap_number_access);
-      // Store the double value into it.
-      access = HObjectAccess::ForHeapNumberValue();
+    if (!FLAG_unbox_double_fields || !index.is_inobject()) {
+      HObjectAccess heap_number_access =
+          access.WithRepresentation(Representation::Tagged());
+      if (transition_to_field) {
+        // The store requires a mutable HeapNumber to be allocated.
+        NoObservableSideEffectsScope no_side_effects(this);
+        HInstruction* heap_number_size = Add<HConstant>(HeapNumber::kSize);
+
+        // TODO(hpayer): Allocation site pretenuring support.
+        HInstruction* heap_number =
+            Add<HAllocate>(heap_number_size, HType::HeapObject(), NOT_TENURED,
+                           MUTABLE_HEAP_NUMBER_TYPE);
+        AddStoreMapConstant(heap_number,
+                            isolate()->factory()->mutable_heap_number_map());
+        Add<HStoreNamedField>(heap_number, HObjectAccess::ForHeapNumberValue(),
+                              value);
+        // Store the new mutable heap number into the object.
+        access = heap_number_access;
+        value = heap_number;
+      } else {
+        // Load the heap number.
+        object = Add<HLoadNamedField>(object, nullptr, heap_number_access);
+        // Store the double value into it.
+        access = HObjectAccess::ForHeapNumberValue();
+      }
     }
   } else if (representation.IsHeapObject()) {
     BuildCheckHeapObject(value);
@@ -755,9 +792,8 @@ HValue* CodeStubGraphBuilder<StoreTransitionStub>::BuildCodeStub() {
 
   switch (casted_stub()->store_mode()) {
     case StoreTransitionStub::ExtendStorageAndStoreMapAndValue: {
-      HValue* properties =
-          Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
-                               HObjectAccess::ForPropertiesPointer());
+      HValue* properties = Add<HLoadNamedField>(
+          object, nullptr, HObjectAccess::ForPropertiesPointer());
       HValue* length = AddLoadFixedArrayLength(properties);
       HValue* delta =
           Add<HConstant>(static_cast<int32_t>(JSObject::kFieldsAdded));
@@ -1066,7 +1102,7 @@ HValue* CodeStubGraphBuilder<CompareNilICStub>::BuildCodeInitializedStub() {
   HIfContinuation continuation;
   Handle<Map> sentinel_map(isolate->heap()->meta_map());
   Type* type = stub->GetType(zone(), sentinel_map);
-  BuildCompareNil(GetParameter(0), type, &continuation);
+  BuildCompareNil(GetParameter(0), type, &continuation, kEmbedMapsViaWeakCells);
   IfBuilder if_nil(this, &continuation);
   if_nil.Then();
   if (continuation.IsFalseReachable()) {
@@ -1291,8 +1327,7 @@ HValue* CodeStubGraphBuilder<StoreGlobalStub>::BuildCodeInitializedStub() {
 
   HValue* cell = Add<HConstant>(placeholder_cell);
   HObjectAccess access(HObjectAccess::ForCellPayload(isolate()));
-  HValue* cell_contents = Add<HLoadNamedField>(
-      cell, static_cast<HValue*>(NULL), access);
+  HValue* cell_contents = Add<HLoadNamedField>(cell, nullptr, access);
 
   if (stub->is_constant()) {
     IfBuilder builder(this);
@@ -1390,7 +1425,7 @@ void CodeStubGraphBuilderBase::BuildCheckAndInstallOptimizedCode(
 
   // Now link a function into a list of optimized functions.
   HValue* optimized_functions_list = Add<HLoadNamedField>(
-      native_context, static_cast<HValue*>(NULL),
+      native_context, nullptr,
       HObjectAccess::ForContextSlot(Context::OPTIMIZED_FUNCTIONS_LIST));
   Add<HStoreNamedField>(js_function,
                         HObjectAccess::ForNextFunctionLinkPointer(),
@@ -1410,8 +1445,8 @@ void CodeStubGraphBuilderBase::BuildInstallCode(HValue* js_function,
   Add<HStoreNamedField>(js_function,
                         HObjectAccess::ForNextFunctionLinkPointer(),
                         graph()->GetConstantUndefined());
-  HValue* code_object = Add<HLoadNamedField>(
-      shared_info, static_cast<HValue*>(NULL), HObjectAccess::ForCodeOffset());
+  HValue* code_object = Add<HLoadNamedField>(shared_info, nullptr,
+                                             HObjectAccess::ForCodeOffset());
   Add<HStoreCodeEntry>(js_function, code_object);
 }
 
@@ -1428,8 +1463,8 @@ HInstruction* CodeStubGraphBuilderBase::LoadFromOptimizedCodeMap(
     HValue* field_offset_value = Add<HConstant>(field_offset);
     field_slot = AddUncasted<HAdd>(iterator, field_offset_value);
   }
-  HInstruction* field_entry = Add<HLoadKeyed>(optimized_map, field_slot,
-      static_cast<HValue*>(NULL), FAST_ELEMENTS);
+  HInstruction* field_entry =
+      Add<HLoadKeyed>(optimized_map, field_slot, nullptr, FAST_ELEMENTS);
   return field_entry;
 }
 
@@ -1441,8 +1476,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
   Counters* counters = isolate()->counters();
   IfBuilder is_optimized(this);
   HInstruction* optimized_map = Add<HLoadNamedField>(
-      shared_info, static_cast<HValue*>(NULL),
-      HObjectAccess::ForOptimizedCodeMap());
+      shared_info, nullptr, HObjectAccess::ForOptimizedCodeMap());
   HValue* null_constant = Add<HConstant>(0);
   is_optimized.If<HCompareObjectEqAndBranch>(optimized_map, null_constant);
   is_optimized.Then();
@@ -1475,8 +1509,7 @@ void CodeStubGraphBuilderBase::BuildInstallFromOptimizedCodeMap(
                                LoopBuilder::kPostDecrement,
                                shared_function_entry_length);
       HValue* array_length = Add<HLoadNamedField>(
-          optimized_map, static_cast<HValue*>(NULL),
-          HObjectAccess::ForFixedArrayLength());
+          optimized_map, nullptr, HObjectAccess::ForFixedArrayLength());
       HValue* start_pos = AddUncasted<HSub>(array_length,
                                             shared_function_entry_length);
       HValue* slot_iterator = loop_builder.BeginBody(start_pos,
@@ -1520,8 +1553,8 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
 
   // Create a new closure from the given function info in new space
   HValue* size = Add<HConstant>(JSFunction::kSize);
-  HInstruction* js_function = Add<HAllocate>(size, HType::JSObject(),
-                                             NOT_TENURED, JS_FUNCTION_TYPE);
+  HInstruction* js_function =
+      Add<HAllocate>(size, HType::JSObject(), NOT_TENURED, JS_FUNCTION_TYPE);
 
   int map_index = Context::FunctionMapIndex(casted_stub()->strict_mode(),
                                             casted_stub()->kind());
@@ -1530,8 +1563,7 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
   // as the map of the allocated object.
   HInstruction* native_context = BuildGetNativeContext();
   HInstruction* map_slot_value = Add<HLoadNamedField>(
-      native_context, static_cast<HValue*>(NULL),
-      HObjectAccess::ForContextSlot(map_index));
+      native_context, nullptr, HObjectAccess::ForContextSlot(map_index));
   Add<HStoreNamedField>(js_function, HObjectAccess::ForMap(), map_slot_value);
 
   // Initialize the rest of the function.
@@ -1543,9 +1575,8 @@ HValue* CodeStubGraphBuilder<FastNewClosureStub>::BuildCodeStub() {
                         empty_fixed_array);
   Add<HStoreNamedField>(js_function, HObjectAccess::ForPrototypeOrInitialMap(),
                         graph()->GetConstantHole());
-  Add<HStoreNamedField>(js_function,
-                        HObjectAccess::ForSharedFunctionInfoPointer(),
-                        shared_info);
+  Add<HStoreNamedField>(
+      js_function, HObjectAccess::ForSharedFunctionInfoPointer(), shared_info);
   Add<HStoreNamedField>(js_function, HObjectAccess::ForFunctionContextPointer(),
                         context());
 
@@ -1599,7 +1630,7 @@ HValue* CodeStubGraphBuilder<FastNewContextStub>::BuildCodeStub() {
 
   // Copy the global object from the previous context.
   HValue* global_object = Add<HLoadNamedField>(
-      context(), static_cast<HValue*>(NULL),
+      context(), nullptr,
       HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
   Add<HStoreNamedField>(function_context,
                         HObjectAccess::ForContextSlot(
@@ -1664,8 +1695,8 @@ template <>
 class CodeStubGraphBuilder<KeyedLoadGenericStub>
     : public CodeStubGraphBuilderBase {
  public:
-  CodeStubGraphBuilder(Isolate* isolate, KeyedLoadGenericStub* stub)
-      : CodeStubGraphBuilderBase(isolate, stub) {}
+  explicit CodeStubGraphBuilder(CompilationInfoWithZone* info)
+      : CodeStubGraphBuilderBase(info) {}
 
  protected:
   virtual HValue* BuildCodeStub();
@@ -1764,16 +1795,14 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
       (1 << Map::kHasIndexedInterceptor);
     BuildJSObjectCheck(receiver, bit_field_mask);
 
-    HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
-                                       HObjectAccess::ForMap());
+    HValue* map =
+        Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
 
     HValue* instance_type =
-      Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForMapInstanceType());
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
 
-    HValue* bit_field2 = Add<HLoadNamedField>(map,
-                                              static_cast<HValue*>(NULL),
-                                              HObjectAccess::ForMapBitField2());
+    HValue* bit_field2 =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
 
     IfBuilder kind_if(this);
     BuildFastElementLoad(&kind_if, receiver, key, instance_type, bit_field2,
@@ -1863,12 +1892,10 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
       BuildNonGlobalObjectCheck(receiver);
 
       HValue* properties = Add<HLoadNamedField>(
-          receiver, static_cast<HValue*>(NULL),
-          HObjectAccess::ForPropertiesPointer());
+          receiver, nullptr, HObjectAccess::ForPropertiesPointer());
 
       HValue* hash =
-          Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
-          HObjectAccess::ForNameHashField());
+          Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForNameHashField());
 
       hash = AddUncasted<HShr>(hash, Add<HConstant>(Name::kHashShift));
 
@@ -1887,8 +1914,8 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
           ExternalReference::keyed_lookup_cache_keys(isolate());
       HValue* cache_keys = Add<HConstant>(cache_keys_ref);
 
-      HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
-                                         HObjectAccess::ForMap());
+      HValue* map =
+          Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
       HValue* base_index = AddUncasted<HMul>(hash, Add<HConstant>(2));
       base_index->ClearFlag(HValue::kCanOverflow);
 
@@ -1910,13 +1937,13 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
               Add<HConstant>(probe_base + KeyedLookupCache::kKeyIndex));
           key_index->ClearFlag(HValue::kCanOverflow);
           HValue* map_to_check =
-              Add<HLoadKeyed>(cache_keys, map_index, static_cast<HValue*>(NULL),
-                              FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
+              Add<HLoadKeyed>(cache_keys, map_index, nullptr, FAST_ELEMENTS,
+                              NEVER_RETURN_HOLE, 0);
           lookup_if->If<HCompareObjectEqAndBranch>(map_to_check, map);
           lookup_if->And();
           HValue* key_to_check =
-              Add<HLoadKeyed>(cache_keys, key_index, static_cast<HValue*>(NULL),
-                              FAST_ELEMENTS, NEVER_RETURN_HOLE, 0);
+              Add<HLoadKeyed>(cache_keys, key_index, nullptr, FAST_ELEMENTS,
+                              NEVER_RETURN_HOLE, 0);
           lookup_if->If<HCompareObjectEqAndBranch>(key_to_check, key);
           lookup_if->Then();
           {
@@ -1926,9 +1953,9 @@ HValue* CodeStubGraphBuilder<KeyedLoadGenericStub>::BuildCodeStub() {
                 Add<HConstant>(cache_field_offsets_ref);
             HValue* index = AddUncasted<HAdd>(hash, Add<HConstant>(probe));
             index->ClearFlag(HValue::kCanOverflow);
-            HValue* property_index = Add<HLoadKeyed>(
-                cache_field_offsets, index, static_cast<HValue*>(NULL),
-                EXTERNAL_INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
+            HValue* property_index =
+                Add<HLoadKeyed>(cache_field_offsets, index, nullptr,
+                                EXTERNAL_INT32_ELEMENTS, NEVER_RETURN_HOLE, 0);
             Push(property_index);
           }
           lookup_if->Else();
@@ -1967,11 +1994,129 @@ Handle<Code> KeyedLoadGenericStub::GenerateCode() {
 }
 
 
+void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
+                                               HValue* array, HValue* map_index,
+                                               HValue* slot, HValue* vector) {
+  // The handler is at array[map_index + 1]. Compute this with a custom offset
+  // to HLoadKeyed.
+  int offset =
+      GetDefaultHeaderSizeForElementsKind(FAST_ELEMENTS) + kPointerSize;
+  HValue* handler_code = Add<HLoadKeyed>(
+      array, map_index, nullptr, FAST_ELEMENTS, NEVER_RETURN_HOLE, offset);
+  TailCallHandler(receiver, name, slot, vector, handler_code);
+}
+
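+// Offset arithmetic sketch for the load above (illustrative only; tagging
+// details elided): the custom offset is the FAST_ELEMENTS header size plus
+// kPointerSize, so the keyed load reads one element past map_index:
+//   array + header + kPointerSize + map_index * kPointerSize
+//     == address of element (map_index + 1)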
+
+void CodeStubGraphBuilderBase::TailCallHandler(HValue* receiver, HValue* name,
+                                               HValue* slot, HValue* vector,
+                                               HValue* handler_code) {
+  VectorLoadICDescriptor descriptor(isolate());
+  HValue* op_vals[] = {context(), receiver, name, slot, vector};
+  Add<HCallWithDescriptor>(handler_code, 0, descriptor,
+                           Vector<HValue*>(op_vals, 5), TAIL_CALL);
+  // We never return here, it is a tail call.
+}
+
+
+void CodeStubGraphBuilderBase::TailCallMiss(HValue* receiver, HValue* name,
+                                            HValue* slot, HValue* vector,
+                                            bool keyed_load) {
+  DCHECK(FLAG_vector_ics);
+  Add<HTailCallThroughMegamorphicCache>(
+      receiver, name, slot, vector,
+      HTailCallThroughMegamorphicCache::ComputeFlags(keyed_load, true));
+  // We never return here; this is a tail call.
+}
+
+
+void CodeStubGraphBuilderBase::HandleArrayCases(HValue* array, HValue* receiver,
+                                                HValue* name, HValue* slot,
+                                                HValue* vector,
+                                                bool keyed_load) {
+  IfBuilder if_receiver_heap_object(this);
+  if_receiver_heap_object.IfNot<HIsSmiAndBranch>(receiver);
+  if_receiver_heap_object.Then();
+  {
+    HConstant* constant_two = Add<HConstant>(2);
+    HConstant* constant_three = Add<HConstant>(3);
+
+    HValue* receiver_map = AddLoadMap(receiver, nullptr);
+    HValue* start =
+        keyed_load ? graph()->GetConstant1() : graph()->GetConstant0();
+    HValue* weak_cell = Add<HLoadKeyed>(array, start, nullptr, FAST_ELEMENTS,
+                                        ALLOW_RETURN_HOLE);
+    // Load the weak cell value. It may be Smi(0) or a map; compare it
+    // against the receiver_map in either case.
+    HValue* array_map = Add<HLoadNamedField>(weak_cell, nullptr,
+                                             HObjectAccess::ForWeakCellValue());
+
+    IfBuilder if_correct_map(this);
+    if_correct_map.If<HCompareObjectEqAndBranch>(receiver_map, array_map);
+    if_correct_map.Then();
+    { TailCallHandler(receiver, name, array, start, slot, vector); }
+    if_correct_map.Else();
+    {
+      // If our array has more elements, the IC is polymorphic. Look for the
+      // receiver map in the rest of the array.
+      HValue* length = AddLoadFixedArrayLength(array, nullptr);
+      LoopBuilder builder(this, context(), LoopBuilder::kPostIncrement,
+                          constant_two);
+      start = keyed_load ? constant_three : constant_two;
+      HValue* key = builder.BeginBody(start, length, Token::LT);
+      {
+        HValue* weak_cell = Add<HLoadKeyed>(array, key, nullptr, FAST_ELEMENTS,
+                                            ALLOW_RETURN_HOLE);
+        HValue* array_map = Add<HLoadNamedField>(
+            weak_cell, nullptr, HObjectAccess::ForWeakCellValue());
+        IfBuilder if_correct_poly_map(this);
+        if_correct_poly_map.If<HCompareObjectEqAndBranch>(receiver_map,
+                                                          array_map);
+        if_correct_poly_map.Then();
+        { TailCallHandler(receiver, name, array, key, slot, vector); }
+      }
+      builder.EndBody();
+    }
+    if_correct_map.End();
+  }
+}
+
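+// Assumed feedback-array layout behind the start indices above (inferred
+// from this diff, not stated in it): entries are (weak cell of map, handler)
+// pairs. Non-keyed ICs start the pairs at element 0; keyed ICs reserve
+// element 0 for the recorded name, so their first map cell sits at element 1
+// and the polymorphic scan continues at 3, 5, ..., with each handler one
+// slot after its map cell.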
+
 template <>
 HValue* CodeStubGraphBuilder<VectorLoadStub>::BuildCodeStub() {
   HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
-  Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
-  return receiver;
+  HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
+  HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
+  HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
+
+  // If the feedback is an array, then the IC is in the monomorphic or
+  // polymorphic state.
+  HValue* feedback =
+      Add<HLoadKeyed>(vector, slot, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+  IfBuilder array_checker(this);
+  array_checker.If<HCompareMap>(feedback,
+                                isolate()->factory()->fixed_array_map());
+  array_checker.Then();
+  { HandleArrayCases(feedback, receiver, name, slot, vector, false); }
+  array_checker.Else();
+  {
+    // Is the IC megamorphic?
+    IfBuilder mega_checker(this);
+    HConstant* megamorphic_symbol =
+        Add<HConstant>(isolate()->factory()->megamorphic_symbol());
+    mega_checker.If<HCompareObjectEqAndBranch>(feedback, megamorphic_symbol);
+    mega_checker.Then();
+    {
+      // Probe the stub cache.
+      Add<HTailCallThroughMegamorphicCache>(
+          receiver, name, slot, vector,
+          HTailCallThroughMegamorphicCache::ComputeFlags(false, false));
+    }
+    mega_checker.End();
+  }
+  array_checker.End();
+
+  TailCallMiss(receiver, name, slot, vector, false);
+  return graph()->GetConstant0();
 }
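// Control-flow summary of the stub above (a reading of this diff, not an
// authoritative spec):
//   feedback = vector[slot]
//   feedback is a FixedArray        -> monomorphic/polymorphic: HandleArrayCases
//   feedback == megamorphic_symbol  -> probe the stub cache
//   any other sentinel              -> fall through to TailCallMiss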
 
 
@@ -1981,8 +2126,65 @@ Handle<Code> VectorLoadStub::GenerateCode() { return DoGenerateCode(this); }
 template <>
 HValue* CodeStubGraphBuilder<VectorKeyedLoadStub>::BuildCodeStub() {
   HValue* receiver = GetParameter(VectorLoadICDescriptor::kReceiverIndex);
-  Add<HDeoptimize>("Always deopt", Deoptimizer::EAGER);
-  return receiver;
+  HValue* name = GetParameter(VectorLoadICDescriptor::kNameIndex);
+  HValue* slot = GetParameter(VectorLoadICDescriptor::kSlotIndex);
+  HValue* vector = GetParameter(VectorLoadICDescriptor::kVectorIndex);
+  HConstant* zero = graph()->GetConstant0();
+
+  // If the feedback is an array, then the IC is in the monomorphic or
+  // polymorphic state.
+  HValue* feedback =
+      Add<HLoadKeyed>(vector, slot, nullptr, FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+  IfBuilder array_checker(this);
+  array_checker.If<HCompareMap>(feedback,
+                                isolate()->factory()->fixed_array_map());
+  array_checker.Then();
+  {
+    // If feedback[0] is 0, then the IC has element handlers and name should
+    // be a Smi. If feedback[0] is a string, verify that it matches name.
+    HValue* recorded_name = Add<HLoadKeyed>(feedback, zero, nullptr,
+                                            FAST_ELEMENTS, ALLOW_RETURN_HOLE);
+
+    IfBuilder recorded_name_is_zero(this);
+    recorded_name_is_zero.If<HCompareObjectEqAndBranch>(recorded_name, zero);
+    recorded_name_is_zero.Then();
+    { Add<HCheckSmi>(name); }
+    recorded_name_is_zero.Else();
+    {
+      IfBuilder strings_match(this);
+      strings_match.IfNot<HCompareObjectEqAndBranch>(name, recorded_name);
+      strings_match.Then();
+      TailCallMiss(receiver, name, slot, vector, true);
+      strings_match.End();
+    }
+    recorded_name_is_zero.End();
+
+    HandleArrayCases(feedback, receiver, name, slot, vector, true);
+  }
+  array_checker.Else();
+  {
+    // Check if the IC is in generic state.
+    IfBuilder generic_checker(this);
+    HConstant* generic_symbol =
+        Add<HConstant>(isolate()->factory()->generic_symbol());
+    generic_checker.If<HCompareObjectEqAndBranch>(feedback, generic_symbol);
+    generic_checker.Then();
+    {
+      // Tail-call to the generic KeyedLoadIC, treating it like a handler.
+      Handle<Code> stub = KeyedLoadIC::generic_stub(isolate());
+      HValue* constant_stub = Add<HConstant>(stub);
+      LoadDescriptor descriptor(isolate());
+      HValue* op_vals[] = {context(), receiver, name};
+      Add<HCallWithDescriptor>(constant_stub, 0, descriptor,
+                               Vector<HValue*>(op_vals, 3), TAIL_CALL);
+      // We never return here; this is a tail call.
+    }
+    generic_checker.End();
+  }
+  array_checker.End();
+
+  TailCallMiss(receiver, name, slot, vector, true);
+  return zero;
 }
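// Per this diff, the keyed variant above differs from VectorLoadStub in two
// ways: feedback[0] is validated first (0 means element handlers, so name
// must be a Smi; a string must equal name), and the generic state is keyed
// to generic_symbol, tail-calling the generic KeyedLoadIC rather than
// probing the stub cache.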
 
 
@@ -1998,14 +2200,15 @@ Handle<Code> MegamorphicLoadStub::GenerateCode() {
 
 template <>
 HValue* CodeStubGraphBuilder<MegamorphicLoadStub>::BuildCodeStub() {
-  // The return address is on the stack.
   HValue* receiver = GetParameter(LoadDescriptor::kReceiverIndex);
   HValue* name = GetParameter(LoadDescriptor::kNameIndex);
 
+  // We shouldn't generate this when FLAG_vector_ics is true because the
+  // megamorphic case is handled as part of the default stub.
+  DCHECK(!FLAG_vector_ics);
+
   // Probe the stub cache.
-  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
-      Code::ComputeHandlerFlags(Code::LOAD_IC));
-  Add<HTailCallThroughMegamorphicCache>(receiver, name, flags);
+  Add<HTailCallThroughMegamorphicCache>(receiver, name);
 
   // We never continue.
   return graph()->GetConstant0();
index 7442f3e..895569d 100644 (file)
@@ -75,7 +75,6 @@ bool CodeStub::FindCodeInCache(Code** code_out) {
 
 
 void CodeStub::RecordCodeGeneration(Handle<Code> code) {
-  IC::RegisterWeakMapDependency(code);
   std::ostringstream os;
   os << *this;
   PROFILE(isolate(),
index 06eff69..8448e55 100644 (file)
@@ -69,6 +69,7 @@ namespace internal {
   V(InternalArrayNoArgumentConstructor)     \
   V(InternalArraySingleArgumentConstructor) \
   V(KeyedLoadGeneric)                       \
+  V(LoadScriptContextField)                 \
   V(LoadDictionaryElement)                  \
   V(LoadFastElement)                        \
   V(MegamorphicLoad)                        \
@@ -76,6 +77,7 @@ namespace internal {
   V(NumberToString)                         \
   V(RegExpConstructResult)                  \
   V(StoreFastElement)                       \
+  V(StoreScriptContextField)                \
   V(StringAdd)                              \
   V(ToBoolean)                              \
   V(TransitionElementsKind)                 \
@@ -92,9 +94,7 @@ namespace internal {
 
 // List of code stubs only used on ARM 32 bits platforms.
 #if V8_TARGET_ARCH_ARM
-#define CODE_STUB_LIST_ARM(V) \
-  V(DirectCEntry)             \
-  V(WriteInt32ToHeapNumber)
+#define CODE_STUB_LIST_ARM(V) V(DirectCEntry)
 
 #else
 #define CODE_STUB_LIST_ARM(V)
@@ -113,17 +113,15 @@ namespace internal {
 
 // List of code stubs only used on MIPS platforms.
 #if V8_TARGET_ARCH_MIPS
-#define CODE_STUB_LIST_MIPS(V)  \
-  V(DirectCEntry)               \
-  V(RestoreRegistersState)      \
-  V(StoreRegistersState)        \
-  V(WriteInt32ToHeapNumber)
+#define CODE_STUB_LIST_MIPS(V) \
+  V(DirectCEntry)              \
+  V(RestoreRegistersState)     \
+  V(StoreRegistersState)
 #elif V8_TARGET_ARCH_MIPS64
-#define CODE_STUB_LIST_MIPS(V)  \
-  V(DirectCEntry)               \
-  V(RestoreRegistersState)      \
-  V(StoreRegistersState)        \
-  V(WriteInt32ToHeapNumber)
+#define CODE_STUB_LIST_MIPS(V) \
+  V(DirectCEntry)              \
+  V(RestoreRegistersState)     \
+  V(StoreRegistersState)
 #else
 #define CODE_STUB_LIST_MIPS(V)
 #endif
@@ -291,54 +289,52 @@ class CodeStub BASE_EMBEDDED {
   DISALLOW_COPY_AND_ASSIGN(NAME)
 
 
-#define DEFINE_CODE_STUB(NAME, SUPER)              \
- protected:                                        \
-  virtual inline Major MajorKey() const OVERRIDE { \
-    return NAME;                                   \
-  };                                               \
+#define DEFINE_CODE_STUB(NAME, SUPER)                      \
+ protected:                                                \
+  inline Major MajorKey() const OVERRIDE { return NAME; }; \
   DEFINE_CODE_STUB_BASE(NAME##Stub, SUPER)
 
 
-#define DEFINE_PLATFORM_CODE_STUB(NAME, SUPER)          \
- private:                                               \
-  virtual void Generate(MacroAssembler* masm) OVERRIDE; \
+#define DEFINE_PLATFORM_CODE_STUB(NAME, SUPER)  \
+ private:                                       \
+  void Generate(MacroAssembler* masm) OVERRIDE; \
   DEFINE_CODE_STUB(NAME, SUPER)
 
 
-#define DEFINE_HYDROGEN_CODE_STUB(NAME, SUPER)                                \
- public:                                                                      \
-  virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE; \
-  virtual Handle<Code> GenerateCode() OVERRIDE;                               \
+#define DEFINE_HYDROGEN_CODE_STUB(NAME, SUPER)                        \
+ public:                                                              \
+  void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE; \
+  Handle<Code> GenerateCode() OVERRIDE;                               \
   DEFINE_CODE_STUB(NAME, SUPER)
 
-#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER)   \
- public:                                        \
-  virtual Handle<Code> GenerateCode() OVERRIDE; \
+#define DEFINE_HANDLER_CODE_STUB(NAME, SUPER) \
+ public:                                      \
+  Handle<Code> GenerateCode() OVERRIDE;       \
   DEFINE_CODE_STUB(NAME, SUPER)
 
-#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME)                            \
- public:                                                                  \
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
-    return NAME##Descriptor(isolate());                                   \
+#define DEFINE_CALL_INTERFACE_DESCRIPTOR(NAME)                    \
+ public:                                                          \
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
+    return NAME##Descriptor(isolate());                           \
   }
 
 // There are some code stubs we just can't describe right now with a
 // CallInterfaceDescriptor. This macro isolates those cases: an attempt to
 // retrieve a descriptor will fail.
-#define DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR()                           \
- public:                                                                  \
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
-    UNREACHABLE();                                                        \
-    return CallInterfaceDescriptor();                                     \
+#define DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR()                   \
+ public:                                                          \
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE { \
+    UNREACHABLE();                                                \
+    return CallInterfaceDescriptor();                             \
   }
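// Style note on the macro rewrites above (general C++11, not V8-specific):
// a member declared with override is necessarily virtual, so the explicit
// keyword is redundant. For example:
//   struct Base { virtual int f() const; };
//   struct Derived : Base { int f() const OVERRIDE; };  // still virtual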
 
 
 class PlatformCodeStub : public CodeStub {
  public:
   // Retrieve the code for the stub. Generate the code if needed.
-  virtual Handle<Code> GenerateCode() OVERRIDE;
+  Handle<Code> GenerateCode() OVERRIDE;
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::STUB; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::STUB; }
 
  protected:
   explicit PlatformCodeStub(Isolate* isolate) : CodeStub(isolate) {}
@@ -438,7 +434,7 @@ class HydrogenCodeStub : public CodeStub {
     INITIALIZED
   };
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::STUB; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::STUB; }
 
   template<class SubClass>
   static Handle<Code> GetUninitialized(Isolate* isolate) {
@@ -447,7 +443,7 @@ class HydrogenCodeStub : public CodeStub {
   }
 
   // Retrieve the code for the stub. Generate the code if needed.
-  virtual Handle<Code> GenerateCode() = 0;
+  Handle<Code> GenerateCode() OVERRIDE = 0;
 
   bool IsUninitialized() const { return IsMissBits::decode(minor_key_); }
 
@@ -577,10 +573,11 @@ class FastNewClosureStub : public HydrogenCodeStub {
   bool is_arrow() const { return IsArrowFunction(kind()); }
   bool is_generator() const { return IsGeneratorFunction(kind()); }
   bool is_concise_method() const { return IsConciseMethod(kind()); }
+  bool is_default_constructor() const { return IsDefaultConstructor(kind()); }
 
  private:
   class StrictModeBits : public BitField<StrictMode, 0, 1> {};
-  class FunctionKindBits : public BitField<FunctionKind, 1, 3> {};
+  class FunctionKindBits : public BitField<FunctionKind, 1, 4> {};
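+  // (Widened from 3 to 4 bits, presumably so FunctionKind can carry the
+  // default-constructor flag tested by is_default_constructor() above.)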
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(FastNewClosure);
   DEFINE_HYDROGEN_CODE_STUB(FastNewClosure, HydrogenCodeStub);
@@ -679,7 +676,7 @@ class InstanceofStub: public PlatformCodeStub {
   static Register left() { return InstanceofDescriptor::left(); }
   static Register right() { return InstanceofDescriptor::right(); }
 
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
     if (HasArgsInRegisters()) {
       return InstanceofDescriptor(isolate());
     }
@@ -699,7 +696,7 @@ class InstanceofStub: public PlatformCodeStub {
     return (flags() & kReturnTrueFalseObject) != 0;
   }
 
-  virtual void PrintName(std::ostream& os) const OVERRIDE;  // NOLINT
+  void PrintName(std::ostream& os) const OVERRIDE;  // NOLINT
 
   class FlagBits : public BitField<Flags, 0, 3> {};
 
@@ -730,7 +727,7 @@ class ArrayConstructorStub: public PlatformCodeStub {
   void GenerateDispatchToArrayStub(MacroAssembler* masm,
                                    AllocationSiteOverrideMode mode);
 
-  virtual void PrintName(std::ostream& os) const OVERRIDE;  // NOLINT
+  void PrintName(std::ostream& os) const OVERRIDE;  // NOLINT
 
   class ArgumentCountBits : public BitField<ArgumentCountKey, 0, 2> {};
 
@@ -760,7 +757,7 @@ class MathPowStub: public PlatformCodeStub {
     minor_key_ = ExponentTypeBits::encode(exponent_type);
   }
 
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
     if (exponent_type() == TAGGED) {
       return MathPowTaggedDescriptor(isolate());
     } else if (exponent_type() == INTEGER) {
@@ -793,11 +790,11 @@ class CallICStub: public PlatformCodeStub {
     return state.arg_count();
   }
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::CALL_IC; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::CALL_IC; }
 
-  virtual InlineCacheState GetICState() const OVERRIDE { return DEFAULT; }
+  InlineCacheState GetICState() const OVERRIDE { return DEFAULT; }
 
-  virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+  ExtraICState GetExtraICState() const FINAL {
     return static_cast<ExtraICState>(minor_key_);
   }
 
@@ -816,7 +813,7 @@ class CallICStub: public PlatformCodeStub {
   void GenerateMiss(MacroAssembler* masm);
 
  private:
-  virtual void PrintState(std::ostream& os) const OVERRIDE;  // NOLINT
+  void PrintState(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(CallFunctionWithFeedback);
   DEFINE_PLATFORM_CODE_STUB(CallIC, PlatformCodeStub);
@@ -828,12 +825,10 @@ class CallIC_ArrayStub: public CallICStub {
   CallIC_ArrayStub(Isolate* isolate, const CallICState& state_in)
       : CallICStub(isolate, state_in) {}
 
-  virtual InlineCacheState GetICState() const FINAL OVERRIDE {
-    return MONOMORPHIC;
-  }
+  InlineCacheState GetICState() const FINAL { return MONOMORPHIC; }
 
  private:
-  virtual void PrintState(std::ostream& os) const OVERRIDE;  // NOLINT
+  void PrintState(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DEFINE_PLATFORM_CODE_STUB(CallIC_Array, CallICStub);
 };
@@ -845,12 +840,12 @@ class FunctionPrototypeStub : public PlatformCodeStub {
   explicit FunctionPrototypeStub(Isolate* isolate)
       : PlatformCodeStub(isolate) {}
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
 
   // TODO(mvstanton): only the receiver register is accessed. When this is
   // translated to a hydrogen code stub, a new CallInterfaceDescriptor
   // should be created that just uses that register for more efficient code.
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
     if (FLAG_vector_ics) {
       return VectorLoadICDescriptor(isolate());
     }
@@ -867,8 +862,8 @@ class LoadIndexedInterceptorStub : public PlatformCodeStub {
   explicit LoadIndexedInterceptorStub(Isolate* isolate)
       : PlatformCodeStub(isolate) {}
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
-  virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
+  Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
   DEFINE_PLATFORM_CODE_STUB(LoadIndexedInterceptor, PlatformCodeStub);
@@ -880,8 +875,8 @@ class LoadIndexedStringStub : public PlatformCodeStub {
   explicit LoadIndexedStringStub(Isolate* isolate)
       : PlatformCodeStub(isolate) {}
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
-  virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
+  Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(Load);
   DEFINE_PLATFORM_CODE_STUB(LoadIndexedString, PlatformCodeStub);
@@ -890,13 +885,13 @@ class LoadIndexedStringStub : public PlatformCodeStub {
 
 class HandlerStub : public HydrogenCodeStub {
  public:
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
-  virtual ExtraICState GetExtraICState() const OVERRIDE { return kind(); }
-  virtual InlineCacheState GetICState() const OVERRIDE { return MONOMORPHIC; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::HANDLER; }
+  ExtraICState GetExtraICState() const OVERRIDE { return kind(); }
+  InlineCacheState GetICState() const OVERRIDE { return MONOMORPHIC; }
 
-  virtual void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE;
+  void InitializeDescriptor(CodeStubDescriptor* descriptor) OVERRIDE;
 
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
 
  protected:
   explicit HandlerStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
@@ -920,8 +915,8 @@ class LoadFieldStub: public HandlerStub {
   }
 
  protected:
-  virtual Code::Kind kind() const { return Code::LOAD_IC; }
-  virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+  Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
+  Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
 
  private:
   class LoadFieldByIndexBits : public BitField<int, 0, 13> {};
@@ -936,8 +931,8 @@ class KeyedLoadSloppyArgumentsStub : public HandlerStub {
       : HandlerStub(isolate) {}
 
  protected:
-  virtual Code::Kind kind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
-  virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+  Code::Kind kind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
+  Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
 
  private:
   DEFINE_HANDLER_CODE_STUB(KeyedLoadSloppyArguments, HandlerStub);
@@ -956,8 +951,8 @@ class LoadConstantStub : public HandlerStub {
   }
 
  protected:
-  virtual Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
-  virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+  Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
+  Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
 
  private:
   class ConstantIndexBits : public BitField<int, 0, kSubMinorKeyBits> {};
@@ -971,8 +966,8 @@ class StringLengthStub: public HandlerStub {
   explicit StringLengthStub(Isolate* isolate) : HandlerStub(isolate) {}
 
  protected:
-  virtual Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
-  virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+  Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
+  Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
 
   DEFINE_HANDLER_CODE_STUB(StringLength, HandlerStub);
 };
@@ -1000,8 +995,8 @@ class StoreFieldStub : public HandlerStub {
   }
 
  protected:
-  virtual Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
-  virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+  Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
+  Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
 
  private:
   class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
@@ -1050,11 +1045,11 @@ class StoreTransitionStub : public HandlerStub {
     return StoreModeBits::decode(sub_minor_key());
   }
 
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE;
 
  protected:
-  virtual Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
-  virtual Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+  Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
+  Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
 
  private:
   class StoreFieldByIndexBits : public BitField<int, 0, 13> {};
@@ -1092,7 +1087,7 @@ class StoreGlobalStub : public HandlerStub {
     }
   }
 
-  virtual Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
+  Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
 
   bool is_constant() const { return IsConstantBits::decode(sub_minor_key()); }
 
@@ -1174,15 +1169,11 @@ class BinaryOpICStub : public HydrogenCodeStub {
 
   static void GenerateAheadOfTime(Isolate* isolate);
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE {
-    return Code::BINARY_OP_IC;
-  }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::BINARY_OP_IC; }
 
-  virtual InlineCacheState GetICState() const FINAL OVERRIDE {
-    return state().GetICState();
-  }
+  InlineCacheState GetICState() const FINAL { return state().GetICState(); }
 
-  virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+  ExtraICState GetExtraICState() const FINAL {
     return static_cast<ExtraICState>(sub_minor_key());
   }
 
@@ -1190,7 +1181,7 @@ class BinaryOpICStub : public HydrogenCodeStub {
     return BinaryOpICState(isolate(), GetExtraICState());
   }
 
-  virtual void PrintState(std::ostream& os) const FINAL OVERRIDE;  // NOLINT
+  void PrintState(std::ostream& os) const FINAL;  // NOLINT
 
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kLeft = 0;
@@ -1223,19 +1214,15 @@ class BinaryOpICWithAllocationSiteStub FINAL : public PlatformCodeStub {
     return CodeStub::GetCodeCopy(pattern);
   }
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE {
-    return Code::BINARY_OP_IC;
-  }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::BINARY_OP_IC; }
 
-  virtual InlineCacheState GetICState() const OVERRIDE {
-    return state().GetICState();
-  }
+  InlineCacheState GetICState() const OVERRIDE { return state().GetICState(); }
 
-  virtual ExtraICState GetExtraICState() const OVERRIDE {
+  ExtraICState GetExtraICState() const OVERRIDE {
     return static_cast<ExtraICState>(minor_key_);
   }
 
-  virtual void PrintState(std::ostream& os) const OVERRIDE;  // NOLINT
+  void PrintState(std::ostream& os) const OVERRIDE;  // NOLINT
 
  private:
   BinaryOpICState state() const {
@@ -1260,9 +1247,7 @@ class BinaryOpWithAllocationSiteStub FINAL : public BinaryOpICStub {
   BinaryOpWithAllocationSiteStub(Isolate* isolate, const BinaryOpICState& state)
       : BinaryOpICStub(isolate, state) {}
 
-  virtual Code::Kind GetCodeKind() const FINAL OVERRIDE {
-    return Code::STUB;
-  }
+  Code::Kind GetCodeKind() const FINAL { return Code::STUB; }
 
   // Parameters accessed via CodeStubGraphBuilder::GetParameter()
   static const int kAllocationSite = 0;
@@ -1311,7 +1296,7 @@ class StringAddStub FINAL : public HydrogenCodeStub {
   class StringAddFlagsBits: public BitField<StringAddFlags, 0, 2> {};
   class PretenureFlagBits: public BitField<PretenureFlag, 2, 1> {};
 
-  virtual void PrintBaseName(std::ostream& os) const OVERRIDE;  // NOLINT
+  void PrintBaseName(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(StringAdd);
   DEFINE_HYDROGEN_CODE_STUB(StringAdd, HydrogenCodeStub);
@@ -1330,7 +1315,7 @@ class CompareICStub : public PlatformCodeStub {
 
   void set_known_map(Handle<Map> map) { known_map_ = map; }
 
-  virtual InlineCacheState GetICState() const OVERRIDE;
+  InlineCacheState GetICState() const OVERRIDE;
 
   Token::Value op() const {
     return static_cast<Token::Value>(Token::EQ + OpBits::decode(minor_key_));
@@ -1345,7 +1330,7 @@ class CompareICStub : public PlatformCodeStub {
   CompareICState::State state() const { return StateBits::decode(minor_key_); }
 
  private:
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::COMPARE_IC; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::COMPARE_IC; }
 
   void GenerateSmis(MacroAssembler* masm);
   void GenerateNumbers(MacroAssembler* masm);
@@ -1360,9 +1345,9 @@ class CompareICStub : public PlatformCodeStub {
   bool strict() const { return op() == Token::EQ_STRICT; }
   Condition GetCondition() const;
 
-  virtual void AddToSpecialCache(Handle<Code> new_object) OVERRIDE;
-  virtual bool FindCodeInSpecialCache(Code** code_out) OVERRIDE;
-  virtual bool UseSpecialCache() OVERRIDE {
+  void AddToSpecialCache(Handle<Code> new_object) OVERRIDE;
+  bool FindCodeInSpecialCache(Code** code_out) OVERRIDE;
+  bool UseSpecialCache() OVERRIDE {
     return state() == CompareICState::KNOWN_OBJECT;
   }
 
@@ -1398,7 +1383,7 @@ class CompareNilICStub : public HydrogenCodeStub  {
     return CompareNilICStub(isolate, nil, UNINITIALIZED).GetCode();
   }
 
-  virtual InlineCacheState GetICState() const OVERRIDE {
+  InlineCacheState GetICState() const OVERRIDE {
     State state = this->state();
     if (state.Contains(GENERIC)) {
       return MEGAMORPHIC;
@@ -1409,13 +1394,9 @@ class CompareNilICStub : public HydrogenCodeStub  {
     }
   }
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE {
-    return Code::COMPARE_NIL_IC;
-  }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::COMPARE_NIL_IC; }
 
-  virtual ExtraICState GetExtraICState() const OVERRIDE {
-    return sub_minor_key();
-  }
+  ExtraICState GetExtraICState() const OVERRIDE { return sub_minor_key(); }
 
   void UpdateStatus(Handle<Object> object);
 
@@ -1427,8 +1408,8 @@ class CompareNilICStub : public HydrogenCodeStub  {
     set_sub_minor_key(TypesBits::update(sub_minor_key(), 0));
   }
 
-  virtual void PrintState(std::ostream& os) const OVERRIDE;     // NOLINT
-  virtual void PrintBaseName(std::ostream& os) const OVERRIDE;  // NOLINT
+  void PrintState(std::ostream& os) const OVERRIDE;     // NOLINT
+  void PrintBaseName(std::ostream& os) const OVERRIDE;  // NOLINT
 
  private:
   CompareNilICStub(Isolate* isolate, NilValue nil,
@@ -1515,9 +1496,9 @@ class JSEntryStub : public PlatformCodeStub {
   }
 
  private:
-  virtual void FinishCode(Handle<Code> code) OVERRIDE;
+  void FinishCode(Handle<Code> code) OVERRIDE;
 
-  virtual void PrintName(std::ostream& os) const OVERRIDE {  // NOLINT
+  void PrintName(std::ostream& os) const OVERRIDE {  // NOLINT
     os << (type() == StackFrame::ENTRY ? "JSEntryStub"
                                        : "JSConstructEntryStub");
   }
@@ -1548,7 +1529,7 @@ class ArgumentsAccessStub: public PlatformCodeStub {
     minor_key_ = TypeBits::encode(type);
   }
 
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
     if (type() == READ_ELEMENT) {
       return ArgumentsAccessReadDescriptor(isolate());
     }
@@ -1563,7 +1544,7 @@ class ArgumentsAccessStub: public PlatformCodeStub {
   void GenerateNewSloppyFast(MacroAssembler* masm);
   void GenerateNewSloppySlow(MacroAssembler* masm);
 
-  virtual void PrintName(std::ostream& os) const OVERRIDE;  // NOLINT
+  void PrintName(std::ostream& os) const OVERRIDE;  // NOLINT
 
   class TypeBits : public BitField<Type, 0, 2> {};
 
@@ -1617,7 +1598,7 @@ class CallFunctionStub: public PlatformCodeStub {
 
   bool NeedsChecks() const { return flags() != WRAP_AND_CALL; }
 
-  virtual void PrintName(std::ostream& os) const OVERRIDE;  // NOLINT
+  void PrintName(std::ostream& os) const OVERRIDE;  // NOLINT
 
   // Minor key encoding in 32 bits with Bitfield <Type, shift, size>.
   class FlagBits : public BitField<CallFunctionFlags, 0, 2> {};
@@ -1636,7 +1617,7 @@ class CallConstructStub: public PlatformCodeStub {
     minor_key_ = FlagBits::encode(flags);
   }
 
-  virtual void FinishCode(Handle<Code> code) OVERRIDE {
+  void FinishCode(Handle<Code> code) OVERRIDE {
     code->set_has_function_cache(RecordCallTarget());
   }
 
@@ -1647,7 +1628,7 @@ class CallConstructStub: public PlatformCodeStub {
     return (flags() & RECORD_CONSTRUCTOR_TARGET) != 0;
   }
 
-  virtual void PrintName(std::ostream& os) const OVERRIDE;  // NOLINT
+  void PrintName(std::ostream& os) const OVERRIDE;  // NOLINT
 
   class FlagBits : public BitField<CallConstructorFlags, 0, 1> {};
 
@@ -1837,7 +1818,7 @@ class LoadDictionaryElementStub : public HydrogenCodeStub {
   explicit LoadDictionaryElementStub(Isolate* isolate)
       : HydrogenCodeStub(isolate) {}
 
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
     if (FLAG_vector_ics) {
       return VectorLoadICDescriptor(isolate());
     }
@@ -1852,10 +1833,8 @@ class KeyedLoadGenericStub : public HydrogenCodeStub {
  public:
   explicit KeyedLoadGenericStub(Isolate* isolate) : HydrogenCodeStub(isolate) {}
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE {
-    return Code::KEYED_LOAD_IC;
-  }
-  virtual InlineCacheState GetICState() const OVERRIDE { return GENERIC; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
+  InlineCacheState GetICState() const OVERRIDE { return GENERIC; }
 
   // Since KeyedLoadGeneric stub doesn't miss (simply calls runtime), it
   // doesn't need to use the VectorLoadICDescriptor for the case when
@@ -1873,11 +1852,11 @@ class LoadICTrampolineStub : public PlatformCodeStub {
     minor_key_ = state.GetExtraICState();
   }
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
 
-  virtual InlineCacheState GetICState() const FINAL OVERRIDE { return DEFAULT; }
+  InlineCacheState GetICState() const FINAL { return DEFAULT; }
 
-  virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+  ExtraICState GetExtraICState() const FINAL {
     return static_cast<ExtraICState>(minor_key_);
   }
 
@@ -1896,9 +1875,7 @@ class KeyedLoadICTrampolineStub : public LoadICTrampolineStub {
   explicit KeyedLoadICTrampolineStub(Isolate* isolate)
       : LoadICTrampolineStub(isolate, LoadICState(0)) {}
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE {
-    return Code::KEYED_LOAD_IC;
-  }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
 
   DEFINE_PLATFORM_CODE_STUB(KeyedLoadICTrampoline, LoadICTrampolineStub);
 };
@@ -1911,17 +1888,15 @@ class MegamorphicLoadStub : public HydrogenCodeStub {
     set_sub_minor_key(state.GetExtraICState());
   }
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
 
-  virtual InlineCacheState GetICState() const FINAL OVERRIDE {
-    return MEGAMORPHIC;
-  }
+  InlineCacheState GetICState() const FINAL { return MEGAMORPHIC; }
 
-  virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+  ExtraICState GetExtraICState() const FINAL {
     return static_cast<ExtraICState>(sub_minor_key());
   }
 
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
     if (FLAG_vector_ics) {
       return VectorLoadICDescriptor(isolate());
     }
@@ -1939,11 +1914,11 @@ class VectorLoadStub : public HydrogenCodeStub {
     set_sub_minor_key(state.GetExtraICState());
   }
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::LOAD_IC; }
 
-  virtual InlineCacheState GetICState() const FINAL OVERRIDE { return DEFAULT; }
+  InlineCacheState GetICState() const FINAL { return DEFAULT; }
 
-  virtual ExtraICState GetExtraICState() const FINAL OVERRIDE {
+  ExtraICState GetExtraICState() const FINAL {
     return static_cast<ExtraICState>(sub_minor_key());
   }
 
@@ -1960,9 +1935,7 @@ class VectorKeyedLoadStub : public VectorLoadStub {
   explicit VectorKeyedLoadStub(Isolate* isolate)
       : VectorLoadStub(isolate, LoadICState(0)) {}
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE {
-    return Code::KEYED_LOAD_IC;
-  }
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::KEYED_LOAD_IC; }
 
   DEFINE_CALL_INTERFACE_DESCRIPTOR(VectorLoadIC);
   DEFINE_HYDROGEN_CODE_STUB(VectorKeyedLoad, VectorLoadStub);
@@ -1982,7 +1955,7 @@ class DoubleToIStub : public PlatformCodeStub {
                  SSE3Bits::encode(CpuFeatures::IsSupported(SSE3) ? 1 : 0);
   }
 
-  virtual bool SometimesSetsUpAFrame() OVERRIDE { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
  private:
   Register source() const {
@@ -2016,6 +1989,66 @@ class DoubleToIStub : public PlatformCodeStub {
 };
 
 
+class ScriptContextFieldStub : public HandlerStub {
+ public:
+  ScriptContextFieldStub(Isolate* isolate,
+                         const ScriptContextTable::LookupResult* lookup_result)
+      : HandlerStub(isolate) {
+    DCHECK(Accepted(lookup_result));
+    set_sub_minor_key(ContextIndexBits::encode(lookup_result->context_index) |
+                      SlotIndexBits::encode(lookup_result->slot_index));
+  }
+
+  int context_index() const {
+    return ContextIndexBits::decode(sub_minor_key());
+  }
+
+  int slot_index() const { return SlotIndexBits::decode(sub_minor_key()); }
+
+  static bool Accepted(const ScriptContextTable::LookupResult* lookup_result) {
+    return ContextIndexBits::is_valid(lookup_result->context_index) &&
+           SlotIndexBits::is_valid(lookup_result->slot_index);
+  }
+
+ private:
+  static const int kContextIndexBits = 13;
+  static const int kSlotIndexBits = 13;
+  class ContextIndexBits : public BitField<int, 0, kContextIndexBits> {};
+  class SlotIndexBits
+      : public BitField<int, kContextIndexBits, kSlotIndexBits> {};
+
+  Code::StubType GetStubType() OVERRIDE { return Code::FAST; }
+
+  DEFINE_CODE_STUB_BASE(ScriptContextFieldStub, HandlerStub);
+};
+
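+// Packing sketch for the two 13-bit fields above (illustrative values):
+// context_index occupies bits 0..12 and slot_index bits 13..25 of
+// sub_minor_key, e.g. context_index = 2, slot_index = 5 gives
+//   sub_minor_key = 2 | (5 << 13) = 40962.
+// Accepted() rejects indices >= 2^13 == 8192, which would not round-trip.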
+
+class LoadScriptContextFieldStub : public ScriptContextFieldStub {
+ public:
+  LoadScriptContextFieldStub(
+      Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
+      : ScriptContextFieldStub(isolate, lookup_result) {}
+
+ private:
+  Code::Kind kind() const OVERRIDE { return Code::LOAD_IC; }
+
+  DEFINE_HANDLER_CODE_STUB(LoadScriptContextField, ScriptContextFieldStub);
+};
+
+
+class StoreScriptContextFieldStub : public ScriptContextFieldStub {
+ public:
+  StoreScriptContextFieldStub(
+      Isolate* isolate, const ScriptContextTable::LookupResult* lookup_result)
+      : ScriptContextFieldStub(isolate, lookup_result) {}
+
+ private:
+  Code::Kind kind() const OVERRIDE { return Code::STORE_IC; }
+
+  DEFINE_HANDLER_CODE_STUB(StoreScriptContextField, ScriptContextFieldStub);
+};
+
+
 class LoadFastElementStub : public HydrogenCodeStub {
  public:
   LoadFastElementStub(Isolate* isolate, bool is_js_array,
@@ -2035,7 +2068,7 @@ class LoadFastElementStub : public HydrogenCodeStub {
   class ElementsKindBits: public BitField<ElementsKind, 0, 8> {};
   class IsJSArrayBits: public BitField<bool, 8, 1> {};
 
-  virtual CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
+  CallInterfaceDescriptor GetCallInterfaceDescriptor() OVERRIDE {
     if (FLAG_vector_ics) {
       return VectorLoadICDescriptor(isolate());
     }
@@ -2171,7 +2204,7 @@ class ArrayNoArgumentConstructorStub : public ArrayConstructorStubBase {
   }
 
  private:
-  virtual void PrintName(std::ostream& os) const OVERRIDE {  // NOLINT
+  void PrintName(std::ostream& os) const OVERRIDE {  // NOLINT
     BasePrintName(os, "ArrayNoArgumentConstructorStub");
   }
 
@@ -2191,7 +2224,7 @@ class ArraySingleArgumentConstructorStub : public ArrayConstructorStubBase {
   }
 
  private:
-  virtual void PrintName(std::ostream& os) const OVERRIDE {  // NOLINT
+  void PrintName(std::ostream& os) const OVERRIDE {  // NOLINT
     BasePrintName(os, "ArraySingleArgumentConstructorStub");
   }
 
@@ -2211,7 +2244,7 @@ class ArrayNArgumentsConstructorStub : public ArrayConstructorStubBase {
   }
 
  private:
-  virtual void PrintName(std::ostream& os) const OVERRIDE {  // NOLINT
+  void PrintName(std::ostream& os) const OVERRIDE {  // NOLINT
     BasePrintName(os, "ArrayNArgumentsConstructorStub");
   }
 
@@ -2355,22 +2388,18 @@ class ToBooleanStub: public HydrogenCodeStub {
   Types types() const { return Types(TypesBits::decode(sub_minor_key())); }
   ResultMode mode() const { return ResultModeBits::decode(sub_minor_key()); }
 
-  virtual Code::Kind GetCodeKind() const OVERRIDE {
-    return Code::TO_BOOLEAN_IC;
-  }
-  virtual void PrintState(std::ostream& os) const OVERRIDE;  // NOLINT
+  Code::Kind GetCodeKind() const OVERRIDE { return Code::TO_BOOLEAN_IC; }
+  void PrintState(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual bool SometimesSetsUpAFrame() OVERRIDE { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
   static Handle<Code> GetUninitialized(Isolate* isolate) {
     return ToBooleanStub(isolate, UNINITIALIZED).GetCode();
   }
 
-  virtual ExtraICState GetExtraICState() const OVERRIDE {
-    return types().ToIntegral();
-  }
+  ExtraICState GetExtraICState() const OVERRIDE { return types().ToIntegral(); }
 
-  virtual InlineCacheState GetICState() const OVERRIDE {
+  InlineCacheState GetICState() const OVERRIDE {
     if (types().IsEmpty()) {
       return ::v8::internal::UNINITIALIZED;
     } else {
@@ -2482,7 +2511,7 @@ class ProfileEntryHookStub : public PlatformCodeStub {
   explicit ProfileEntryHookStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
 
   // The profile entry hook function is not allowed to cause a GC.
-  virtual bool SometimesSetsUpAFrame() OVERRIDE { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
   // Generates a call to the entry hook if it's enabled.
   static void MaybeCallEntryHook(MacroAssembler* masm);
@@ -2507,7 +2536,7 @@ class StoreBufferOverflowStub : public PlatformCodeStub {
   }
 
   static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-  virtual bool SometimesSetsUpAFrame() OVERRIDE { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
  private:
   bool save_doubles() const { return SaveDoublesBits::decode(minor_key_); }
index 92d45a9..c1a9a27 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-'use strict';
+"use strict";
 
 
 // This file relies on the fact that the following declaration has been made
index 6a32d69..94bda70 100644 (file)
@@ -30,7 +30,7 @@ function SetConstructor(iterable) {
     }
   }
 
-  %SetInitialize(this);
+  %_SetInitialize(this);
 
   if (IS_UNDEFINED(iter)) return;
 
@@ -56,7 +56,7 @@ function SetAddJS(key) {
   if (key === 0) {
     key = 0;
   }
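  // The check above also matches -0 (because -0 === 0), so the reassignment
  // normalizes -0 to +0 before the key is stored; MapSetJS below does the
  // same.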
-  return %SetAdd(this, key);
+  return %_SetAdd(this, key);
 }
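// Note on the %_ renames throughout this file (an inference from V8 naming
// conventions): %SetAdd is a call into the C++ runtime, while %_SetAdd names
// an intrinsic the compilers may inline, so the collection fast paths avoid
// a runtime-call round trip.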
 
 
@@ -65,7 +65,7 @@ function SetHasJS(key) {
     throw MakeTypeError('incompatible_method_receiver',
                         ['Set.prototype.has', this]);
   }
-  return %SetHas(this, key);
+  return %_SetHas(this, key);
 }
 
 
@@ -74,7 +74,7 @@ function SetDeleteJS(key) {
     throw MakeTypeError('incompatible_method_receiver',
                         ['Set.prototype.delete', this]);
   }
-  return %SetDelete(this, key);
+  return %_SetDelete(this, key);
 }
 
 
@@ -83,7 +83,7 @@ function SetGetSizeJS() {
     throw MakeTypeError('incompatible_method_receiver',
                         ['Set.prototype.size', this]);
   }
-  return %SetGetSize(this);
+  return %_SetGetSize(this);
 }
 
 
@@ -92,7 +92,7 @@ function SetClearJS() {
     throw MakeTypeError('incompatible_method_receiver',
                         ['Set.prototype.clear', this]);
   }
-  %SetClear(this);
+  %_SetClear(this);
 }
 
 
@@ -170,7 +170,7 @@ function MapConstructor(iterable) {
     }
   }
 
-  %MapInitialize(this);
+  %_MapInitialize(this);
 
   if (IS_UNDEFINED(iter)) return;
 
@@ -193,7 +193,7 @@ function MapGetJS(key) {
     throw MakeTypeError('incompatible_method_receiver',
                         ['Map.prototype.get', this]);
   }
-  return %MapGet(this, key);
+  return %_MapGet(this, key);
 }
 
 
@@ -209,7 +209,7 @@ function MapSetJS(key, value) {
   if (key === 0) {
     key = 0;
   }
-  return %MapSet(this, key, value);
+  return %_MapSet(this, key, value);
 }
 
 
@@ -218,7 +218,7 @@ function MapHasJS(key) {
     throw MakeTypeError('incompatible_method_receiver',
                         ['Map.prototype.has', this]);
   }
-  return %MapHas(this, key);
+  return %_MapHas(this, key);
 }
 
 
@@ -227,7 +227,7 @@ function MapDeleteJS(key) {
     throw MakeTypeError('incompatible_method_receiver',
                         ['Map.prototype.delete', this]);
   }
-  return %MapDelete(this, key);
+  return %_MapDelete(this, key);
 }
 
 
@@ -236,7 +236,7 @@ function MapGetSizeJS() {
     throw MakeTypeError('incompatible_method_receiver',
                         ['Map.prototype.size', this]);
   }
-  return %MapGetSize(this);
+  return %_MapGetSize(this);
 }
 
 
@@ -245,7 +245,7 @@ function MapClearJS() {
     throw MakeTypeError('incompatible_method_receiver',
                         ['Map.prototype.clear', this]);
   }
-  %MapClear(this);
+  %_MapClear(this);
 }
 
 
index ba5f3fd..3b52aa2 100644 (file)
@@ -7,6 +7,7 @@
 #include "src/compiler.h"
 
 #include "src/ast-numbering.h"
+#include "src/ast-this-access-visitor.h"
 #include "src/bootstrapper.h"
 #include "src/codegen.h"
 #include "src/compilation-cache.h"
@@ -20,6 +21,7 @@
 #include "src/isolate-inl.h"
 #include "src/lithium.h"
 #include "src/liveedit.h"
+#include "src/messages.h"
 #include "src/parser.h"
 #include "src/rewriter.h"
 #include "src/runtime-profiler.h"
@@ -34,7 +36,7 @@ namespace internal {
 
 
 ScriptData::ScriptData(const byte* data, int length)
-    : owns_data_(false), data_(data), length_(length) {
+    : owns_data_(false), rejected_(false), data_(data), length_(length) {
   if (!IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment)) {
     byte* copy = NewArray<byte>(length);
     DCHECK(IsAligned(reinterpret_cast<intptr_t>(copy), kPointerAlignment));
@@ -144,7 +146,7 @@ void CompilationInfo::Initialize(Isolate* isolate,
   isolate_ = isolate;
   function_ = NULL;
   scope_ = NULL;
-  global_scope_ = NULL;
+  script_scope_ = NULL;
   extension_ = NULL;
   cached_data_ = NULL;
   compile_options_ = ScriptCompiler::kNoCompileOptions;
@@ -291,14 +293,14 @@ bool CompilationInfo::ShouldSelfOptimize() {
 void CompilationInfo::PrepareForCompilation(Scope* scope) {
   DCHECK(scope_ == NULL);
   scope_ = scope;
+}
+
 
+void CompilationInfo::EnsureFeedbackVector() {
   if (feedback_vector_.is_null()) {
-    // Allocate the feedback vector too.
     feedback_vector_ = isolate()->factory()->NewTypeFeedbackVector(
-        function()->slot_count(), function()->ic_slot_count());
+        function()->feedback_vector_spec());
   }
-  DCHECK(feedback_vector_->Slots() == function()->slot_count() &&
-         feedback_vector_->ICSlots() == function()->ic_slot_count());
 }
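// EnsureFeedbackVector above is idempotent: the vector is allocated from
// function()->feedback_vector_spec() only while still null, replacing the
// eager allocation (and slot-count DCHECK) formerly done in
// PrepareForCompilation.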
 
 
@@ -308,29 +310,29 @@ class HOptimizedGraphBuilderWithPositions: public HOptimizedGraphBuilder {
       : HOptimizedGraphBuilder(info) {
   }
 
-#define DEF_VISIT(type)                                 \
-  virtual void Visit##type(type* node) OVERRIDE {    \
-    if (node->position() != RelocInfo::kNoPosition) {   \
-      SetSourcePosition(node->position());              \
-    }                                                   \
-    HOptimizedGraphBuilder::Visit##type(node);          \
+#define DEF_VISIT(type)                               \
+  void Visit##type(type* node) OVERRIDE {             \
+    if (node->position() != RelocInfo::kNoPosition) { \
+      SetSourcePosition(node->position());            \
+    }                                                 \
+    HOptimizedGraphBuilder::Visit##type(node);        \
   }
   EXPRESSION_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
-#define DEF_VISIT(type)                                          \
-  virtual void Visit##type(type* node) OVERRIDE {             \
-    if (node->position() != RelocInfo::kNoPosition) {            \
-      SetSourcePosition(node->position());                       \
-    }                                                            \
-    HOptimizedGraphBuilder::Visit##type(node);                   \
+#define DEF_VISIT(type)                               \
+  void Visit##type(type* node) OVERRIDE {             \
+    if (node->position() != RelocInfo::kNoPosition) { \
+      SetSourcePosition(node->position());            \
+    }                                                 \
+    HOptimizedGraphBuilder::Visit##type(node);        \
   }
   STATEMENT_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
-#define DEF_VISIT(type)                                            \
-  virtual void Visit##type(type* node) OVERRIDE {               \
-    HOptimizedGraphBuilder::Visit##type(node);                     \
+#define DEF_VISIT(type)                        \
+  void Visit##type(type* node) OVERRIDE {      \
+    HOptimizedGraphBuilder::Visit##type(node); \
   }
   MODULE_NODE_LIST(DEF_VISIT)
   DECLARATION_NODE_LIST(DEF_VISIT)
@@ -342,9 +344,11 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
   DCHECK(info()->IsOptimizing());
   DCHECK(!info()->IsCompilingForDebugging());
 
-  // We should never arrive here if optimization has been disabled on the
-  // shared function info.
-  DCHECK(!info()->shared_info()->optimization_disabled());
+  // Optimization could have been disabled by the parser.
+  if (info()->shared_info()->optimization_disabled()) {
+    return AbortOptimization(
+        info()->shared_info()->disable_optimization_reason());
+  }
 
   // Do not use crankshaft if we need to be able to set break points.
   if (isolate()->DebuggerHasBreakPoints()) {
@@ -411,6 +415,12 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
   // Check the whitelist for TurboFan.
   if ((FLAG_turbo_asm && info()->shared_info()->asm_function()) ||
       info()->closure()->PassesFilter(FLAG_turbo_filter)) {
+    if (FLAG_trace_opt) {
+      OFStream os(stdout);
+      os << "[compiling method " << Brief(*info()->closure())
+         << " using TurboFan]" << std::endl;
+    }
+    Timer t(this, &time_taken_to_create_graph_);
     compiler::Pipeline pipeline(info());
     pipeline.GenerateCode();
     if (!info()->code().is_null()) {
@@ -418,10 +428,13 @@ OptimizedCompileJob::Status OptimizedCompileJob::CreateGraph() {
     }
   }
 
+  if (FLAG_trace_opt) {
+    OFStream os(stdout);
+    os << "[compiling method " << Brief(*info()->closure())
+       << " using Crankshaft]" << std::endl;
+  }
+
   if (FLAG_trace_hydrogen) {
-    Handle<String> name = info()->function()->debug_name();
-    PrintF("-----------------------------------------------------------\n");
-    PrintF("Compiling method %s using hydrogen\n", name->ToCString().get());
     isolate()->GetHTracer()->TraceCompilation(info());
   }
 
@@ -566,18 +579,24 @@ void SetExpectedNofPropertiesFromEstimate(Handle<SharedFunctionInfo> shared,
   // the estimate conservatively.
   if (shared->GetIsolate()->serializer_enabled()) {
     estimate += 2;
-  } else if (FLAG_clever_optimizations) {
+  } else {
     // Inobject slack tracking will reclaim redundant inobject space later,
     // so we can afford to adjust the estimate generously.
     estimate += 8;
-  } else {
-    estimate += 3;
   }
 
   shared->set_expected_nof_properties(estimate);
 }
 
 
+static void MaybeDisableOptimization(Handle<SharedFunctionInfo> shared_info,
+                                     BailoutReason bailout_reason) {
+  if (bailout_reason != kNoReason) {
+    shared_info->DisableOptimization(bailout_reason);
+  }
+}
+
+
 // Sets the function info on a function.
 // The start_position points to the first '(' character after the function name
 // in the full script source. When counting characters in the script source the
@@ -604,9 +623,12 @@ static void SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
   function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
   function_info->set_ast_node_count(lit->ast_node_count());
   function_info->set_is_function(lit->is_function());
-  function_info->set_bailout_reason(lit->dont_optimize_reason());
+  MaybeDisableOptimization(function_info, lit->dont_optimize_reason());
   function_info->set_dont_cache(lit->flags()->Contains(kDontCache));
   function_info->set_kind(lit->kind());
+  function_info->set_uses_super_property(lit->uses_super_property());
+  function_info->set_uses_super_constructor_call(
+      lit->uses_super_constructor_call());
   function_info->set_asm_function(lit->scope()->asm_function());
 }
 
@@ -667,7 +689,7 @@ MUST_USE_RESULT static MaybeHandle<Code> GetUnoptimizedCodeCommon(
   FunctionLiteral* lit = info->function();
   shared->set_strict_mode(lit->strict_mode());
   SetExpectedNofPropertiesFromEstimate(shared, lit->expected_property_count());
-  shared->set_bailout_reason(lit->dont_optimize_reason());
+  MaybeDisableOptimization(shared, lit->dont_optimize_reason());
 
   // Compile unoptimized code.
   if (!CompileUnoptimizedCode(info)) return MaybeHandle<Code>();
@@ -740,18 +762,92 @@ static void InsertCodeIntoOptimizedCodeMap(CompilationInfo* info) {
 static bool Renumber(CompilationInfo* info) {
   if (!AstNumbering::Renumber(info->function(), info->zone())) return false;
   if (!info->shared_info().is_null()) {
-    info->shared_info()->set_ast_node_count(info->function()->ast_node_count());
+    FunctionLiteral* lit = info->function();
+    info->shared_info()->set_ast_node_count(lit->ast_node_count());
+    MaybeDisableOptimization(info->shared_info(), lit->dont_optimize_reason());
+    info->shared_info()->set_dont_cache(lit->flags()->Contains(kDontCache));
   }
   return true;
 }
 
 
+static void ThrowSuperConstructorCheckError(CompilationInfo* info,
+                                            Statement* stmt) {
+  MaybeHandle<Object> obj = info->isolate()->factory()->NewTypeError(
+      "super_constructor_call", HandleVector<Object>(nullptr, 0));
+  Handle<Object> exception;
+  if (!obj.ToHandle(&exception)) return;
+
+  MessageLocation location(info->script(), stmt->position(), stmt->position());
+  USE(info->isolate()->Throw(*exception, &location));
+}
+
+
+static bool CheckSuperConstructorCall(CompilationInfo* info) {
+  FunctionLiteral* function = info->function();
+  if (!function->uses_super_constructor_call()) return true;
+
+  if (function->is_default_constructor()) return true;
+
+  ZoneList<Statement*>* body = function->body();
+  CHECK(body->length() > 0);
+
+  int super_call_index = 0;
+  // Allow directive prologues such as 'use strict', and empty statements.
+  while (true) {
+    CHECK(super_call_index < body->length());  // We know there is a super call.
+    Statement* stmt = body->at(super_call_index);
+    if (stmt->IsExpressionStatement() &&
+        stmt->AsExpressionStatement()->expression()->IsLiteral()) {
+      super_call_index++;
+      continue;
+    }
+    if (stmt->IsEmptyStatement()) {
+      super_call_index++;
+      continue;
+    }
+    break;
+  }
+
+  Statement* stmt = body->at(super_call_index);
+  ExpressionStatement* exprStm = stmt->AsExpressionStatement();
+  if (exprStm == nullptr) {
+    ThrowSuperConstructorCheckError(info, stmt);
+    return false;
+  }
+  Call* callExpr = exprStm->expression()->AsCall();
+  if (callExpr == nullptr) {
+    ThrowSuperConstructorCheckError(info, stmt);
+    return false;
+  }
+
+  if (!callExpr->expression()->IsSuperReference()) {
+    ThrowSuperConstructorCheckError(info, stmt);
+    return false;
+  }
+
+  ZoneList<Expression*>* arguments = callExpr->arguments();
+
+  AstThisAccessVisitor this_access_visitor(info->zone());
+  this_access_visitor.VisitExpressions(arguments);
+
+  if (this_access_visitor.HasStackOverflow()) return false;
+  if (this_access_visitor.UsesThis()) {
+    ThrowSuperConstructorCheckError(info, stmt);
+    return false;
+  }
+
+  return true;
+}
+
+
 bool Compiler::Analyze(CompilationInfo* info) {
   DCHECK(info->function() != NULL);
   if (!Rewriter::Rewrite(info)) return false;
   if (!Scope::Analyze(info)) return false;
   if (!Renumber(info)) return false;
   DCHECK(info->scope() != NULL);
+  if (!CheckSuperConstructorCall(info)) return false;
   return true;
 }
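
CheckSuperConstructorCall enforces a shape requirement on derived-class
constructors that call super(): after any directive prologue ('use strict'
and the like) and empty statements, the first real statement must be the
super(...) call, and its arguments must not touch 'this'. A toy version of
the statement-shape part, over an invented enum rather than V8's AST (the
real code additionally throws the type error and handles stack overflow):

#include <cassert>
#include <vector>

enum class Stmt {
  kDirective,           // e.g. the 'use strict' expression statement
  kEmpty,               // ';'
  kSuperCall,           // super(args) with no 'this' in the arguments
  kSuperCallUsingThis,  // super(this.x) and similar - rejected
  kOther                // anything else
};

bool SuperCallComesFirst(const std::vector<Stmt>& body) {
  size_t i = 0;
  // Skip directives and empty statements, as the loop in
  // CheckSuperConstructorCall does.
  while (i < body.size() &&
         (body[i] == Stmt::kDirective || body[i] == Stmt::kEmpty)) {
    ++i;
  }
  return i < body.size() && body[i] == Stmt::kSuperCall;
}

int main() {
  assert((SuperCallComesFirst({Stmt::kDirective, Stmt::kSuperCall, Stmt::kOther})));
  assert((!SuperCallComesFirst({Stmt::kOther, Stmt::kSuperCall})));
  assert((!SuperCallComesFirst({Stmt::kSuperCallUsingThis})));
}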
 
@@ -784,11 +880,6 @@ static bool GetOptimizedCodeNow(CompilationInfo* info) {
   InsertCodeIntoOptimizedCodeMap(info);
   RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info,
                             info->shared_info());
-  if (FLAG_trace_opt) {
-    PrintF("[completed optimizing ");
-    info->closure()->ShortPrint();
-    PrintF("]\n");
-  }
   return true;
 }
 
@@ -857,12 +948,8 @@ MaybeHandle<Code> Compiler::GetLazyCode(Handle<JSFunction> function) {
     VMState<COMPILER> state(isolate);
     PostponeInterruptsScope postpone(isolate);
 
-    info.SetOptimizing(BailoutId::None(),
-                       Handle<Code>(function->shared()->code()));
-
+    info.SetOptimizing(BailoutId::None(), handle(function->shared()->code()));
     info.MarkAsContextSpecializing();
-    info.MarkAsTypingEnabled();
-    info.MarkAsInliningDisabled();
 
     if (GetOptimizedCodeNow(&info)) {
       DCHECK(function->shared()->is_compiled());
@@ -927,16 +1014,23 @@ bool Compiler::EnsureDeoptimizationSupport(CompilationInfo* info) {
   DCHECK(info->function() != NULL);
   DCHECK(info->scope() != NULL);
   if (!info->shared_info()->has_deoptimization_support()) {
-    CompilationInfoWithZone unoptimized(info->shared_info());
+    Handle<SharedFunctionInfo> shared = info->shared_info();
+    CompilationInfoWithZone unoptimized(shared);
     // Note that we use the same AST that we will use for generating the
     // optimized code.
     unoptimized.SetFunction(info->function());
     unoptimized.PrepareForCompilation(info->scope());
     unoptimized.SetContext(info->context());
     unoptimized.EnableDeoptimizationSupport();
+    // If the current code has reloc info for serialization, also include
+    // reloc info for serialization for the new code, so that deopt support
+    // can be added without losing IC state.
+    if (shared->code()->kind() == Code::FUNCTION &&
+        shared->code()->has_reloc_info_for_serialization()) {
+      unoptimized.PrepareForSerializing();
+    }
     if (!FullCodeGenerator::MakeCode(&unoptimized)) return false;
 
-    Handle<SharedFunctionInfo> shared = info->shared_info();
     shared->EnableDeoptimizationSupport(*unoptimized.code());
     shared->set_feedback_vector(*unoptimized.feedback_vector());
 
@@ -1167,19 +1261,20 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
     int column_offset, bool is_shared_cross_origin, Handle<Context> context,
     v8::Extension* extension, ScriptData** cached_data,
     ScriptCompiler::CompileOptions compile_options, NativesFlag natives) {
+  Isolate* isolate = source->GetIsolate();
   if (compile_options == ScriptCompiler::kNoCompileOptions) {
     cached_data = NULL;
   } else if (compile_options == ScriptCompiler::kProduceParserCache ||
              compile_options == ScriptCompiler::kProduceCodeCache) {
     DCHECK(cached_data && !*cached_data);
     DCHECK(extension == NULL);
+    DCHECK(!isolate->debug()->is_loaded());
   } else {
     DCHECK(compile_options == ScriptCompiler::kConsumeParserCache ||
            compile_options == ScriptCompiler::kConsumeCodeCache);
     DCHECK(cached_data && *cached_data);
     DCHECK(extension == NULL);
   }
-  Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
   isolate->counters()->total_load_size()->Increment(source_length);
   isolate->counters()->total_compile_size()->Increment(source_length);
@@ -1243,10 +1338,7 @@ Handle<SharedFunctionInfo> Compiler::CompileScript(
     result = CompileToplevel(&info);
     if (extension == NULL && !result.is_null() && !result->dont_cache()) {
       compilation_cache->PutScript(source, context, result);
-      // TODO(yangguo): Issue 3628
-      // With block scoping, top-level variables may resolve to a global,
-      // context, which makes the code context-dependent.
-      if (FLAG_serialize_toplevel && !FLAG_harmony_scoping &&
+      if (FLAG_serialize_toplevel &&
           compile_options == ScriptCompiler::kProduceCodeCache) {
         HistogramTimerScope histogram_timer(
             isolate->counters()->compile_serialize());
@@ -1305,10 +1397,10 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
   bool allow_lazy = literal->AllowsLazyCompilation() &&
       !DebuggerWantsEagerCompilation(&info, allow_lazy_without_ctx);
 
-
   if (outer_info->is_toplevel() && outer_info->will_serialize()) {
     // If the toplevel code may be serialized, make sure the inner
     // function is allowed to be compiled lazily.
+    // This is necessary to serialize toplevel code without inner functions.
     DCHECK(allow_lazy);
   }
 
@@ -1317,8 +1409,18 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(
   if (FLAG_lazy && allow_lazy && !literal->is_parenthesized()) {
     Handle<Code> code = isolate->builtins()->CompileLazy();
     info.SetCode(code);
+    // There's no need in theory for a lazy-compiled function to have a type
+    // feedback vector, but some parts of the system expect all
+    // SharedFunctionInfo instances to have one.  The size of the vector depends
+    // on how many feedback-needing nodes are in the tree, and when lazily
+    // parsing we might not know that, if this function was never parsed before.
+    // In that case the vector will be replaced the next time MakeCode is
+    // called.
+    info.EnsureFeedbackVector();
     scope_info = Handle<ScopeInfo>(ScopeInfo::Empty(isolate));
   } else if (Renumber(&info) && FullCodeGenerator::MakeCode(&info)) {
+    // MakeCode will ensure that the feedback vector is present and
+    // appropriately sized.
     DCHECK(!info.code().is_null());
     scope_info = ScopeInfo::Create(info.scope(), info.zone());
   } else {
@@ -1357,6 +1459,7 @@ MaybeHandle<Code> Compiler::GetOptimizedCode(Handle<JSFunction> function,
   Isolate* isolate = info->isolate();
   DCHECK(AllowCompilation::IsAllowed(isolate));
   VMState<COMPILER> state(isolate);
+  DCHECK(isolate->use_crankshaft());
   DCHECK(!isolate->has_pending_exception());
   PostponeInterruptsScope postpone(isolate);
 
index 2cacb40..3db32ce 100644 (file)
@@ -39,6 +39,9 @@ class ScriptData {
 
   const byte* data() const { return data_; }
   int length() const { return length_; }
+  bool rejected() const { return rejected_; }
+
+  void Reject() { rejected_ = true; }
 
   void AcquireDataOwnership() {
     DCHECK(!owns_data_);
@@ -51,7 +54,8 @@ class ScriptData {
   }
 
  private:
-  bool owns_data_;
+  bool owns_data_ : 1;
+  bool rejected_ : 1;
   const byte* data_;
   int length_;
 
@@ -105,7 +109,7 @@ class CompilationInfo {
   }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
-  Scope* global_scope() const { return global_scope_; }
+  Scope* script_scope() const { return script_scope_; }
   Handle<Code> code() const { return code_; }
   Handle<JSFunction> closure() const { return closure_; }
   Handle<SharedFunctionInfo> shared_info() const { return shared_info_; }
@@ -200,8 +204,6 @@ class CompilationInfo {
 
   void MarkAsInliningEnabled() { SetFlag(kInliningEnabled); }
 
-  void MarkAsInliningDisabled() { SetFlag(kInliningEnabled, false); }
-
   bool is_inlining_enabled() const { return GetFlag(kInliningEnabled); }
 
   void MarkAsTypingEnabled() { SetFlag(kTypingEnabled); }
@@ -231,10 +233,11 @@ class CompilationInfo {
     function_ = literal;
   }
   void PrepareForCompilation(Scope* scope);
-  void SetGlobalScope(Scope* global_scope) {
-    DCHECK(global_scope_ == NULL);
-    global_scope_ = global_scope;
+  void SetScriptScope(Scope* script_scope) {
+    DCHECK(script_scope_ == NULL);
+    script_scope_ = script_scope;
   }
+  void EnsureFeedbackVector();
   Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
   }
@@ -441,8 +444,8 @@ class CompilationInfo {
   // The scope of the function literal as a convenience.  Set to indicate
   // that scopes have been analyzed.
   Scope* scope_;
-  // The global scope provided as a convenience.
-  Scope* global_scope_;
+  // The script scope provided as a convenience.
+  Scope* script_scope_;
   // For compiled stubs, the stub object
   HydrogenCodeStub* code_stub_;
   // The compiled code.
@@ -460,7 +463,7 @@ class CompilationInfo {
   ScriptData** cached_data_;
   ScriptCompiler::CompileOptions compile_options_;
 
-  // The context of the caller for eval code, and the global context for a
+  // The context of the caller for eval code, and the script context for a
   // global script. Will be a null handle otherwise.
   Handle<Context> context_;
 
index 583bd3a..8c8e530 100644 (file)
@@ -40,21 +40,21 @@ FieldAccess AccessBuilder::ForJSFunctionContext() {
 // static
 FieldAccess AccessBuilder::ForJSArrayBufferBackingStore() {
   return {kTaggedBase, JSArrayBuffer::kBackingStoreOffset, MaybeHandle<Name>(),
-          Type::UntaggedPtr(), kMachPtr};
+          Type::UntaggedPointer(), kMachPtr};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForExternalArrayPointer() {
   return {kTaggedBase, ExternalArray::kExternalPointerOffset,
-          MaybeHandle<Name>(), Type::UntaggedPtr(), kMachPtr};
+          MaybeHandle<Name>(), Type::UntaggedPointer(), kMachPtr};
 }
 
 
 // static
 FieldAccess AccessBuilder::ForMapInstanceType() {
   return {kTaggedBase, Map::kInstanceTypeOffset, Handle<Name>(),
-          Type::UntaggedInt8(), kMachUint8};
+          Type::UntaggedUnsigned8(), kMachUint8};
 }
 
 
@@ -73,9 +73,17 @@ FieldAccess AccessBuilder::ForValue() {
 
 
 // static
+FieldAccess AccessBuilder::ForContextSlot(size_t index) {
+  int offset = Context::kHeaderSize + static_cast<int>(index) * kPointerSize;
+  DCHECK_EQ(offset,
+            Context::SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
+  return {kTaggedBase, offset, Handle<Name>(), Type::Any(), kMachAnyTagged};
+}
+
+
+// static
 ElementAccess AccessBuilder::ForFixedArrayElement() {
-  return {kNoBoundsCheck, kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
-          kMachAnyTagged};
+  return {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged};
 }
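
ForContextSlot computes the tagged-base field offset by hand and then checks
it against Context::SlotOffset, which returns an untagged offset (tagged heap
pointers carry a low tag bit, so the two differ by kHeapObjectTag). The
arithmetic, with illustrative constants (real values depend on the target):

#include <cassert>
#include <cstddef>

int main() {
  const int kPointerSize = 8;    // assuming a 64-bit target
  const int kHeaderSize = 16;    // hypothetical Context header size
  const int kHeapObjectTag = 1;  // V8 tags heap object pointers with 1

  auto SlotOffset = [&](int i) {  // untagged offset of slot i
    return kHeaderSize + i * kPointerSize - kHeapObjectTag;
  };

  size_t index = 3;
  int offset = kHeaderSize + static_cast<int>(index) * kPointerSize;
  assert(offset == SlotOffset(static_cast<int>(index)) + kHeapObjectTag);
}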
 
 
@@ -86,33 +94,25 @@ ElementAccess AccessBuilder::ForTypedArrayElement(ExternalArrayType type,
   int header_size = is_external ? 0 : FixedTypedArrayBase::kDataOffset;
   switch (type) {
     case kExternalInt8Array:
-      return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
-              kMachInt8};
+      return {taggedness, header_size, Type::Signed32(), kMachInt8};
     case kExternalUint8Array:
     case kExternalUint8ClampedArray:
-      return {kTypedArrayBoundsCheck, taggedness, header_size,
-              Type::Unsigned32(), kMachUint8};
+      return {taggedness, header_size, Type::Unsigned32(), kMachUint8};
     case kExternalInt16Array:
-      return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
-              kMachInt16};
+      return {taggedness, header_size, Type::Signed32(), kMachInt16};
     case kExternalUint16Array:
-      return {kTypedArrayBoundsCheck, taggedness, header_size,
-              Type::Unsigned32(), kMachUint16};
+      return {taggedness, header_size, Type::Unsigned32(), kMachUint16};
     case kExternalInt32Array:
-      return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Signed32(),
-              kMachInt32};
+      return {taggedness, header_size, Type::Signed32(), kMachInt32};
     case kExternalUint32Array:
-      return {kTypedArrayBoundsCheck, taggedness, header_size,
-              Type::Unsigned32(), kMachUint32};
+      return {taggedness, header_size, Type::Unsigned32(), kMachUint32};
     case kExternalFloat32Array:
-      return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Number(),
-              kMachFloat32};
+      return {taggedness, header_size, Type::Number(), kMachFloat32};
     case kExternalFloat64Array:
-      return {kTypedArrayBoundsCheck, taggedness, header_size, Type::Number(),
-              kMachFloat64};
+      return {taggedness, header_size, Type::Number(), kMachFloat64};
   }
   UNREACHABLE();
-  return {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::None(), kMachNone};
+  return {kUntaggedBase, 0, Type::None(), kMachNone};
 }
 
 }  // namespace compiler
index 4c22efa..d6385e4 100644 (file)
@@ -43,6 +43,9 @@ class AccessBuilder FINAL : public AllStatic {
   // Provides access to JSValue::value() field.
   static FieldAccess ForValue();
 
+  // Provides access to Context slots.
+  static FieldAccess ForContextSlot(size_t index);
+
   // Provides access to FixedArray elements.
   static ElementAccess ForFixedArrayElement();
 
index 3433765..cfa4de9 100644 (file)
@@ -77,6 +77,7 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
       case Constant::kInt64:
       case Constant::kExternalReference:
       case Constant::kHeapObject:
+      case Constant::kRpoNumber:
         break;
     }
     UNREACHABLE();
@@ -141,9 +142,8 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
     return MemOperand(r0);
   }
 
-  MemOperand InputOffset() {
-    int index = 0;
-    return InputOffset(&index);
+  MemOperand InputOffset(int first_index = 0) {
+    return InputOffset(&first_index);
   }
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
@@ -158,6 +158,112 @@ class ArmOperandConverter FINAL : public InstructionOperandConverter {
 };
 
 
+namespace {
+
+class OutOfLineLoadFloat32 FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ vmov(result_, std::numeric_limits<float>::quiet_NaN());
+  }
+
+ private:
+  SwVfpRegister const result_;
+};
+
+
+class OutOfLineLoadFloat64 FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadFloat64(CodeGenerator* gen, DwVfpRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ vmov(result_, std::numeric_limits<double>::quiet_NaN(), kScratchReg);
+  }
+
+ private:
+  DwVfpRegister const result_;
+};
+
+
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ mov(result_, Operand::Zero()); }
+
+ private:
+  Register const result_;
+};
+
+}  // namespace
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                           \
+  do {                                                               \
+    auto result = i.OutputFloat##width##Register();                  \
+    auto offset = i.InputRegister(0);                                \
+    if (instr->InputAt(1)->IsRegister()) {                           \
+      __ cmp(offset, i.InputRegister(1));                            \
+    } else {                                                         \
+      __ cmp(offset, i.InputImmediate(1));                           \
+    }                                                                \
+    auto ool = new (zone()) OutOfLineLoadFloat##width(this, result); \
+    __ b(hs, ool->entry());                                          \
+    __ vldr(result, i.InputOffset(2));                               \
+    __ bind(ool->exit());                                            \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());                              \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                \
+  do {                                                          \
+    auto result = i.OutputRegister();                           \
+    auto offset = i.InputRegister(0);                           \
+    if (instr->InputAt(1)->IsRegister()) {                      \
+      __ cmp(offset, i.InputRegister(1));                       \
+    } else {                                                    \
+      __ cmp(offset, i.InputImmediate(1));                      \
+    }                                                           \
+    auto ool = new (zone()) OutOfLineLoadInteger(this, result); \
+    __ b(hs, ool->entry());                                     \
+    __ asm_instr(result, i.InputOffset(2));                     \
+    __ bind(ool->exit());                                       \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());                         \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width)        \
+  do {                                             \
+    auto offset = i.InputRegister(0);              \
+    if (instr->InputAt(1)->IsRegister()) {         \
+      __ cmp(offset, i.InputRegister(1));          \
+    } else {                                       \
+      __ cmp(offset, i.InputImmediate(1));         \
+    }                                              \
+    auto value = i.InputFloat##width##Register(2); \
+    __ vstr(value, i.InputOffset(3), lo);          \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());            \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr) \
+  do {                                            \
+    auto offset = i.InputRegister(0);             \
+    if (instr->InputAt(1)->IsRegister()) {        \
+      __ cmp(offset, i.InputRegister(1));         \
+    } else {                                      \
+      __ cmp(offset, i.InputImmediate(1));        \
+    }                                             \
+    auto value = i.InputRegister(2);              \
+    __ asm_instr(value, i.InputOffset(3), lo);    \
+    DCHECK_EQ(LeaveCC, i.OutputSBit());           \
+  } while (0)
+
+
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   ArmOperandConverter i(this, instr);
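
The OutOfLineLoad* classes and ASSEMBLE_CHECKED_* macros above implement
TurboFan's checked memory accesses: one unsigned comparison of the offset
against the length guards the access, an out-of-range load branches to the
out-of-line code and yields a neutral value (zero for integers, quiet NaN for
floats), and an out-of-range store is skipped via the conditional 'lo' store.
The load-side contract in plain C++ (a sketch, indexed by element for
simplicity):

#include <cstdint>
#include <iostream>
#include <limits>

float CheckedLoadFloat32(const float* buffer, uint32_t index, uint32_t length) {
  if (index >= length) {  // cmp offset, length; b(hs, ool->entry())
    return std::numeric_limits<float>::quiet_NaN();  // OutOfLineLoadFloat32
  }
  return buffer[index];  // the vldr on the fast path
}

int main() {
  float data[2] = {1.5f, 2.5f};
  std::cout << CheckedLoadFloat32(data, 1, 2) << "\n";  // 2.5
  std::cout << CheckedLoadFloat32(data, 7, 2) << "\n";  // nan
}
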
@@ -193,7 +299,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
     }
     case kArchJmp:
-      __ b(GetLabel(i.InputRpo(0)));
+      AssembleArchJump(i.InputRpo(0));
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     case kArchNop:
@@ -299,6 +405,42 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kArmSxtb:
+      __ sxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSxth:
+      __ sxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSxtab:
+      __ sxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmSxtah:
+      __ sxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxtb:
+      __ uxtb(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxth:
+      __ uxth(i.OutputRegister(), i.InputRegister(0), i.InputInt32(1));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxtab:
+      __ uxtab(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
+    case kArmUxtah:
+      __ uxtah(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1),
+               i.InputInt32(2));
+      DCHECK_EQ(LeaveCC, i.OutputSBit());
+      break;
     case kArmCmp:
       __ cmp(i.InputRegister(0), i.InputOperand2(1));
       DCHECK_EQ(SetCC, i.OutputSBit());
@@ -498,35 +640,62 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       DCHECK_EQ(LeaveCC, i.OutputSBit());
       break;
     }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsb);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrb);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrsh);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ldrh);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(strb);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(strh);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(str);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(32);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(64);
+      break;
   }
 }
 
 
 // Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   ArmOperandConverter i(this, instr);
-  Label done;
-
-  // Emit a branch. The true and false targets are always the last two inputs
-  // to the instruction.
-  BasicBlock::RpoNumber tblock =
-      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
-  BasicBlock::RpoNumber fblock =
-      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
-  bool fallthru = IsNextInAssemblyOrder(fblock);
-  Label* tlabel = GetLabel(tblock);
-  Label* flabel = fallthru ? &done : GetLabel(fblock);
-  switch (condition) {
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  switch (branch->condition) {
     case kUnorderedEqual:
-      __ b(vs, flabel);
-    // Fall through.
+      // The "eq" condition will not catch the unordered case.
+      // The jump or fall-through to the false label handles the unordered
+      // case.
     case kEqual:
       __ b(eq, tlabel);
       break;
     case kUnorderedNotEqual:
-      __ b(vs, tlabel);
-    // Fall through.
+      // Unordered or not-equal can be tested with the "ne" condition.
+      // See ARMv7 manual A8.3 - Conditional execution.
     case kNotEqual:
       __ b(ne, tlabel);
       break;
@@ -543,26 +712,28 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
       __ b(gt, tlabel);
       break;
     case kUnorderedLessThan:
-      __ b(vs, flabel);
-    // Fall through.
+      // The "lo" condition will not catch the unordered case.
+      // The jump or fall-through to the false label handles the unordered
+      // case.
     case kUnsignedLessThan:
       __ b(lo, tlabel);
       break;
     case kUnorderedGreaterThanOrEqual:
-      __ b(vs, tlabel);
-    // Fall through.
+      // Unordered or greater-than-or-equal can be tested with the "hs"
+      // condition.
+      // See ARMv7 manual A8.3 - Conditional execution.
     case kUnsignedGreaterThanOrEqual:
       __ b(hs, tlabel);
       break;
     case kUnorderedLessThanOrEqual:
-      __ b(vs, flabel);
-    // Fall through.
+      // The "ls" condition will not catch the unordered case.
+      // The jump or fall-through to the false label handles the unordered
+      // case.
     case kUnsignedLessThanOrEqual:
       __ b(ls, tlabel);
       break;
     case kUnorderedGreaterThan:
-      __ b(vs, tlabel);
-    // Fall through.
+      // Unordered or greater-than can be tested with the "hi" condition.
+      // See ARMv7 manual A8.3 - Conditional execution.
     case kUnsignedGreaterThan:
       __ b(hi, tlabel);
       break;
@@ -573,8 +744,12 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
       __ b(vc, tlabel);
       break;
   }
-  if (!fallthru) __ b(flabel);  // no fallthru to flabel.
-  __ bind(&done);
+  if (!branch->fallthru) __ b(flabel);  // no fallthru to flabel.
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ b(GetLabel(target));
 }
 
 
@@ -702,24 +877,6 @@ void CodeGenerator::AssemblePrologue() {
     __ Prologue(info->IsCodePreAgingActive());
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
-
-    // Sloppy mode functions and builtins need to replace the receiver with the
-    // global proxy when called as functions (without an explicit receiver
-    // object).
-    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
-    if (info->strict_mode() == SLOPPY && !info->is_native()) {
-      Label ok;
-      // +2 for return address and saved frame pointer.
-      int receiver_slot = info->scope()->num_parameters() + 2;
-      __ ldr(r2, MemOperand(fp, receiver_slot * kPointerSize));
-      __ CompareRoot(r2, Heap::kUndefinedValueRootIndex);
-      __ b(ne, &ok);
-      __ ldr(r2, GlobalObjectOperand());
-      __ ldr(r2, FieldMemOperand(r2, GlobalObject::kGlobalProxyOffset));
-      __ str(r2, MemOperand(fp, receiver_slot * kPointerSize));
-      __ bind(&ok);
-    }
-
   } else {
     __ StubPrologue();
     frame()->SetRegisterSaveAreaSize(
@@ -809,24 +966,26 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
         case Constant::kHeapObject:
           __ Move(dst, src.ToHeapObject());
           break;
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(dcarney): loading RPO constants on arm.
+          break;
       }
       if (destination->IsStackSlot()) __ str(dst, g.ToMemOperand(destination));
     } else if (src.type() == Constant::kFloat32) {
-      SwVfpRegister dst = destination->IsDoubleRegister()
-                              ? g.ToFloat32Register(destination)
-                              : kScratchDoubleReg.low();
-      // TODO(turbofan): Can we do better here?
-      __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
-      __ vmov(dst, ip);
       if (destination->IsDoubleStackSlot()) {
-        __ vstr(dst, g.ToMemOperand(destination));
+        MemOperand dst = g.ToMemOperand(destination);
+        __ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
+        __ str(ip, dst);
+      } else {
+        SwVfpRegister dst = g.ToFloat32Register(destination);
+        __ vmov(dst, src.ToFloat32());
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src.type());
       DwVfpRegister dst = destination->IsDoubleRegister()
                               ? g.ToFloat64Register(destination)
                               : kScratchDoubleReg;
-      __ vmov(dst, src.ToFloat64());
+      __ vmov(dst, src.ToFloat64(), kScratchReg);
       if (destination->IsDoubleStackSlot()) {
         __ vstr(dst, g.ToMemOperand(destination));
       }
index c48369e..ecd0b2d 100644 (file)
@@ -35,6 +35,14 @@ namespace compiler {
   V(ArmMvn)                        \
   V(ArmBfc)                        \
   V(ArmUbfx)                       \
+  V(ArmSxtb)                       \
+  V(ArmSxth)                       \
+  V(ArmSxtab)                      \
+  V(ArmSxtah)                      \
+  V(ArmUxtb)                       \
+  V(ArmUxth)                       \
+  V(ArmUxtab)                      \
+  V(ArmUxtah)                      \
   V(ArmVcmpF64)                    \
   V(ArmVaddF64)                    \
   V(ArmVsubF64)                    \
index a071bbc..ef9e89e 100644 (file)
@@ -16,13 +16,6 @@ class ArmOperandGenerator : public OperandGenerator {
   explicit ArmOperandGenerator(InstructionSelector* selector)
       : OperandGenerator(selector) {}
 
-  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
-    if (CanBeImmediate(node, opcode)) {
-      return UseImmediate(node);
-    }
-    return UseRegister(node);
-  }
-
   bool CanBeImmediate(int32_t value) const {
     return Assembler::ImmediateFitsAddrMode1Instruction(value);
   }
@@ -74,62 +67,26 @@ class ArmOperandGenerator : public OperandGenerator {
       case kArmStrh:
         return value >= -255 && value <= 255;
 
-      case kArchCallCodeObject:
-      case kArchCallJSFunction:
-      case kArchJmp:
-      case kArchNop:
-      case kArchRet:
-      case kArchStackPointer:
-      case kArchTruncateDoubleToI:
-      case kArmMul:
-      case kArmMla:
-      case kArmMls:
-      case kArmSmmul:
-      case kArmSmmla:
-      case kArmUmull:
-      case kArmSdiv:
-      case kArmUdiv:
-      case kArmBfc:
-      case kArmUbfx:
-      case kArmVcmpF64:
-      case kArmVaddF64:
-      case kArmVsubF64:
-      case kArmVmulF64:
-      case kArmVmlaF64:
-      case kArmVmlsF64:
-      case kArmVdivF64:
-      case kArmVmodF64:
-      case kArmVnegF64:
-      case kArmVsqrtF64:
-      case kArmVfloorF64:
-      case kArmVceilF64:
-      case kArmVroundTruncateF64:
-      case kArmVroundTiesAwayF64:
-      case kArmVcvtF32F64:
-      case kArmVcvtF64F32:
-      case kArmVcvtF64S32:
-      case kArmVcvtF64U32:
-      case kArmVcvtS32F64:
-      case kArmVcvtU32F64:
-      case kArmPush:
-        return false;
+      default:
+        break;
     }
-    UNREACHABLE();
     return false;
   }
 };
 
 
-static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
-                           Node* node) {
+namespace {
+
+void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
   ArmOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)));
 }
 
 
-static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
-                            Node* node) {
+void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
   ArmOperandGenerator g(selector);
   selector->Emit(opcode, g.DefineAsRegister(node),
                  g.UseRegister(node->InputAt(0)),
@@ -137,86 +94,69 @@ static void VisitRRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
 }
 
 
-static bool TryMatchROR(InstructionSelector* selector,
-                        InstructionCode* opcode_return, Node* node,
-                        InstructionOperand** value_return,
-                        InstructionOperand** shift_return) {
+template <IrOpcode::Value kOpcode, int kImmMin, int kImmMax,
+          AddressingMode kImmMode, AddressingMode kRegMode>
+bool TryMatchShift(InstructionSelector* selector,
+                   InstructionCode* opcode_return, Node* node,
+                   InstructionOperand** value_return,
+                   InstructionOperand** shift_return) {
   ArmOperandGenerator g(selector);
-  if (node->opcode() != IrOpcode::kWord32Ror) return false;
-  Int32BinopMatcher m(node);
-  *value_return = g.UseRegister(m.left().node());
-  if (m.right().IsInRange(1, 31)) {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_I);
-    *shift_return = g.UseImmediate(m.right().node());
-  } else {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ROR_R);
-    *shift_return = g.UseRegister(m.right().node());
+  if (node->opcode() == kOpcode) {
+    Int32BinopMatcher m(node);
+    *value_return = g.UseRegister(m.left().node());
+    if (m.right().IsInRange(kImmMin, kImmMax)) {
+      *opcode_return |= AddressingModeField::encode(kImmMode);
+      *shift_return = g.UseImmediate(m.right().node());
+    } else {
+      *opcode_return |= AddressingModeField::encode(kRegMode);
+      *shift_return = g.UseRegister(m.right().node());
+    }
+    return true;
   }
-  return true;
+  return false;
 }
 
 
-static inline bool TryMatchASR(InstructionSelector* selector,
-                               InstructionCode* opcode_return, Node* node,
-                               InstructionOperand** value_return,
-                               InstructionOperand** shift_return) {
-  ArmOperandGenerator g(selector);
-  if (node->opcode() != IrOpcode::kWord32Sar) return false;
-  Int32BinopMatcher m(node);
-  *value_return = g.UseRegister(m.left().node());
-  if (m.right().IsInRange(1, 32)) {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_I);
-    *shift_return = g.UseImmediate(m.right().node());
-  } else {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_ASR_R);
-    *shift_return = g.UseRegister(m.right().node());
-  }
-  return true;
+bool TryMatchROR(InstructionSelector* selector, InstructionCode* opcode_return,
+                 Node* node, InstructionOperand** value_return,
+                 InstructionOperand** shift_return) {
+  return TryMatchShift<IrOpcode::kWord32Ror, 1, 31, kMode_Operand2_R_ROR_I,
+                       kMode_Operand2_R_ROR_R>(selector, opcode_return, node,
+                                               value_return, shift_return);
 }
 
 
-static inline bool TryMatchLSL(InstructionSelector* selector,
-                               InstructionCode* opcode_return, Node* node,
-                               InstructionOperand** value_return,
-                               InstructionOperand** shift_return) {
-  ArmOperandGenerator g(selector);
-  if (node->opcode() != IrOpcode::kWord32Shl) return false;
-  Int32BinopMatcher m(node);
-  *value_return = g.UseRegister(m.left().node());
-  if (m.right().IsInRange(0, 31)) {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_I);
-    *shift_return = g.UseImmediate(m.right().node());
-  } else {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSL_R);
-    *shift_return = g.UseRegister(m.right().node());
-  }
-  return true;
+bool TryMatchASR(InstructionSelector* selector, InstructionCode* opcode_return,
+                 Node* node, InstructionOperand** value_return,
+                 InstructionOperand** shift_return) {
+  return TryMatchShift<IrOpcode::kWord32Sar, 1, 32, kMode_Operand2_R_ASR_I,
+                       kMode_Operand2_R_ASR_R>(selector, opcode_return, node,
+                                               value_return, shift_return);
 }
 
 
-static inline bool TryMatchLSR(InstructionSelector* selector,
-                               InstructionCode* opcode_return, Node* node,
-                               InstructionOperand** value_return,
-                               InstructionOperand** shift_return) {
-  ArmOperandGenerator g(selector);
-  if (node->opcode() != IrOpcode::kWord32Shr) return false;
-  Int32BinopMatcher m(node);
-  *value_return = g.UseRegister(m.left().node());
-  if (m.right().IsInRange(1, 32)) {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_I);
-    *shift_return = g.UseImmediate(m.right().node());
-  } else {
-    *opcode_return |= AddressingModeField::encode(kMode_Operand2_R_LSR_R);
-    *shift_return = g.UseRegister(m.right().node());
-  }
-  return true;
+bool TryMatchLSL(InstructionSelector* selector, InstructionCode* opcode_return,
+                 Node* node, InstructionOperand** value_return,
+                 InstructionOperand** shift_return) {
+  return TryMatchShift<IrOpcode::kWord32Shl, 0, 31, kMode_Operand2_R_LSL_I,
+                       kMode_Operand2_R_LSL_R>(selector, opcode_return, node,
+                                               value_return, shift_return);
+}
+
+
+bool TryMatchLSR(InstructionSelector* selector, InstructionCode* opcode_return,
+                 Node* node, InstructionOperand** value_return,
+                 InstructionOperand** shift_return) {
+  return TryMatchShift<IrOpcode::kWord32Shr, 1, 32, kMode_Operand2_R_LSR_I,
+                       kMode_Operand2_R_LSR_R>(selector, opcode_return, node,
+                                               value_return, shift_return);
 }
 
 
-static inline bool TryMatchShift(InstructionSelector* selector,
-                                 InstructionCode* opcode_return, Node* node,
-                                 InstructionOperand** value_return,
-                                 InstructionOperand** shift_return) {
+bool TryMatchShift(InstructionSelector* selector,
+                   InstructionCode* opcode_return, Node* node,
+                   InstructionOperand** value_return,
+                   InstructionOperand** shift_return) {
   return (
       TryMatchASR(selector, opcode_return, node, value_return, shift_return) ||
       TryMatchLSL(selector, opcode_return, node, value_return, shift_return) ||
@@ -225,11 +165,10 @@ static inline bool TryMatchShift(InstructionSelector* selector,
 }
 
 
-static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
-                                            InstructionCode* opcode_return,
-                                            Node* node,
-                                            size_t* input_count_return,
-                                            InstructionOperand** inputs) {
+bool TryMatchImmediateOrShift(InstructionSelector* selector,
+                              InstructionCode* opcode_return, Node* node,
+                              size_t* input_count_return,
+                              InstructionOperand** inputs) {
   ArmOperandGenerator g(selector);
   if (g.CanBeImmediate(node, *opcode_return)) {
     *opcode_return |= AddressingModeField::encode(kMode_Operand2_I);
@@ -245,9 +184,9 @@ static inline bool TryMatchImmediateOrShift(InstructionSelector* selector,
 }
 
 
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, InstructionCode reverse_opcode,
-                       FlagsContinuation* cont) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, InstructionCode reverse_opcode,
+                FlagsContinuation* cont) {
   ArmOperandGenerator g(selector);
   Int32BinopMatcher m(node);
   InstructionOperand* inputs[5];
@@ -255,8 +194,20 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
   InstructionOperand* outputs[2];
   size_t output_count = 0;
 
-  if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
-                               &input_count, &inputs[1])) {
+  if (m.left().node() == m.right().node()) {
+    // If both inputs refer to the same operand, enforce allocating a register
+    // for both of them to ensure that we don't end up generating code like
+    // this:
+    //
+    //   mov r0, r1, asr #16
+    //   adds r0, r0, r1, asr #16
+    //   bvs label
+    InstructionOperand* const input = g.UseRegister(m.left().node());
+    opcode |= AddressingModeField::encode(kMode_Operand2_R);
+    inputs[input_count++] = input;
+    inputs[input_count++] = input;
+  } else if (TryMatchImmediateOrShift(selector, &opcode, m.right().node(),
+                                      &input_count, &inputs[1])) {
     inputs[0] = g.UseRegister(m.left().node());
     input_count++;
   } else if (TryMatchImmediateOrShift(selector, &reverse_opcode,
@@ -293,13 +244,16 @@ static void VisitBinop(InstructionSelector* selector, Node* node,
 }
 
 
-static void VisitBinop(InstructionSelector* selector, Node* node,
-                       InstructionCode opcode, InstructionCode reverse_opcode) {
+void VisitBinop(InstructionSelector* selector, Node* node,
+                InstructionCode opcode, InstructionCode reverse_opcode) {
   FlagsContinuation cont;
   VisitBinop(selector, node, opcode, reverse_opcode, &cont);
 }
 
 
+}  // namespace
+
+
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
@@ -396,8 +350,86 @@ void InstructionSelector::VisitStore(Node* node) {
 }
 
 
-static inline void EmitBic(InstructionSelector* selector, Node* node,
-                           Node* left, Node* right) {
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  ArmOperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.UseRegister(offset);
+  InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
+                                           ? g.UseImmediate(length)
+                                           : g.UseRegister(length);
+  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR),
+       g.DefineAsRegister(node), offset_operand, length_operand,
+       g.UseRegister(buffer), offset_operand);
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  ArmOperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.UseRegister(offset);
+  InstructionOperand* length_operand = g.CanBeImmediate(length, kArmCmp)
+                                           ? g.UseImmediate(length)
+                                           : g.UseRegister(length);
+  Emit(opcode | AddressingModeField::encode(kMode_Offset_RR), nullptr,
+       offset_operand, length_operand, g.UseRegister(value),
+       g.UseRegister(buffer), offset_operand);
+}
+
+
+namespace {
+
+void EmitBic(InstructionSelector* selector, Node* node, Node* left,
+             Node* right) {
   ArmOperandGenerator g(selector);
   InstructionCode opcode = kArmBic;
   InstructionOperand* value_operand;
@@ -413,6 +445,18 @@ static inline void EmitBic(InstructionSelector* selector, Node* node,
 }
 
 
+void EmitUbfx(InstructionSelector* selector, Node* node, Node* left,
+              uint32_t lsb, uint32_t width) {
+  DCHECK_LE(1, width);
+  DCHECK_LE(width, 32 - lsb);
+  ArmOperandGenerator g(selector);
+  selector->Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(left),
+                 g.TempImmediate(lsb), g.TempImmediate(width));
+}
+
+}  // namespace
+
+
 void InstructionSelector::VisitWord32And(Node* node) {
   ArmOperandGenerator g(this);
   Int32BinopMatcher m(node);
@@ -430,27 +474,27 @@ void InstructionSelector::VisitWord32And(Node* node) {
       return;
     }
   }
-  if (IsSupported(ARMv7) && m.right().HasValue()) {
-    // Try to interpret this AND as UBFX.
+  if (m.right().HasValue()) {
     uint32_t const value = m.right().Value();
     uint32_t width = base::bits::CountPopulation32(value);
     uint32_t msb = base::bits::CountLeadingZeros32(value);
-    if (width != 0 && msb + width == 32) {
+    // Try to interpret this AND as UBFX.
+    if (IsSupported(ARMv7) && width != 0 && msb + width == 32) {
       DCHECK_EQ(0, base::bits::CountTrailingZeros32(value));
       if (m.left().IsWord32Shr()) {
         Int32BinopMatcher mleft(m.left().node());
         if (mleft.right().IsInRange(0, 31)) {
-          Emit(kArmUbfx, g.DefineAsRegister(node),
-               g.UseRegister(mleft.left().node()),
-               g.UseImmediate(mleft.right().node()), g.TempImmediate(width));
-          return;
+          // UBFX cannot extract bits past the register size; however, since
+          // shifting the original value introduced zeros in the high bits,
+          // we can still use UBFX with a smaller width and the remaining
+          // bits will be zeros.
+          uint32_t const lsb = mleft.right().Value();
+          return EmitUbfx(this, node, mleft.left().node(), lsb,
+                          std::min(width, 32 - lsb));
         }
       }
-      Emit(kArmUbfx, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
-           g.TempImmediate(0), g.TempImmediate(width));
-      return;
+      return EmitUbfx(this, node, m.left().node(), 0, width);
     }
-
     // Try to interpret this AND as BIC.
     if (g.CanBeImmediate(~value)) {
       Emit(kArmBic | AddressingModeField::encode(kMode_Operand2_I),
@@ -458,16 +502,23 @@ void InstructionSelector::VisitWord32And(Node* node) {
            g.TempImmediate(~value));
       return;
     }
-
-    // Try to interpret this AND as BFC.
-    width = 32 - width;
-    msb = base::bits::CountLeadingZeros32(~value);
-    uint32_t lsb = base::bits::CountTrailingZeros32(~value);
-    if (msb + width + lsb == 32) {
-      Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
-           g.TempImmediate(lsb), g.TempImmediate(width));
+    // Try to interpret this AND as UXTH.
+    if (value == 0xffff) {
+      Emit(kArmUxth, g.DefineAsRegister(m.node()),
+           g.UseRegister(m.left().node()), g.TempImmediate(0));
       return;
     }
+    // Try to interpret this AND as BFC.
+    if (IsSupported(ARMv7)) {
+      width = 32 - width;
+      msb = base::bits::CountLeadingZeros32(~value);
+      uint32_t lsb = base::bits::CountTrailingZeros32(~value);
+      if (msb + width + lsb == 32) {
+        Emit(kArmBfc, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
+             g.TempImmediate(lsb), g.TempImmediate(width));
+        return;
+      }
+    }
   }
   VisitBinop(this, node, kArmAnd, kArmAnd);
 }
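
The UBFX matching hinges on a small bit trick: with width = popcount(value)
and msb = clz(value), the identity msb + width == 32 holds exactly when the
mask's set bits form one contiguous run starting at bit 0 (0xff, 0xffff and
so on), which is what UBFX with lsb 0 extracts. Checking a few masks
(__builtin_popcount/__builtin_clz are GCC/Clang builtins standing in for
base::bits::CountPopulation32/CountLeadingZeros32):

#include <cstdint>
#include <initializer_list>
#include <iostream>

int main() {
  for (uint32_t value : {0xffu, 0xffffu, 0xff00u, 0xf0f0u}) {
    uint32_t width = __builtin_popcount(value);
    uint32_t msb = __builtin_clz(value);
    std::cout << std::hex << value << ": ubfx-able="
              << (width != 0 && msb + width == 32) << "\n";
  }
}
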
@@ -559,10 +610,7 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
       uint32_t msb = base::bits::CountLeadingZeros32(value);
       if (msb + width + lsb == 32) {
         DCHECK_EQ(lsb, base::bits::CountTrailingZeros32(value));
-        Emit(kArmUbfx, g.DefineAsRegister(node),
-             g.UseRegister(mleft.left().node()), g.TempImmediate(lsb),
-             g.TempImmediate(width));
-        return;
+        return EmitUbfx(this, node, mleft.left().node(), lsb, width);
       }
     }
   }
@@ -571,6 +619,20 @@ void InstructionSelector::VisitWord32Shr(Node* node) {
 
 
 void InstructionSelector::VisitWord32Sar(Node* node) {
+  ArmOperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kArmSxth, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kArmSxtb, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+      return;
+    }
+  }
   VisitShift(this, node, TryMatchASR);
 }
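
The new Word32Sar patterns rewrite (x << 16) >> 16 to SXTH and
(x << 24) >> 24 to SXTB; both shift pairs sign-extend the low halfword or
byte, which is exactly what the extend instructions do in one step. The
equivalence, sketched in C++ assuming the usual two's-complement behavior
(the left shift is done in unsigned space to avoid signed-overflow undefined
behavior):

#include <cstdint>
#include <initializer_list>
#include <iostream>

int32_t via_shifts(int32_t x) {  // Word32Shl followed by Word32Sar
  return static_cast<int32_t>(static_cast<uint32_t>(x) << 16) >> 16;
}

int32_t via_sxth(int32_t x) {  // what SXTH computes
  return static_cast<int16_t>(x);  // implementation-defined narrowing,
                                   // wraps on all mainstream targets
}

int main() {
  for (int32_t x : {0x12345678, -42, 0x8000}) {
    std::cout << (via_shifts(x) == via_sxth(x)) << "\n";  // 1 three times
  }
}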
 
@@ -583,31 +645,113 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
 void InstructionSelector::VisitInt32Add(Node* node) {
   ArmOperandGenerator g(this);
   Int32BinopMatcher m(node);
-  if (m.left().IsInt32Mul() && CanCover(node, m.left().node())) {
-    Int32BinopMatcher mleft(m.left().node());
-    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mleft.left().node()),
-         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
-    return;
-  }
-  if (m.right().IsInt32Mul() && CanCover(node, m.right().node())) {
-    Int32BinopMatcher mright(m.right().node());
-    Emit(kArmMla, g.DefineAsRegister(node), g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
-    return;
-  }
-  if (m.left().IsInt32MulHigh() && CanCover(node, m.left().node())) {
-    Int32BinopMatcher mleft(m.left().node());
-    Emit(kArmSmmla, g.DefineAsRegister(node),
-         g.UseRegister(mleft.left().node()),
-         g.UseRegister(mleft.right().node()), g.UseRegister(m.right().node()));
-    return;
+  if (CanCover(node, m.left().node())) {
+    switch (m.left().opcode()) {
+      case IrOpcode::kInt32Mul: {
+        Int32BinopMatcher mleft(m.left().node());
+        Emit(kArmMla, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()),
+             g.UseRegister(mleft.right().node()),
+             g.UseRegister(m.right().node()));
+        return;
+      }
+      case IrOpcode::kInt32MulHigh: {
+        Int32BinopMatcher mleft(m.left().node());
+        Emit(kArmSmmla, g.DefineAsRegister(node),
+             g.UseRegister(mleft.left().node()),
+             g.UseRegister(mleft.right().node()),
+             g.UseRegister(m.right().node()));
+        return;
+      }
+      case IrOpcode::kWord32And: {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.right().Is(0xff)) {
+          Emit(kArmUxtab, g.DefineAsRegister(node),
+               g.UseRegister(m.right().node()),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+          return;
+        } else if (mleft.right().Is(0xffff)) {
+          Emit(kArmUxtah, g.DefineAsRegister(node),
+               g.UseRegister(m.right().node()),
+               g.UseRegister(mleft.left().node()), g.TempImmediate(0));
+          return;
+        }
+      }
+      case IrOpcode::kWord32Sar: {
+        Int32BinopMatcher mleft(m.left().node());
+        if (CanCover(mleft.node(), mleft.left().node()) &&
+            mleft.left().IsWord32Shl()) {
+          Int32BinopMatcher mleftleft(mleft.left().node());
+          if (mleft.right().Is(24) && mleftleft.right().Is(24)) {
+            Emit(kArmSxtab, g.DefineAsRegister(node),
+                 g.UseRegister(m.right().node()),
+                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
+            return;
+          } else if (mleft.right().Is(16) && mleftleft.right().Is(16)) {
+            Emit(kArmSxtah, g.DefineAsRegister(node),
+                 g.UseRegister(m.right().node()),
+                 g.UseRegister(mleftleft.left().node()), g.TempImmediate(0));
+            return;
+          }
+        }
+      }
+      default:
+        break;
+    }
   }
-  if (m.right().IsInt32MulHigh() && CanCover(node, m.right().node())) {
-    Int32BinopMatcher mright(m.right().node());
-    Emit(kArmSmmla, g.DefineAsRegister(node),
-         g.UseRegister(mright.left().node()),
-         g.UseRegister(mright.right().node()), g.UseRegister(m.left().node()));
-    return;
+  if (CanCover(node, m.right().node())) {
+    switch (m.right().opcode()) {
+      case IrOpcode::kInt32Mul: {
+        Int32BinopMatcher mright(m.right().node());
+        Emit(kArmMla, g.DefineAsRegister(node),
+             g.UseRegister(mright.left().node()),
+             g.UseRegister(mright.right().node()),
+             g.UseRegister(m.left().node()));
+        return;
+      }
+      case IrOpcode::kInt32MulHigh: {
+        Int32BinopMatcher mright(m.right().node());
+        Emit(kArmSmmla, g.DefineAsRegister(node),
+             g.UseRegister(mright.left().node()),
+             g.UseRegister(mright.right().node()),
+             g.UseRegister(m.left().node()));
+        return;
+      }
+      case IrOpcode::kWord32And: {
+        Int32BinopMatcher mright(m.right().node());
+        if (mright.right().Is(0xff)) {
+          Emit(kArmUxtab, g.DefineAsRegister(node),
+               g.UseRegister(m.left().node()),
+               g.UseRegister(mright.left().node()), g.TempImmediate(0));
+          return;
+        } else if (mright.right().Is(0xffff)) {
+          Emit(kArmUxtah, g.DefineAsRegister(node),
+               g.UseRegister(m.left().node()),
+               g.UseRegister(mright.left().node()), g.TempImmediate(0));
+          return;
+        }
+      }
+      case IrOpcode::kWord32Sar: {
+        Int32BinopMatcher mright(m.right().node());
+        if (CanCover(mright.node(), mright.left().node()) &&
+            mright.left().IsWord32Shl()) {
+          Int32BinopMatcher mrightleft(mright.left().node());
+          if (mright.right().Is(24) && mrightleft.right().Is(24)) {
+            Emit(kArmSxtab, g.DefineAsRegister(node),
+                 g.UseRegister(m.left().node()),
+                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
+            return;
+          } else if (mright.right().Is(16) && mrightleft.right().Is(16)) {
+            Emit(kArmSxtah, g.DefineAsRegister(node),
+                 g.UseRegister(m.left().node()),
+                 g.UseRegister(mrightleft.left().node()), g.TempImmediate(0));
+            return;
+          }
+        }
+      }
+      default:
+        break;
+    }
   }
   VisitBinop(this, node, kArmAdd, kArmAdd);
 }
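
The expanded VisitInt32Add matches an extend feeding either add operand.
Roughly, these hypothetical C++ helpers show the source shapes that now
select a single add-with-extend instruction (illustrative only):

    uint32_t AddU8(uint32_t y, uint32_t x) { return y + (x & 0xff); }    // uxtab
    uint32_t AddU16(uint32_t y, uint32_t x) { return y + (x & 0xffff); } // uxtah
    int32_t AddS8(int32_t y, int32_t x) {                                // sxtab
      return y + (static_cast<int32_t>(static_cast<uint32_t>(x) << 24) >> 24);
    }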
@@ -786,16 +930,16 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   ArmOperandGenerator g(this);
-  Int32BinopMatcher m(node);
+  Float64BinopMatcher m(node);
   if (m.left().IsFloat64Mul() && CanCover(node, m.left().node())) {
-    Int32BinopMatcher mleft(m.left().node());
+    Float64BinopMatcher mleft(m.left().node());
     Emit(kArmVmlaF64, g.DefineSameAsFirst(node),
          g.UseRegister(m.right().node()), g.UseRegister(mleft.left().node()),
          g.UseRegister(mleft.right().node()));
     return;
   }
   if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    Int32BinopMatcher mright(m.right().node());
+    Float64BinopMatcher mright(m.right().node());
     Emit(kArmVmlaF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
          g.UseRegister(mright.left().node()),
          g.UseRegister(mright.right().node()));
@@ -807,9 +951,14 @@ void InstructionSelector::VisitFloat64Add(Node* node) {
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   ArmOperandGenerator g(this);
-  Int32BinopMatcher m(node);
+  Float64BinopMatcher m(node);
+  if (m.left().IsMinusZero()) {
+    Emit(kArmVnegF64, g.DefineAsRegister(node),
+         g.UseRegister(m.right().node()));
+    return;
+  }
   if (m.right().IsFloat64Mul() && CanCover(node, m.right().node())) {
-    Int32BinopMatcher mright(m.right().node());
+    Float64BinopMatcher mright(m.right().node());
     Emit(kArmVmlsF64, g.DefineSameAsFirst(node), g.UseRegister(m.left().node()),
          g.UseRegister(mright.left().node()),
          g.UseRegister(mright.right().node()));
@@ -820,13 +969,7 @@ void InstructionSelector::VisitFloat64Sub(Node* node) {
 
 
 void InstructionSelector::VisitFloat64Mul(Node* node) {
-  ArmOperandGenerator g(this);
-  Float64BinopMatcher m(node);
-  if (m.right().Is(-1.0)) {
-    Emit(kArmVnegF64, g.DefineAsRegister(node), g.UseRegister(m.left().node()));
-  } else {
-    VisitRRRFloat64(this, kArmVmulF64, node);
-  }
+  VisitRRRFloat64(this, kArmVmulF64, node);
 }
 
 
@@ -874,7 +1017,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
 
 void InstructionSelector::VisitCall(Node* node) {
   ArmOperandGenerator g(this);
-  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
 
   FrameStateDescriptor* frame_state_descriptor = NULL;
   if (descriptor->NeedsFrameState()) {
@@ -1102,10 +1245,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-  if (IsNextInAssemblyOrder(tbranch)) {  // We can fallthru to the true block.
-    cont.Negate();
-    cont.SwapBlocks();
-  }
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
@@ -1187,9 +1326,7 @@ MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
   MachineOperatorBuilder::Flags flags =
       MachineOperatorBuilder::kInt32DivIsSafe |
-      MachineOperatorBuilder::kInt32ModIsSafe |
-      MachineOperatorBuilder::kUint32DivIsSafe |
-      MachineOperatorBuilder::kUint32ModIsSafe;
+      MachineOperatorBuilder::kUint32DivIsSafe;
 
   if (CpuFeatures::IsSupported(ARMv8)) {
     flags |= MachineOperatorBuilder::kFloat64Floor |
index 2c644d6..3fca76f 100644 (file)
@@ -51,9 +51,9 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
     const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
-                                   flags);
+                                   flags, properties);
 }
 
 
index 41d4c54..e025236 100644 (file)
@@ -24,6 +24,18 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
   Arm64OperandConverter(CodeGenerator* gen, Instruction* instr)
       : InstructionOperandConverter(gen, instr) {}
 
+  DoubleRegister InputFloat32Register(int index) {
+    return InputDoubleRegister(index).S();
+  }
+
+  DoubleRegister InputFloat64Register(int index) {
+    return InputDoubleRegister(index);
+  }
+
+  DoubleRegister OutputFloat32Register() { return OutputDoubleRegister().S(); }
+
+  DoubleRegister OutputFloat64Register() { return OutputDoubleRegister(); }
+
   Register InputRegister32(int index) {
     return ToRegister(instr_->InputAt(index)).W();
   }
@@ -106,9 +118,8 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
     return MemOperand(no_reg);
   }
 
-  MemOperand MemoryOperand() {
-    int index = 0;
-    return MemoryOperand(&index);
+  MemOperand MemoryOperand(int first_index = 0) {
+    return MemoryOperand(&first_index);
   }
 
   Operand ToOperand(InstructionOperand* op) {
@@ -142,6 +153,9 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
         return Operand(constant.ToExternalReference());
       case Constant::kHeapObject:
         return Operand(constant.ToHeapObject());
+      case Constant::kRpoNumber:
+        UNREACHABLE();  // TODO(dcarney): RPO immediates on arm64.
+        break;
     }
     UNREACHABLE();
     return Operand(-1);
@@ -160,6 +174,106 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
 };
 
 
+namespace {
+
+class OutOfLineLoadNaN32 FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadNaN32(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Fmov(result_, std::numeric_limits<float>::quiet_NaN());
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadNaN64 FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadNaN64(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Fmov(result_, std::numeric_limits<double>::quiet_NaN());
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadZero FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadZero(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ Mov(result_, 0); }
+
+ private:
+  Register const result_;
+};
+
+}  // namespace
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width)                         \
+  do {                                                             \
+    auto result = i.OutputFloat##width##Register();                \
+    auto buffer = i.InputRegister(0);                              \
+    auto offset = i.InputRegister32(1);                            \
+    auto length = i.InputOperand32(2);                             \
+    __ Cmp(offset, length);                                        \
+    auto ool = new (zone()) OutOfLineLoadNaN##width(this, result); \
+    __ B(hs, ool->entry());                                        \
+    __ Ldr(result, MemOperand(buffer, offset, UXTW));              \
+    __ Bind(ool->exit());                                          \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)             \
+  do {                                                       \
+    auto result = i.OutputRegister32();                      \
+    auto buffer = i.InputRegister(0);                        \
+    auto offset = i.InputRegister32(1);                      \
+    auto length = i.InputOperand32(2);                       \
+    __ Cmp(offset, length);                                  \
+    auto ool = new (zone()) OutOfLineLoadZero(this, result); \
+    __ B(hs, ool->entry());                                  \
+    __ asm_instr(result, MemOperand(buffer, offset, UXTW));  \
+    __ Bind(ool->exit());                                    \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width)          \
+  do {                                               \
+    auto buffer = i.InputRegister(0);                \
+    auto offset = i.InputRegister32(1);              \
+    auto length = i.InputOperand32(2);               \
+    auto value = i.InputFloat##width##Register(3);   \
+    __ Cmp(offset, length);                          \
+    Label done;                                      \
+    __ B(hs, &done);                                 \
+    __ Str(value, MemOperand(buffer, offset, UXTW)); \
+    __ Bind(&done);                                  \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)          \
+  do {                                                     \
+    auto buffer = i.InputRegister(0);                      \
+    auto offset = i.InputRegister32(1);                    \
+    auto length = i.InputOperand32(2);                     \
+    auto value = i.InputRegister32(3);                     \
+    __ Cmp(offset, length);                                \
+    Label done;                                            \
+    __ B(hs, &done);                                       \
+    __ asm_instr(value, MemOperand(buffer, offset, UXTW)); \
+    __ Bind(&done);                                        \
+  } while (0)
+
+
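
These macros implement bounds-checked array accesses: compare the unsigned
offset against the length, branch with "hs" (unsigned higher or same) to the
failure path, and rejoin after the access. A C++ sketch of the intended
semantics, assuming a byte-addressed buffer (a hand-written model, not the
emitted code):

    #include <cstdint>
    #include <cstring>
    #include <limits>

    float CheckedLoadFloat32(const uint8_t* buffer, uint32_t offset,
                             uint32_t length) {
      if (offset >= length)  // the "hs" branch to OutOfLineLoadNaN32
        return std::numeric_limits<float>::quiet_NaN();
      float result;
      std::memcpy(&result, buffer + offset, sizeof(result));
      return result;
    }

    void CheckedStoreWord32(uint8_t* buffer, uint32_t offset, uint32_t length,
                            uint32_t value) {
      if (offset >= length) return;  // out-of-bounds stores are dropped
      std::memcpy(buffer + offset, &value, sizeof(value));
    }

Loads use out-of-line code for the failure path so the fast path stays
straight-line apart from the guard; out-of-bounds stores simply skip the
write.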
 #define ASSEMBLE_SHIFT(asm_instr, width)                                       \
   do {                                                                         \
     if (instr->InputAt(1)->IsRegister()) {                                     \
@@ -172,15 +286,6 @@ class Arm64OperandConverter FINAL : public InstructionOperandConverter {
   } while (0)
 
 
-#define ASSEMBLE_TEST_AND_BRANCH(asm_instr, width)           \
-  do {                                                       \
-    bool fallthrough = IsNextInAssemblyOrder(i.InputRpo(3)); \
-    __ asm_instr(i.InputRegister##width(0), i.InputInt6(1),  \
-                 GetLabel(i.InputRpo(2)));                   \
-    if (!fallthrough) __ B(GetLabel(i.InputRpo(3)));         \
-  } while (0)
-
-
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   Arm64OperandConverter i(this, instr);
@@ -216,7 +321,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
     }
     case kArchJmp:
-      __ B(GetLabel(i.InputRpo(0)));
+      AssembleArchJump(i.InputRpo(0));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -418,6 +523,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArm64Mov32:
       __ Mov(i.OutputRegister32(), i.InputRegister32(0));
       break;
+    case kArm64Sxtb32:
+      __ Sxtb(i.OutputRegister32(), i.InputRegister32(0));
+      break;
+    case kArm64Sxth32:
+      __ Sxth(i.OutputRegister32(), i.InputRegister32(0));
+      break;
     case kArm64Sxtw:
       __ Sxtw(i.OutputRegister(), i.InputRegister32(0));
       break;
@@ -429,17 +540,12 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ Ubfx(i.OutputRegister32(), i.InputRegister32(0), i.InputInt8(1),
               i.InputInt8(2));
       break;
-    case kArm64Tbz:
-      ASSEMBLE_TEST_AND_BRANCH(Tbz, 64);
-      break;
-    case kArm64Tbz32:
-      ASSEMBLE_TEST_AND_BRANCH(Tbz, 32);
+    case kArm64TestAndBranch32:
+    case kArm64TestAndBranch:
+      // Pseudo instructions turned into tbz/tbnz in AssembleArchBranch.
       break;
-    case kArm64Tbnz:
-      ASSEMBLE_TEST_AND_BRANCH(Tbnz, 64);
-      break;
-    case kArm64Tbnz32:
-      ASSEMBLE_TEST_AND_BRANCH(Tbnz, 32);
+    case kArm64CompareAndBranch32:
+      // Pseudo instruction turned into cbz/cbnz in AssembleArchBranch.
       break;
     case kArm64Claim: {
       int words = MiscField::decode(instr->opcode());
@@ -597,83 +703,154 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       }
       break;
     }
-  }
-}
-
-
-// Assemble branches after this instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
-  Arm64OperandConverter i(this, instr);
-  Label done;
-
-  // Emit a branch. The true and false targets are always the last two inputs
-  // to the instruction.
-  BasicBlock::RpoNumber tblock =
-      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
-  BasicBlock::RpoNumber fblock =
-      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
-  bool fallthru = IsNextInAssemblyOrder(fblock);
-  Label* tlabel = GetLabel(tblock);
-  Label* flabel = fallthru ? &done : GetLabel(fblock);
-  switch (condition) {
-    case kUnorderedEqual:
-      __ B(vs, flabel);
-    // Fall through.
-    case kEqual:
-      __ B(eq, tlabel);
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsb);
       break;
-    case kUnorderedNotEqual:
-      __ B(vs, tlabel);
-    // Fall through.
-    case kNotEqual:
-      __ B(ne, tlabel);
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrb);
       break;
-    case kSignedLessThan:
-      __ B(lt, tlabel);
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrsh);
       break;
-    case kSignedGreaterThanOrEqual:
-      __ B(ge, tlabel);
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldrh);
       break;
-    case kSignedLessThanOrEqual:
-      __ B(le, tlabel);
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(Ldr);
       break;
-    case kSignedGreaterThan:
-      __ B(gt, tlabel);
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(32);
       break;
-    case kUnorderedLessThan:
-      __ B(vs, flabel);
-    // Fall through.
-    case kUnsignedLessThan:
-      __ B(lo, tlabel);
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(64);
       break;
-    case kUnorderedGreaterThanOrEqual:
-      __ B(vs, tlabel);
-    // Fall through.
-    case kUnsignedGreaterThanOrEqual:
-      __ B(hs, tlabel);
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(Strb);
       break;
-    case kUnorderedLessThanOrEqual:
-      __ B(vs, flabel);
-    // Fall through.
-    case kUnsignedLessThanOrEqual:
-      __ B(ls, tlabel);
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(Strh);
       break;
-    case kUnorderedGreaterThan:
-      __ B(vs, tlabel);
-    // Fall through.
-    case kUnsignedGreaterThan:
-      __ B(hi, tlabel);
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(Str);
       break;
-    case kOverflow:
-      __ B(vs, tlabel);
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(32);
       break;
-    case kNotOverflow:
-      __ B(vc, tlabel);
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(64);
       break;
   }
-  if (!fallthru) __ B(flabel);  // no fallthru to flabel.
-  __ Bind(&done);
+}
+
+
+// Assemble branches after this instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  Arm64OperandConverter i(this, instr);
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  FlagsCondition condition = branch->condition;
+  ArchOpcode opcode = instr->arch_opcode();
+
+  if (opcode == kArm64CompareAndBranch32) {
+    switch (condition) {
+      case kEqual:
+        __ Cbz(i.InputRegister32(0), tlabel);
+        break;
+      case kNotEqual:
+        __ Cbnz(i.InputRegister32(0), tlabel);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (opcode == kArm64TestAndBranch32) {
+    switch (condition) {
+      case kEqual:
+        __ Tbz(i.InputRegister32(0), i.InputInt5(1), tlabel);
+        break;
+      case kNotEqual:
+        __ Tbnz(i.InputRegister32(0), i.InputInt5(1), tlabel);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else if (opcode == kArm64TestAndBranch) {
+    switch (condition) {
+      case kEqual:
+        __ Tbz(i.InputRegister64(0), i.InputInt6(1), tlabel);
+        break;
+      case kNotEqual:
+        __ Tbnz(i.InputRegister64(0), i.InputInt6(1), tlabel);
+        break;
+      default:
+        UNREACHABLE();
+    }
+  } else {
+    switch (condition) {
+      case kUnorderedEqual:
+        // The "eq" condition will not catch the unordered case.
+        // The jump/fall through to false label will be used if the comparison
+        // was unordered.
+      case kEqual:
+        __ B(eq, tlabel);
+        break;
+      case kUnorderedNotEqual:
+        // Unordered or not equal can be tested with "ne" condition.
+        // See ARMv8 manual C1.2.3 - Condition Code.
+      case kNotEqual:
+        __ B(ne, tlabel);
+        break;
+      case kSignedLessThan:
+        __ B(lt, tlabel);
+        break;
+      case kSignedGreaterThanOrEqual:
+        __ B(ge, tlabel);
+        break;
+      case kSignedLessThanOrEqual:
+        __ B(le, tlabel);
+        break;
+      case kSignedGreaterThan:
+        __ B(gt, tlabel);
+        break;
+      case kUnorderedLessThan:
+        // The "lo" condition will not catch the unordered case.
+        // The jump/fall-through to the false label will be used if the
+        // comparison was unordered.
+      case kUnsignedLessThan:
+        __ B(lo, tlabel);
+        break;
+      case kUnorderedGreaterThanOrEqual:
+        // Unordered, greater than or equal can be tested with "hs" condition.
+        // See ARMv8 manual C1.2.3 - Condition Code.
+      case kUnsignedGreaterThanOrEqual:
+        __ B(hs, tlabel);
+        break;
+      case kUnorderedLessThanOrEqual:
+        // The "ls" condition will not catch the unordered case.
+        // The jump/fall-through to the false label will be used if the
+        // comparison was unordered.
+      case kUnsignedLessThanOrEqual:
+        __ B(ls, tlabel);
+        break;
+      case kUnorderedGreaterThan:
+        // Unordered or greater than can be tested with "hi" condition.
+        // See ARMv8 manual C1.2.3 - Condition Code.
+      case kUnsignedGreaterThan:
+        __ B(hi, tlabel);
+        break;
+      case kOverflow:
+        __ B(vs, tlabel);
+        break;
+      case kNotOverflow:
+        __ B(vc, tlabel);
+        break;
+    }
+  }
+  if (!branch->fallthru) __ B(flabel);  // no fallthru to flabel.
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ B(GetLabel(target));
 }
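
With the pseudo-instructions expanded here rather than at selection time,
branch fusion no longer needs early knowledge of block layout. A hypothetical
source pattern for kArm64TestAndBranch32 (illustrative C++):

    // A branch on a single-bit mask is emitted as one instruction,
    // e.g. "tbz w0, #3, <true_label>", instead of a tst + b.cond pair;
    // a branch on (x == 0) similarly becomes a single cbz via
    // kArm64CompareAndBranch32.
    bool Bit3Clear(uint32_t x) { return (x & 0x8u) == 0; }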
 
 
@@ -757,7 +934,7 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
       cc = vc;
       break;
   }
-  __ bind(&check);
+  __ Bind(&check);
   __ Cset(reg, cc);
   __ Bind(&done);
 }
@@ -792,23 +969,6 @@ void CodeGenerator::AssemblePrologue() {
     __ Prologue(info->IsCodePreAgingActive());
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
-
-    // Sloppy mode functions and builtins need to replace the receiver with the
-    // global proxy when called as functions (without an explicit receiver
-    // object).
-    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
-    if (info->strict_mode() == SLOPPY && !info->is_native()) {
-      Label ok;
-      // +2 for return address and saved frame pointer.
-      int receiver_slot = info->scope()->num_parameters() + 2;
-      __ Ldr(x10, MemOperand(fp, receiver_slot * kXRegSize));
-      __ JumpIfNotRoot(x10, Heap::kUndefinedValueRootIndex, &ok);
-      __ Ldr(x10, GlobalObjectMemOperand());
-      __ Ldr(x10, FieldMemOperand(x10, GlobalObject::kGlobalProxyOffset));
-      __ Str(x10, MemOperand(fp, receiver_slot * kXRegSize));
-      __ Bind(&ok);
-    }
-
   } else {
     __ SetStackPointer(jssp);
     __ StubPrologue();
@@ -966,8 +1126,8 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     }
   } else if (source->IsStackSlot() || source->IsDoubleStackSlot()) {
     UseScratchRegisterScope scope(masm());
-    CPURegister temp_0 = scope.AcquireX();
-    CPURegister temp_1 = scope.AcquireX();
+    DoubleRegister temp_0 = scope.AcquireD();
+    DoubleRegister temp_1 = scope.AcquireD();
     MemOperand src = g.ToMemOperand(source, masm());
     MemOperand dst = g.ToMemOperand(destination, masm());
     __ Ldr(temp_0, src);
index f7af844..863451f 100644 (file)
@@ -65,13 +65,14 @@ namespace compiler {
   V(Arm64Ror)                      \
   V(Arm64Ror32)                    \
   V(Arm64Mov32)                    \
+  V(Arm64Sxtb32)                   \
+  V(Arm64Sxth32)                   \
   V(Arm64Sxtw)                     \
   V(Arm64Ubfx)                     \
   V(Arm64Ubfx32)                   \
-  V(Arm64Tbz)                      \
-  V(Arm64Tbz32)                    \
-  V(Arm64Tbnz)                     \
-  V(Arm64Tbnz32)                   \
+  V(Arm64TestAndBranch32)          \
+  V(Arm64TestAndBranch)            \
+  V(Arm64CompareAndBranch32)       \
   V(Arm64Claim)                    \
   V(Arm64Poke)                     \
   V(Arm64PokePairZero)             \
index 1040131..72661af 100644 (file)
@@ -362,6 +362,72 @@ void InstructionSelector::VisitStore(Node* node) {
 }
 
 
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  Arm64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
+       g.UseRegister(offset), g.UseOperand(length, kArithmeticImm));
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  Arm64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  Emit(opcode, nullptr, g.UseRegister(buffer), g.UseRegister(offset),
+       g.UseOperand(length, kArithmeticImm), g.UseRegister(value));
+}
+
+
 template <typename Matcher>
 static void VisitLogical(InstructionSelector* selector, Node* node, Matcher* m,
                          ArchOpcode opcode, bool left_can_cover,
@@ -542,6 +608,17 @@ void InstructionSelector::VisitWord32Shl(Node* node) {
 
 
 void InstructionSelector::VisitWord64Shl(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  if ((m.left().IsChangeInt32ToInt64() || m.left().IsChangeUint32ToUint64()) &&
+      m.right().IsInRange(32, 63)) {
+    // There's no need to sign/zero-extend to 64-bit if we shift out the upper
+    // 32 bits anyway.
+    Emit(kArm64Lsl, g.DefineAsRegister(node),
+         g.UseRegister(m.left().node()->InputAt(0)),
+         g.UseImmediate(m.right().node()));
+    return;
+  }
   VisitRRO(this, kArm64Lsl, node, kShift64Imm);
 }
 
@@ -597,6 +674,21 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
 
 
 void InstructionSelector::VisitWord32Sar(Node* node) {
+  Arm64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  // Select Sxth/Sxtb for (x << K) >> K where K is 16 or 24.
+  if (CanCover(node, m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kArm64Sxth32, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kArm64Sxtb32, g.DefineAsRegister(node),
+           g.UseRegister(mleft.left().node()));
+      return;
+    }
+  }
   VisitRRO(this, kArm64Asr32, node, kShift32Imm);
 }
 
@@ -871,7 +963,41 @@ void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
 
 void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
   Arm64OperandGenerator g(this);
-  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+  Node* value = node->InputAt(0);
+  switch (value->opcode()) {
+    case IrOpcode::kWord32And:
+    case IrOpcode::kWord32Or:
+    case IrOpcode::kWord32Xor:
+    case IrOpcode::kWord32Shl:
+    case IrOpcode::kWord32Shr:
+    case IrOpcode::kWord32Sar:
+    case IrOpcode::kWord32Ror:
+    case IrOpcode::kWord32Equal:
+    case IrOpcode::kInt32Add:
+    case IrOpcode::kInt32AddWithOverflow:
+    case IrOpcode::kInt32Sub:
+    case IrOpcode::kInt32SubWithOverflow:
+    case IrOpcode::kInt32Mul:
+    case IrOpcode::kInt32MulHigh:
+    case IrOpcode::kInt32Div:
+    case IrOpcode::kInt32Mod:
+    case IrOpcode::kInt32LessThan:
+    case IrOpcode::kInt32LessThanOrEqual:
+    case IrOpcode::kUint32Div:
+    case IrOpcode::kUint32LessThan:
+    case IrOpcode::kUint32LessThanOrEqual:
+    case IrOpcode::kUint32Mod:
+    case IrOpcode::kUint32MulHigh: {
+      // 32-bit operations will write their result in a W register (implicitly
+      // clearing the top 32 bits of the corresponding X register), so the
+      // zero-extension is a no-op.
+      Emit(kArchNop, g.DefineSameAsFirst(node), g.Use(value));
+      return;
+    }
+    default:
+      break;
+  }
+  Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(value));
 }
 
 
@@ -884,6 +1010,18 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
 
 void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
   Arm64OperandGenerator g(this);
+  Node* value = node->InputAt(0);
+  if (CanCover(node, value)) {
+    Int64BinopMatcher m(value);
+    if ((m.IsWord64Sar() && m.right().HasValue() &&
+         (m.right().Value() == 32)) ||
+        (m.IsWord64Shr() && m.right().IsInRange(32, 63))) {
+      Emit(kArm64Lsr, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.UseImmediate(m.right().node()));
+      return;
+    }
+  }
+
   Emit(kArm64Mov32, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
 }
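
All three arm64 changes above rely on W-register writes implicitly zeroing
the upper 32 bits of the X register. Hypothetical C++ helpers showing the
patterns that now avoid an explicit extend (illustrative only):

    uint64_t ShlWide(uint32_t x) {
      return static_cast<uint64_t>(x) << 40;  // extension shifted out: one lsl
    }
    int32_t HighWord(int64_t x) {
      return static_cast<int32_t>(x >> 32);   // truncated sar/shr: one lsr
    }
    uint64_t ZextAdd(uint32_t a, uint32_t b) {
      return static_cast<uint64_t>(a + b);    // 32-bit add zero-extends: kArchNop
    }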
 
@@ -943,7 +1081,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
 
 void InstructionSelector::VisitCall(Node* node) {
   Arm64OperandGenerator g(this);
-  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
 
   FrameStateDescriptor* frame_state_descriptor = NULL;
   if (descriptor->NeedsFrameState()) {
@@ -1097,12 +1235,6 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
 
   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
 
-  // If we can fall through to the true block, invert the branch.
-  if (IsNextInAssemblyOrder(tbranch)) {
-    cont.Negate();
-    cont.SwapBlocks();
-  }
-
   // Try to combine with comparisons against 0 by simply inverting the branch.
   while (CanCover(user, value)) {
     if (value->opcode() == IrOpcode::kWord32Equal) {
@@ -1211,9 +1343,8 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
           // If the mask has only one bit set, we can use tbz/tbnz.
           DCHECK((cont.condition() == kEqual) ||
                  (cont.condition() == kNotEqual));
-          ArchOpcode opcode =
-              (cont.condition() == kEqual) ? kArm64Tbz32 : kArm64Tbnz32;
-          Emit(opcode, NULL, g.UseRegister(m.left().node()),
+          Emit(cont.Encode(kArm64TestAndBranch32), NULL,
+               g.UseRegister(m.left().node()),
                g.TempImmediate(
                    base::bits::CountTrailingZeros32(m.right().Value())),
                g.Label(cont.true_block()),
@@ -1230,9 +1361,8 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
           // If the mask has only one bit set, we can use tbz/tbnz.
           DCHECK((cont.condition() == kEqual) ||
                  (cont.condition() == kNotEqual));
-          ArchOpcode opcode =
-              (cont.condition() == kEqual) ? kArm64Tbz : kArm64Tbnz;
-          Emit(opcode, NULL, g.UseRegister(m.left().node()),
+          Emit(cont.Encode(kArm64TestAndBranch), NULL,
+               g.UseRegister(m.left().node()),
                g.TempImmediate(
                    base::bits::CountTrailingZeros64(m.right().Value())),
                g.Label(cont.true_block()),
@@ -1247,8 +1377,10 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
     }
   }
 
-  // Branch could not be combined with a compare, emit compare against 0.
-  VisitWord32Test(this, value, &cont);
+  // Branch could not be combined with a compare; compare against 0 and branch.
+  Emit(cont.Encode(kArm64CompareAndBranch32), NULL, g.UseRegister(value),
+       g.Label(cont.true_block()),
+       g.Label(cont.false_block()))->MarkAsControl();
 }
 
 
@@ -1388,7 +1520,10 @@ InstructionSelector::SupportedMachineOperatorFlags() {
   return MachineOperatorBuilder::kFloat64Floor |
          MachineOperatorBuilder::kFloat64Ceil |
          MachineOperatorBuilder::kFloat64RoundTruncate |
-         MachineOperatorBuilder::kFloat64RoundTiesAway;
+         MachineOperatorBuilder::kFloat64RoundTiesAway |
+         MachineOperatorBuilder::kWord32ShiftIsSafe |
+         MachineOperatorBuilder::kInt32DivIsSafe |
+         MachineOperatorBuilder::kUint32DivIsSafe;
 }
 }  // namespace compiler
 }  // namespace internal
index bfa79da..291b552 100644 (file)
@@ -51,9 +51,9 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
     const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
-                                   flags);
+                                   flags, properties);
 }
 
 
index 805afc5..cde5e71 100644 (file)
@@ -20,14 +20,14 @@ namespace internal {
 namespace compiler {
 
 AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
-                                 JSGraph* jsgraph)
+                                 JSGraph* jsgraph, LoopAssignmentAnalysis* loop)
     : StructuredGraphBuilder(local_zone, jsgraph->graph(), jsgraph->common()),
       info_(info),
       jsgraph_(jsgraph),
       globals_(0, local_zone),
       breakable_(NULL),
       execution_context_(NULL),
-      loop_assignment_analysis_(NULL) {
+      loop_assignment_analysis_(loop) {
   InitializeAstVisitor(local_zone);
 }
 
@@ -62,23 +62,26 @@ bool AstGraphBuilder::CreateGraph() {
   int parameter_count = info()->num_parameters();
   graph()->SetStart(graph()->NewNode(common()->Start(parameter_count)));
 
-  if (FLAG_loop_assignment_analysis) {
-    // TODO(turbofan): use a temporary zone for the loop assignment analysis.
-    AstLoopAssignmentAnalyzer analyzer(zone(), info());
-    loop_assignment_analysis_ = analyzer.Analyze();
-  }
-
   // Initialize the top-level environment.
   Environment env(this, scope, graph()->start());
   set_environment(&env);
 
+  // Initialize the incoming context.
+  Node* outer_context = GetFunctionContext();
+  set_current_context(outer_context);
+
+  // Build receiver check for sloppy mode if necessary.
+  // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+  Node* original_receiver = env.Lookup(scope->receiver());
+  Node* patched_receiver = BuildPatchReceiverToGlobalProxy(original_receiver);
+  env.Bind(scope->receiver(), patched_receiver);
+
   // Build node to initialize local function context.
   Node* closure = GetFunctionClosure();
-  Node* outer = GetFunctionContext();
-  Node* inner = BuildLocalFunctionContext(outer, closure);
+  Node* inner_context = BuildLocalFunctionContext(outer_context, closure);
 
   // Push top-level function scope for the function body.
-  ContextScope top_context(this, scope, inner);
+  ContextScope top_context(this, scope, inner_context);
 
   // Build the arguments object if it is used.
   BuildArgumentsObject(scope->arguments());
@@ -139,26 +142,6 @@ static LhsKind DetermineLhsKind(Expression* expr) {
 }
 
 
-// Helper to find an existing shared function info in the baseline code for the
-// given function literal. Used to canonicalize SharedFunctionInfo objects.
-static Handle<SharedFunctionInfo> SearchSharedFunctionInfo(
-    Code* unoptimized_code, FunctionLiteral* expr) {
-  int start_position = expr->start_position();
-  for (RelocIterator it(unoptimized_code); !it.done(); it.next()) {
-    RelocInfo* rinfo = it.rinfo();
-    if (rinfo->rmode() != RelocInfo::EMBEDDED_OBJECT) continue;
-    Object* obj = rinfo->target_object();
-    if (obj->IsSharedFunctionInfo()) {
-      SharedFunctionInfo* shared = SharedFunctionInfo::cast(obj);
-      if (shared->start_position() == start_position) {
-        return Handle<SharedFunctionInfo>(shared);
-      }
-    }
-  }
-  return Handle<SharedFunctionInfo>();
-}
-
-
 StructuredGraphBuilder::Environment* AstGraphBuilder::CopyEnvironment(
     StructuredGraphBuilder::Environment* env) {
   return new (zone()) Environment(*reinterpret_cast<Environment*>(env));
@@ -386,8 +369,8 @@ void AstGraphBuilder::VisitVariableDeclaration(VariableDeclaration* decl) {
       Handle<Oddball> value = variable->binding_needs_init()
                                   ? isolate()->factory()->the_hole_value()
                                   : isolate()->factory()->undefined_value();
-      globals()->Add(variable->name(), zone());
-      globals()->Add(value, zone());
+      globals()->push_back(variable->name());
+      globals()->push_back(value);
       break;
     }
     case Variable::PARAMETER:
@@ -418,8 +401,8 @@ void AstGraphBuilder::VisitFunctionDeclaration(FunctionDeclaration* decl) {
           Compiler::BuildFunctionInfo(decl->fun(), info()->script(), info());
       // Check for stack-overflow exception.
       if (function.is_null()) return SetStackOverflow();
-      globals()->Add(variable->name(), zone());
-      globals()->Add(function, zone());
+      globals()->push_back(variable->name());
+      globals()->push_back(function);
       break;
     }
     case Variable::PARAMETER:
@@ -826,8 +809,8 @@ void AstGraphBuilder::VisitFunctionLiteral(FunctionLiteral* expr) {
 
   // Build a new shared function info if we cannot find one in the baseline
   // code. We also have a stack overflow if the recursive compilation did.
-  Handle<SharedFunctionInfo> shared_info =
-      SearchSharedFunctionInfo(info()->shared_info()->code(), expr);
+  expr->InitializeSharedInfo(handle(info()->shared_info()->code()));
+  Handle<SharedFunctionInfo> shared_info = expr->shared_info();
   if (shared_info.is_null()) {
     shared_info = Compiler::BuildFunctionInfo(expr, info()->script(), info());
     CHECK(!shared_info.is_null());  // TODO(mstarzinger): Set stack overflow?
@@ -1630,12 +1613,13 @@ void AstGraphBuilder::VisitCaseClause(CaseClause* expr) { UNREACHABLE(); }
 
 
 void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
-  DCHECK(globals()->is_empty());
+  DCHECK(globals()->empty());
   AstVisitor::VisitDeclarations(declarations);
-  if (globals()->is_empty()) return;
-  Handle<FixedArray> data =
-      isolate()->factory()->NewFixedArray(globals()->length(), TENURED);
-  for (int i = 0; i < globals()->length(); ++i) data->set(i, *globals()->at(i));
+  if (globals()->empty()) return;
+  int array_index = 0;
+  Handle<FixedArray> data = isolate()->factory()->NewFixedArray(
+      static_cast<int>(globals()->size()), TENURED);
+  for (Handle<Object> obj : *globals()) data->set(array_index++, *obj);
   int encoded_flags = DeclareGlobalsEvalFlag::encode(info()->is_eval()) |
                       DeclareGlobalsNativeFlag::encode(info()->is_native()) |
                       DeclareGlobalsStrictMode::encode(strict_mode());
@@ -1643,7 +1627,7 @@ void AstGraphBuilder::VisitDeclarations(ZoneList<Declaration*>* declarations) {
   Node* pairs = jsgraph()->Constant(data);
   const Operator* op = javascript()->CallRuntime(Runtime::kDeclareGlobals, 3);
   NewNode(op, current_context(), pairs, flags);
-  globals()->Rewind(0);
+  globals()->clear();
 }
 
 
@@ -1773,10 +1757,36 @@ Node* AstGraphBuilder::ProcessArguments(const Operator* op, int arity) {
 }
 
 
+Node* AstGraphBuilder::BuildPatchReceiverToGlobalProxy(Node* receiver) {
+  // Sloppy mode functions and builtins need to replace the receiver with the
+  // global proxy when called as functions (without an explicit receiver
+  // object). Otherwise there is nothing left to do here.
+  if (info()->strict_mode() != SLOPPY || info()->is_native()) return receiver;
+
+  // There is no need to perform patching if the receiver is never used. Note
+  // that scope predicates are purely syntactic; a call to eval might still
+  // inspect the receiver value.
+  if (!info()->scope()->uses_this() && !info()->scope()->inner_uses_this() &&
+      !info()->scope()->calls_sloppy_eval()) {
+    return receiver;
+  }
+
+  IfBuilder receiver_check(this);
+  Node* undefined = jsgraph()->UndefinedConstant();
+  Node* check = NewNode(javascript()->StrictEqual(), receiver, undefined);
+  receiver_check.If(check);
+  receiver_check.Then();
+  environment()->Push(BuildLoadGlobalProxy());
+  receiver_check.Else();
+  environment()->Push(receiver);
+  receiver_check.End();
+  return environment()->Pop();
+}
+
+
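
A value-level model of the check this builder emits, with a hypothetical
JSValue type standing in for graph nodes (illustrative only; the real code
builds an IfBuilder diamond as above):

    struct JSValue {
      bool is_undefined;
      // ... payload omitted ...
    };

    JSValue PatchReceiver(JSValue receiver, JSValue global_proxy) {
      // receiver === undefined  =>  use the global proxy instead
      return receiver.is_undefined ? global_proxy : receiver;
    }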
 Node* AstGraphBuilder::BuildLocalFunctionContext(Node* context, Node* closure) {
   int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
   if (heap_slots <= 0) return context;
-  set_current_context(context);
 
   // Allocate a new local context.
   const Operator* op = javascript()->CreateFunctionContext();
@@ -1984,7 +1994,12 @@ Node* AstGraphBuilder::BuildVariableAssignment(
           value = BuildHoleCheckSilent(current, value, current);
         }
       } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
-        // Non-initializing assignments to legacy const is ignored.
+        // Non-initializing assignments to legacy const are
+        // - an exception in strict mode.
+        // - ignored in sloppy mode.
+        if (strict_mode() == STRICT) {
+          return BuildThrowConstAssignError(bailout_id);
+        }
         return value;
       } else if (mode == LET && op != Token::INIT_LET) {
         // Perform an initialization check for let declared variables.
@@ -1998,8 +2013,8 @@ Node* AstGraphBuilder::BuildVariableAssignment(
           value = BuildHoleCheckThrow(current, variable, value, bailout_id);
         }
       } else if (mode == CONST && op != Token::INIT_CONST) {
-        // All assignments to const variables are early errors.
-        UNREACHABLE();
+        // Non-initializing assignments to const are an exception in all
+        // modes.
+        return BuildThrowConstAssignError(bailout_id);
       }
       environment()->Bind(variable, value);
       return value;
@@ -2013,7 +2028,12 @@ Node* AstGraphBuilder::BuildVariableAssignment(
         Node* current = NewNode(op, current_context());
         value = BuildHoleCheckSilent(current, value, current);
       } else if (mode == CONST_LEGACY && op != Token::INIT_CONST_LEGACY) {
-        // Non-initializing assignments to legacy const is ignored.
+        // Non-initializing assignments to legacy const are
+        // - an exception in strict mode.
+        // - ignored in sloppy mode.
+        if (strict_mode() == STRICT) {
+          return BuildThrowConstAssignError(bailout_id);
+        }
         return value;
       } else if (mode == LET && op != Token::INIT_LET) {
         // Perform an initialization check for let declared variables.
@@ -2022,8 +2042,8 @@ Node* AstGraphBuilder::BuildVariableAssignment(
         Node* current = NewNode(op, current_context());
         value = BuildHoleCheckThrow(current, variable, value, bailout_id);
       } else if (mode == CONST && op != Token::INIT_CONST) {
-        // All assignments to const variables are early errors.
-        UNREACHABLE();
+        // Non-initializing assignments to const are an exception in all
+        // modes.
+        return BuildThrowConstAssignError(bailout_id);
       }
       const Operator* op = javascript()->StoreContext(depth, variable->index());
       return NewNode(op, current_context(), value);
@@ -2069,6 +2089,14 @@ Node* AstGraphBuilder::BuildLoadGlobalObject() {
 }
 
 
+Node* AstGraphBuilder::BuildLoadGlobalProxy() {
+  Node* global = BuildLoadGlobalObject();
+  Node* proxy =
+      BuildLoadObjectField(global, JSGlobalObject::kGlobalProxyOffset);
+  return proxy;
+}
+
+
 Node* AstGraphBuilder::BuildToBoolean(Node* input) {
   // TODO(titzer): this should be in a JSOperatorReducer.
   switch (input->opcode()) {
@@ -2109,6 +2137,16 @@ Node* AstGraphBuilder::BuildThrowReferenceError(Variable* variable,
 }
 
 
+Node* AstGraphBuilder::BuildThrowConstAssignError(BailoutId bailout_id) {
+  // TODO(mstarzinger): Should be unified with the VisitThrow implementation.
+  const Operator* op =
+      javascript()->CallRuntime(Runtime::kThrowConstAssignError, 0);
+  Node* call = NewNode(op);
+  PrepareFrameState(call, bailout_id);
+  return call;
+}
+
+
 Node* AstGraphBuilder::BuildBinaryOp(Node* left, Node* right, Token::Value op) {
   const Operator* js_op;
   switch (op) {
index 518f23f..0337c81 100644 (file)
@@ -26,7 +26,8 @@ class LoopBuilder;
 // of function inlining.
 class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
  public:
-  AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph);
+  AstGraphBuilder(Zone* local_zone, CompilationInfo* info, JSGraph* jsgraph,
+                  LoopAssignmentAnalysis* loop_assignment = NULL);
 
   // Creates a graph by visiting the entire AST.
   bool CreateGraph();
@@ -56,11 +57,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
   // Support for control flow builders. The concrete type of the environment
   // depends on the graph builder, but environments themselves are not virtual.
   typedef StructuredGraphBuilder::Environment BaseEnvironment;
-  virtual BaseEnvironment* CopyEnvironment(BaseEnvironment* env) OVERRIDE;
-
-  // TODO(mstarzinger): The pipeline only needs to be a friend to access the
-  // function context. Remove as soon as the context is a parameter.
-  friend class Pipeline;
+  BaseEnvironment* CopyEnvironment(BaseEnvironment* env) OVERRIDE;
 
   // Getters for values in the activation record.
   Node* GetFunctionClosure();
@@ -72,6 +69,9 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
   // other dependencies tracked by the environment might be mutated though.
   //
 
+  // Builder to create a receiver check for sloppy mode.
+  Node* BuildPatchReceiverToGlobalProxy(Node* receiver);
+
   // Builder to create a local function context.
   Node* BuildLocalFunctionContext(Node* context, Node* closure);
 
@@ -92,6 +92,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
   // Builders for accessing the function context.
   Node* BuildLoadBuiltinsObject();
   Node* BuildLoadGlobalObject();
+  Node* BuildLoadGlobalProxy();
   Node* BuildLoadClosure();
   Node* BuildLoadObjectField(Node* object, int offset);
 
@@ -100,6 +101,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
 
   // Builders for error reporting at runtime.
   Node* BuildThrowReferenceError(Variable* var, BailoutId bailout_id);
+  Node* BuildThrowConstAssignError(BailoutId bailout_id);
 
   // Builders for dynamic hole-checks at runtime.
   Node* BuildHoleCheckSilent(Node* value, Node* for_hole, Node* not_hole);
@@ -112,13 +114,13 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
   // Builder for stack-check guards.
   Node* BuildStackCheck();
 
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node) OVERRIDE;
+#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
   // Visiting functions for AST nodes make this an AstVisitor.
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
   // Visiting function for declarations list is overridden.
-  virtual void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
+  void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
 
  private:
   CompilationInfo* info_;
@@ -126,7 +128,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
   JSGraph* jsgraph_;
 
   // List of global declarations for functions and variables.
-  ZoneList<Handle<Object> > globals_;
+  ZoneVector<Handle<Object>> globals_;
 
   // Stack of breakable statements entered by the visitor.
   BreakableScope* breakable_;
@@ -145,7 +147,7 @@ class AstGraphBuilder : public StructuredGraphBuilder, public AstVisitor {
   inline StrictMode strict_mode() const;
   JSGraph* jsgraph() { return jsgraph_; }
   JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
-  ZoneList<Handle<Object> >* globals() { return &globals_; }
+  ZoneVector<Handle<Object>>* globals() { return &globals_; }
 
   // Current scope during visitation.
   inline Scope* current_scope() const;
@@ -345,9 +347,9 @@ class AstGraphBuilder::AstEffectContext FINAL : public AstContext {
  public:
   explicit AstEffectContext(AstGraphBuilder* owner)
       : AstContext(owner, Expression::kEffect) {}
-  virtual ~AstEffectContext();
-  virtual void ProduceValue(Node* value) OVERRIDE;
-  virtual Node* ConsumeValue() OVERRIDE;
+  ~AstEffectContext() FINAL;
+  void ProduceValue(Node* value) FINAL;
+  Node* ConsumeValue() FINAL;
 };
 
 
@@ -356,9 +358,9 @@ class AstGraphBuilder::AstValueContext FINAL : public AstContext {
  public:
   explicit AstValueContext(AstGraphBuilder* owner)
       : AstContext(owner, Expression::kValue) {}
-  virtual ~AstValueContext();
-  virtual void ProduceValue(Node* value) OVERRIDE;
-  virtual Node* ConsumeValue() OVERRIDE;
+  ~AstValueContext() FINAL;
+  void ProduceValue(Node* value) FINAL;
+  Node* ConsumeValue() FINAL;
 };
 
 
@@ -367,9 +369,9 @@ class AstGraphBuilder::AstTestContext FINAL : public AstContext {
  public:
   explicit AstTestContext(AstGraphBuilder* owner)
       : AstContext(owner, Expression::kTest) {}
-  virtual ~AstTestContext();
-  virtual void ProduceValue(Node* value) OVERRIDE;
-  virtual Node* ConsumeValue() OVERRIDE;
+  ~AstTestContext() FINAL;
+  void ProduceValue(Node* value) FINAL;
+  Node* ConsumeValue() FINAL;
 };
 
 
index 10b9d83..00a7f2d 100644 (file)
@@ -46,7 +46,7 @@ class AstLoopAssignmentAnalyzer : public AstVisitor {
 
   LoopAssignmentAnalysis* Analyze();
 
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
index 708ec49..d7d3ade 100644 (file)
@@ -6,10 +6,11 @@
 
 #include <sstream>
 
+#include "src/compiler.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/machine-operator.h"
-#include "src/compiler/operator-properties-inl.h"
+#include "src/compiler/operator-properties.h"
 #include "src/compiler/schedule.h"
 
 namespace v8 {
@@ -69,7 +70,7 @@ BasicBlockProfiler::Data* BasicBlockInstrumentor::Instrument(
   CommonOperatorBuilder common(graph->zone());
   Node* zero = graph->NewNode(common.Int32Constant(0));
   Node* one = graph->NewNode(common.Int32Constant(1));
-  MachineOperatorBuilder machine;
+  MachineOperatorBuilder machine(graph->zone());
   BasicBlockVector* blocks = schedule->rpo_order();
   size_t block_number = 0;
   for (BasicBlockVector::iterator it = blocks->begin(); block_number < n_blocks;
index 16fbfa3..7ddc751 100644 (file)
@@ -75,7 +75,7 @@ Node* ChangeLowering::AllocateHeapNumberWithValue(Node* value, Node* control) {
   CallDescriptor* descriptor = linkage()->GetStubCallDescriptor(
       callable.descriptor(), 0, CallDescriptor::kNoFlags);
   Node* target = jsgraph()->HeapConstant(callable.code());
-  Node* context = jsgraph()->ZeroConstant();
+  Node* context = jsgraph()->NoContextConstant();
   Node* effect = graph()->NewNode(common()->ValueEffect(1), value);
   Node* heap_number = graph()->NewNode(common()->Call(descriptor), target,
                                        context, effect, control);
@@ -163,6 +163,9 @@ Reduction ChangeLowering::ChangeInt32ToTagged(Node* value, Node* control) {
         machine()->Word64Shl(),
         graph()->NewNode(machine()->ChangeInt32ToInt64(), value),
         SmiShiftBitsConstant()));
+  } else if (NodeProperties::GetBounds(value).upper->Is(Type::SignedSmall())) {
+    return Replace(
+        graph()->NewNode(machine()->WordShl(), value, SmiShiftBitsConstant()));
   }
 
   Node* add = graph()->NewNode(machine()->Int32AddWithOverflow(), value, value);
@@ -196,9 +199,9 @@ namespace {
 bool CanCover(Node* value, IrOpcode::Value opcode) {
   if (value->opcode() != opcode) return false;
   bool first = true;
-  for (auto i = value->uses().begin(); i != value->uses().end(); ++i) {
-    if (NodeProperties::IsEffectEdge(i.edge())) continue;
-    DCHECK(NodeProperties::IsValueEdge(i.edge()));
+  for (Edge const edge : value->use_edges()) {
+    if (NodeProperties::IsEffectEdge(edge)) continue;
+    DCHECK(NodeProperties::IsValueEdge(edge));
     if (!first) return false;
     first = false;
   }
@@ -233,11 +236,9 @@ Reduction ChangeLowering::ChangeTaggedToFloat64(Node* value, Node* control) {
     Node* phi1 = d1.Phi(kMachFloat64, phi2, ChangeSmiToFloat64(object));
     Node* ephi1 = d1.EffectPhi(number, effect);
 
-    for (auto i = value->uses().begin(); i != value->uses().end();) {
-      if (NodeProperties::IsEffectEdge(i.edge())) {
-        i.UpdateToAndIncrement(ephi1);
-      } else {
-        ++i;
+    for (Edge edge : value->use_edges()) {
+      if (NodeProperties::IsEffectEdge(edge)) {
+        edge.UpdateTo(ephi1);
       }
     }
     return Replace(phi1);
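
Earlier in this file, ChangeInt32ToTagged gains a fast path: when the typer
proves the input is SignedSmall, tagging reduces to a plain shift instead of
an add-with-overflow plus a heap-number fallback. A sketch assuming the
32-bit Smi encoding, where the tag occupies the low bit (hypothetical
helper):

    // Precondition: value fits in 31 signed bits (Type::SignedSmall), so
    // shifting cannot lose information; the Smi tag is 0 in the low bit.
    int32_t TagKnownSmi(int32_t value) {
      return static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
    }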
index 6911af5..773fd08 100644 (file)
@@ -21,9 +21,9 @@ class ChangeLowering FINAL : public Reducer {
  public:
   ChangeLowering(JSGraph* jsgraph, Linkage* linkage)
       : jsgraph_(jsgraph), linkage_(linkage) {}
-  virtual ~ChangeLowering();
+  ~ChangeLowering() FINAL;
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
+  Reduction Reduce(Node* node) FINAL;
 
  private:
   Node* HeapNumberValueIndexConstant();
index 0dc919a..7942344 100644
@@ -25,6 +25,8 @@ class InstructionOperandConverter {
   InstructionOperandConverter(CodeGenerator* gen, Instruction* instr)
       : gen_(gen), instr_(instr) {}
 
+  // -- Instruction operand accesses with conversions --------------------------
+
   Register InputRegister(int index) {
     return ToRegister(instr_->InputAt(index));
   }
@@ -57,22 +59,31 @@ class InstructionOperandConverter {
     return ToHeapObject(instr_->InputAt(index));
   }
 
-  Label* InputLabel(int index) { return gen_->GetLabel(InputRpo(index)); }
+  Label* InputLabel(int index) { return ToLabel(instr_->InputAt(index)); }
 
   BasicBlock::RpoNumber InputRpo(int index) {
-    int rpo_number = InputInt32(index);
-    return BasicBlock::RpoNumber::FromInt(rpo_number);
+    return ToRpoNumber(instr_->InputAt(index));
   }
 
   Register OutputRegister(int index = 0) {
     return ToRegister(instr_->OutputAt(index));
   }
 
+  Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+
   DoubleRegister OutputDoubleRegister() {
     return ToDoubleRegister(instr_->Output());
   }
 
-  Register TempRegister(int index) { return ToRegister(instr_->TempAt(index)); }
+  // -- Conversions for operands -----------------------------------------------
+
+  Label* ToLabel(InstructionOperand* op) {
+    return gen_->GetLabel(ToRpoNumber(op));
+  }
+
+  BasicBlock::RpoNumber ToRpoNumber(InstructionOperand* op) {
+    return ToConstant(op).ToRpoNumber();
+  }
 
   Register ToRegister(InstructionOperand* op) {
     DCHECK(op->IsRegister());
@@ -84,19 +95,17 @@ class InstructionOperandConverter {
     return DoubleRegister::FromAllocationIndex(op->index());
   }
 
-  Constant ToConstant(InstructionOperand* operand) {
-    if (operand->IsImmediate()) {
-      return gen_->code()->GetImmediate(operand->index());
+  Constant ToConstant(InstructionOperand* op) {
+    if (op->IsImmediate()) {
+      return gen_->code()->GetImmediate(op->index());
     }
-    return gen_->code()->GetConstant(operand->index());
+    return gen_->code()->GetConstant(op->index());
   }
 
-  double ToDouble(InstructionOperand* operand) {
-    return ToConstant(operand).ToFloat64();
-  }
+  double ToDouble(InstructionOperand* op) { return ToConstant(op).ToFloat64(); }
 
-  Handle<HeapObject> ToHeapObject(InstructionOperand* operand) {
-    return ToConstant(operand).ToHeapObject();
+  Handle<HeapObject> ToHeapObject(InstructionOperand* op) {
+    return ToConstant(op).ToHeapObject();
   }
 
   Frame* frame() const { return gen_->frame(); }
@@ -109,6 +118,27 @@ class InstructionOperandConverter {
 };
 
 
+// Generator for out-of-line code that is emitted after the main code is done.
+class OutOfLineCode : public ZoneObject {
+ public:
+  explicit OutOfLineCode(CodeGenerator* gen);
+  virtual ~OutOfLineCode();
+
+  virtual void Generate() = 0;
+
+  Label* entry() { return &entry_; }
+  Label* exit() { return &exit_; }
+  MacroAssembler* masm() const { return masm_; }
+  OutOfLineCode* next() const { return next_; }
+
+ private:
+  Label entry_;
+  Label exit_;
+  MacroAssembler* const masm_;
+  OutOfLineCode* const next_;
+};
+
+
 // TODO(dcarney): generify this on bleeding_edge and replace this call
 // when merged.
 static inline void FinishCode(MacroAssembler* masm) {
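The new OutOfLineCode class registers itself in its constructor: each instance is prepended to a singly linked list owned by the code generator, which later walks the list to emit all deferred stubs after the main code (see the GenerateCode() change in the next file). A minimal sketch of that intrusive-list pattern, with Emitter standing in for CodeGenerator (names here are hypothetical):

#include <cstdio>

class Emitter;

// Deferred code: links itself onto the emitter's list on construction.
class OutOfLine {
 public:
  explicit OutOfLine(Emitter* gen);
  virtual ~OutOfLine() {}
  virtual void Generate() = 0;
  OutOfLine* next() const { return next_; }

 private:
  OutOfLine* const next_;
};

class Emitter {
 public:
  OutOfLine* ools = nullptr;  // Head of the intrusive list.
  void EmitDeferred() {
    for (OutOfLine* ool = ools; ool != nullptr; ool = ool->next()) {
      ool->Generate();
    }
  }
};

OutOfLine::OutOfLine(Emitter* gen) : next_(gen->ools) { gen->ools = this; }

struct PrintStub : OutOfLine {
  explicit PrintStub(Emitter* gen) : OutOfLine(gen) {}
  void Generate() override { std::printf("deferred stub\n"); }
};

int main() {
  Emitter gen;
  PrintStub a(&gen), b(&gen);  // Each constructor prepends itself.
  gen.EmitDeferred();          // Emits b, then a (LIFO construction order).
  return 0;
}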
index 6b04eef..cfe4f06 100644
@@ -27,7 +27,8 @@ CodeGenerator::CodeGenerator(Frame* frame, Linkage* linkage,
       deoptimization_states_(code->zone()),
       deoptimization_literals_(code->zone()),
       translations_(code->zone()),
-      last_lazy_deopt_pc_(0) {
+      last_lazy_deopt_pc_(0),
+      ools_(nullptr) {
   for (int i = 0; i < code->InstructionBlockCount(); ++i) {
     new (&labels_[i]) Label;
   }
@@ -56,6 +57,8 @@ Handle<Code> CodeGenerator::GenerateCode() {
       if (block->IsDeferred() == (deferred == 0)) {
         continue;
       }
+      // Align loop headers on 16-byte boundaries.
+      if (block->IsLoopHeader()) masm()->Align(16);
       // Bind a label for a block.
       current_block_ = block->rpo_number();
       if (FLAG_code_comments) {
@@ -71,9 +74,19 @@ Handle<Code> CodeGenerator::GenerateCode() {
     }
   }
 
+  // Assemble all out-of-line code.
+  if (ools_) {
+    masm()->RecordComment("-- Out of line code --");
+    for (OutOfLineCode* ool = ools_; ool; ool = ool->next()) {
+      masm()->bind(ool->entry());
+      ool->Generate();
+      masm()->jmp(ool->exit());
+    }
+  }
+
   FinishCode(masm());
 
-  // Ensure there is space for lazy deopt.
+  // Ensure there is space for lazy deoptimization in the code.
   if (!info->IsStub()) {
     int target_offset = masm()->pc_offset() + Deoptimizer::patch_size();
     while (masm()->pc_offset() < target_offset) {
@@ -96,6 +109,11 @@ Handle<Code> CodeGenerator::GenerateCode() {
 
   PopulateDeoptimizationData(result);
 
+  // Ensure there is space for lazy deoptimization in the relocation info.
+  if (!info->IsStub()) {
+    Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(result);
+  }
+
   // Emit a code line info recording stop event.
   void* line_info = recorder->DetachJITHandlerData();
   LOG_CODE_EVENT(isolate(), CodeEndLinePosInfoRecordEvent(*result, line_info));
@@ -139,18 +157,39 @@ void CodeGenerator::AssembleInstruction(Instruction* instr) {
     // Assemble architecture-specific code for the instruction.
     AssembleArchInstruction(instr);
 
-    // Assemble branches or boolean materializations after this instruction.
     FlagsMode mode = FlagsModeField::decode(instr->opcode());
     FlagsCondition condition = FlagsConditionField::decode(instr->opcode());
-    switch (mode) {
-      case kFlags_none:
+    if (mode == kFlags_branch) {
+      // Assemble a branch after this instruction.
+      InstructionOperandConverter i(this, instr);
+      BasicBlock::RpoNumber true_rpo =
+          i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
+      BasicBlock::RpoNumber false_rpo =
+          i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
+
+      if (true_rpo == false_rpo) {
+        // Redundant branch: both targets are the same block.
+        if (!IsNextInAssemblyOrder(true_rpo)) {
+          AssembleArchJump(true_rpo);
+        }
         return;
-      case kFlags_set:
-        return AssembleArchBoolean(instr, condition);
-      case kFlags_branch:
-        return AssembleArchBranch(instr, condition);
+      }
+      if (IsNextInAssemblyOrder(true_rpo)) {
+        // The true block is next; fall through by negating the condition.
+        std::swap(true_rpo, false_rpo);
+        condition = NegateFlagsCondition(condition);
+      }
+      BranchInfo branch;
+      branch.condition = condition;
+      branch.true_label = GetLabel(true_rpo);
+      branch.false_label = GetLabel(false_rpo);
+      branch.fallthru = IsNextInAssemblyOrder(false_rpo);
+      // Assemble architecture-specific branch.
+      AssembleArchBranch(instr, &branch);
+    } else if (mode == kFlags_set) {
+      // Assemble a boolean materialization after this instruction.
+      AssembleArchBoolean(instr, condition);
     }
-    UNREACHABLE();
   }
 }
 
@@ -422,7 +461,8 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
     if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
         type == kMachInt16) {
       translation->StoreInt32StackSlot(op->index());
-    } else if (type == kMachUint32) {
+    } else if (type == kMachUint32 || type == kMachUint16 ||
+               type == kMachUint8) {
       translation->StoreUint32StackSlot(op->index());
     } else if ((type & kRepMask) == kRepTagged) {
       translation->StoreStackSlot(op->index());
@@ -437,7 +477,8 @@ void CodeGenerator::AddTranslationForOperand(Translation* translation,
     if (type == kMachBool || type == kMachInt32 || type == kMachInt8 ||
         type == kMachInt16) {
       translation->StoreInt32Register(converter.ToRegister(op));
-    } else if (type == kMachUint32) {
+    } else if (type == kMachUint32 || type == kMachUint16 ||
+               type == kMachUint8) {
       translation->StoreUint32Register(converter.ToRegister(op));
     } else if ((type & kRepMask) == kRepTagged) {
       translation->StoreRegister(converter.ToRegister(op));
@@ -489,7 +530,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
 
 
 void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
+                                       BranchInfo* branch) {
   UNIMPLEMENTED();
 }
 
@@ -500,6 +541,11 @@ void CodeGenerator::AssembleArchBoolean(Instruction* instr,
 }
 
 
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  UNIMPLEMENTED();
+}
+
+
 void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
   UNIMPLEMENTED();
 }
@@ -527,6 +573,15 @@ void CodeGenerator::AddNopForSmiCodeInlining() { UNIMPLEMENTED(); }
 
 #endif  // !V8_TURBOFAN_BACKEND
 
+
+OutOfLineCode::OutOfLineCode(CodeGenerator* gen)
+    : masm_(gen->masm()), next_(gen->ools_) {
+  gen->ools_ = this;
+}
+
+
+OutOfLineCode::~OutOfLineCode() {}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
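The rewritten branch assembly above first collapses branches whose two targets coincide into a plain jump, then, when the true target happens to be the next block in assembly order, swaps the targets and negates the condition so the hot edge falls through. A sketch of that planning step under a simplified two-way condition space (the real FlagsCondition set is much richer):

#include <cassert>
#include <utility>

enum Cond { kEqual, kNotEqual };  // Simplified condition space.
Cond Negate(Cond c) { return c == kEqual ? kNotEqual : kEqual; }

struct BranchPlan {
  Cond condition;
  int true_block;
  int false_block;
  bool fallthru;  // False target is the next block: no second jump needed.
};

// next_block models IsNextInAssemblyOrder() for a linear block layout.
BranchPlan Plan(Cond condition, int t, int f, int next_block) {
  if (t == next_block) {
    std::swap(t, f);                // True block is next: swap targets and
    condition = Negate(condition);  // negate so the taken edge is the cold one.
  }
  return {condition, t, f, f == next_block};
}

int main() {
  BranchPlan plan = Plan(kEqual, /*t=*/5, /*f=*/7, /*next_block=*/5);
  assert(plan.condition == kNotEqual);
  assert(plan.true_block == 7 && plan.false_block == 5);
  assert(plan.fallthru);  // The negated branch falls through into block 5.
  return 0;
}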
index b03e2ad..747bad2 100644
@@ -5,8 +5,6 @@
 #ifndef V8_COMPILER_CODE_GENERATOR_H_
 #define V8_COMPILER_CODE_GENERATOR_H_
 
-#include <deque>
-
 #include "src/compiler/gap-resolver.h"
 #include "src/compiler/instruction.h"
 #include "src/deoptimizer.h"
@@ -17,7 +15,17 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
 class Linkage;
+class OutOfLineCode;
+
+struct BranchInfo {
+  FlagsCondition condition;
+  Label* true_label;
+  Label* false_label;
+  bool fallthru;
+};
+
 
 // Generates native code for a sequence of instructions.
 class CodeGenerator FINAL : public GapResolver::Assembler {
@@ -60,7 +68,8 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
   // ===========================================================================
 
   void AssembleArchInstruction(Instruction* instr);
-  void AssembleArchBranch(Instruction* instr, FlagsCondition condition);
+  void AssembleArchJump(BasicBlock::RpoNumber target);
+  void AssembleArchBranch(Instruction* instr, BranchInfo* branch);
   void AssembleArchBoolean(Instruction* instr, FlagsCondition condition);
 
   void AssembleDeoptimizerCall(int deoptimization_id);
@@ -77,10 +86,10 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
   // ===========================================================================
 
   // Interface used by the gap resolver to emit moves and swaps.
-  virtual void AssembleMove(InstructionOperand* source,
-                            InstructionOperand* destination) OVERRIDE;
-  virtual void AssembleSwap(InstructionOperand* source,
-                            InstructionOperand* destination) OVERRIDE;
+  void AssembleMove(InstructionOperand* source,
+                    InstructionOperand* destination) FINAL;
+  void AssembleSwap(InstructionOperand* source,
+                    InstructionOperand* destination) FINAL;
 
   // ===========================================================================
   // Deoptimization table construction
@@ -120,6 +129,8 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
     int pc_offset_;
   };
 
+  friend class OutOfLineCode;
+
   Frame* const frame_;
   Linkage* const linkage_;
   InstructionSequence* const code_;
@@ -134,6 +145,7 @@ class CodeGenerator FINAL : public GapResolver::Assembler {
   ZoneDeque<Handle<Object> > deoptimization_literals_;
   TranslationBuffer translations_;
   int last_lazy_deopt_pc_;
+  OutOfLineCode* ools_;
 };
 
 }  // namespace compiler
diff --git a/deps/v8/src/compiler/common-node-cache.cc b/deps/v8/src/compiler/common-node-cache.cc
new file mode 100644
index 0000000..ee1fa0f
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-node-cache.h"
+
+#include "src/assembler.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Node** CommonNodeCache::FindExternalConstant(ExternalReference value) {
+  return external_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
+}
+
+
+void CommonNodeCache::GetCachedNodes(ZoneVector<Node*>* nodes) {
+  int32_constants_.GetCachedNodes(nodes);
+  int64_constants_.GetCachedNodes(nodes);
+  float32_constants_.GetCachedNodes(nodes);
+  float64_constants_.GetCachedNodes(nodes);
+  external_constants_.GetCachedNodes(nodes);
+  number_constants_.GetCachedNodes(nodes);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
index 5479574..7ec70ae 100644
@@ -5,57 +5,63 @@
 #ifndef V8_COMPILER_COMMON_NODE_CACHE_H_
 #define V8_COMPILER_COMMON_NODE_CACHE_H_
 
-#include "src/assembler.h"
 #include "src/compiler/node-cache.h"
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class ExternalReference;
+
+
 namespace compiler {
 
 // Bundles various caches for common nodes.
-class CommonNodeCache FINAL : public ZoneObject {
+class CommonNodeCache FINAL {
  public:
   explicit CommonNodeCache(Zone* zone) : zone_(zone) {}
+  ~CommonNodeCache() {}
 
   Node** FindInt32Constant(int32_t value) {
-    return int32_constants_.Find(zone_, value);
+    return int32_constants_.Find(zone(), value);
   }
 
   Node** FindInt64Constant(int64_t value) {
-    return int64_constants_.Find(zone_, value);
+    return int64_constants_.Find(zone(), value);
+  }
+
+  Node** FindFloat32Constant(float value) {
+    // We canonicalize float constants at the bit representation level.
+    return float32_constants_.Find(zone(), bit_cast<int32_t>(value));
   }
 
   Node** FindFloat64Constant(double value) {
     // We canonicalize double constants at the bit representation level.
-    return float64_constants_.Find(zone_, bit_cast<int64_t>(value));
+    return float64_constants_.Find(zone(), bit_cast<int64_t>(value));
   }
 
-  Node** FindExternalConstant(ExternalReference reference) {
-    return external_constants_.Find(zone_, reference.address());
-  }
+  Node** FindExternalConstant(ExternalReference value);
 
   Node** FindNumberConstant(double value) {
     // We canonicalize double constants at the bit representation level.
-    return number_constants_.Find(zone_, bit_cast<int64_t>(value));
+    return number_constants_.Find(zone(), bit_cast<int64_t>(value));
   }
 
-  Zone* zone() const { return zone_; }
+  // Return all nodes from the cache.
+  void GetCachedNodes(ZoneVector<Node*>* nodes);
 
-  void GetCachedNodes(NodeVector* nodes) {
-    int32_constants_.GetCachedNodes(nodes);
-    int64_constants_.GetCachedNodes(nodes);
-    float64_constants_.GetCachedNodes(nodes);
-    external_constants_.GetCachedNodes(nodes);
-    number_constants_.GetCachedNodes(nodes);
-  }
+  Zone* zone() const { return zone_; }
 
  private:
   Int32NodeCache int32_constants_;
   Int64NodeCache int64_constants_;
+  Int32NodeCache float32_constants_;
   Int64NodeCache float64_constants_;
-  PtrNodeCache external_constants_;
+  IntPtrNodeCache external_constants_;
   Int64NodeCache number_constants_;
   Zone* zone_;
+
+  DISALLOW_COPY_AND_ASSIGN(CommonNodeCache);
 };
 
 }  // namespace compiler
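The new FindFloat32Constant, like the double and number lookups above it, keys the cache on the bit pattern rather than the arithmetic value, so 0.0 and -0.0 get distinct entries and NaN payloads are preserved, neither of which value comparison with == would do. A small standalone illustration of the difference:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <map>

// Reinterpret the bytes of a float, in the spirit of V8's bit_cast.
int32_t FloatBits(float value) {
  int32_t bits;
  std::memcpy(&bits, &value, sizeof bits);
  return bits;
}

int main() {
  std::map<int32_t, int> cache;  // bit pattern -> cached node id (toy)
  cache[FloatBits(0.0f)] = 1;
  cache[FloatBits(-0.0f)] = 2;
  assert(0.0f == -0.0f);      // Equal as values...
  assert(cache.size() == 2);  // ...but distinct at the bit level.
  return 0;
}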
diff --git a/deps/v8/src/compiler/common-operator-reducer.cc b/deps/v8/src/compiler/common-operator-reducer.cc
new file mode 100644
index 0000000..cf597ea
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator-reducer.h"
+
+#include "src/compiler/common-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+Reduction CommonOperatorReducer::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kEffectPhi:
+    case IrOpcode::kPhi: {
+      int const input_count = node->InputCount();
+      if (input_count > 1) {
+        Node* const replacement = node->InputAt(0);
+        for (int i = 1; i < input_count - 1; ++i) {
+          if (node->InputAt(i) != replacement) return NoChange();
+        }
+        return Replace(replacement);
+      }
+      break;
+    }
+    case IrOpcode::kSelect: {
+      if (node->InputAt(1) == node->InputAt(2)) {
+        return Replace(node->InputAt(1));
+      }
+      break;
+    }
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
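The reducer above folds a phi whose value inputs all agree (the trailing input is the control edge, hence the `input_count - 1` bound) and a select whose two branches are the same node. The same check on a toy node structure, as a sketch:

#include <cassert>
#include <vector>

struct Node {
  std::vector<Node*> inputs;  // For a phi: value inputs, then control.
};

// Returns the common value input if the phi is redundant, else nullptr.
Node* ReducePhi(Node* phi) {
  int input_count = static_cast<int>(phi->inputs.size());
  if (input_count < 2) return nullptr;
  Node* replacement = phi->inputs[0];
  for (int i = 1; i < input_count - 1; ++i) {  // Skip the control input.
    if (phi->inputs[i] != replacement) return nullptr;
  }
  return replacement;
}

int main() {
  Node value, other, control;
  Node redundant{{&value, &value, &control}};  // phi(v, v) under `control`.
  Node genuine{{&value, &other, &control}};    // phi(v, w): not reducible.
  assert(ReducePhi(&redundant) == &value);
  assert(ReducePhi(&genuine) == nullptr);
  return 0;
}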
diff --git a/deps/v8/src/compiler/common-operator-reducer.h b/deps/v8/src/compiler/common-operator-reducer.h
new file mode 100644
index 0000000..10543db
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
+#define V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Performs strength reduction on nodes that have common operators.
+class CommonOperatorReducer FINAL : public Reducer {
+ public:
+  CommonOperatorReducer() {}
+  ~CommonOperatorReducer() FINAL {}
+
+  Reduction Reduce(Node* node) FINAL;
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_COMMON_OPERATOR_REDUCER_H_
index b0af2fd..a6cca45 100644
@@ -112,6 +112,32 @@ std::ostream& operator<<(std::ostream& os, FrameStateCallInfo const& info) {
   V(Return, Operator::kNoProperties, 1, 1, 1, 1)
 
 
+#define CACHED_LOOP_LIST(V) \
+  V(1)                      \
+  V(2)
+
+
+#define CACHED_MERGE_LIST(V) \
+  V(1)                       \
+  V(2)                       \
+  V(3)                       \
+  V(4)                       \
+  V(5)                       \
+  V(6)                       \
+  V(7)                       \
+  V(8)
+
+
+#define CACHED_PARAMETER_LIST(V) \
+  V(0)                           \
+  V(1)                           \
+  V(2)                           \
+  V(3)                           \
+  V(4)                           \
+  V(5)                           \
+  V(6)
+
+
 struct CommonOperatorGlobalCache FINAL {
 #define CACHED(Name, properties, value_input_count, effect_input_count,     \
                control_input_count, control_output_count)                   \
@@ -124,6 +150,59 @@ struct CommonOperatorGlobalCache FINAL {
   Name##Operator k##Name##Operator;
   CACHED_OP_LIST(CACHED)
 #undef CACHED
+
+  template <BranchHint kBranchHint>
+  struct BranchOperator FINAL : public Operator1<BranchHint> {
+    BranchOperator()
+        : Operator1<BranchHint>(                       // --
+              IrOpcode::kBranch, Operator::kFoldable,  // opcode
+              "Branch",                                // name
+              1, 0, 1, 0, 0, 2,                        // counts
+              kBranchHint) {}                          // parameter
+  };
+  BranchOperator<BranchHint::kNone> kBranchNoneOperator;
+  BranchOperator<BranchHint::kTrue> kBranchTrueOperator;
+  BranchOperator<BranchHint::kFalse> kBranchFalseOperator;
+
+  template <size_t kInputCount>
+  struct LoopOperator FINAL : public Operator {
+    LoopOperator()
+        : Operator(                                  // --
+              IrOpcode::kLoop, Operator::kFoldable,  // opcode
+              "Loop",                                // name
+              0, 0, kInputCount, 0, 0, 1) {}         // counts
+  };
+#define CACHED_LOOP(input_count) \
+  LoopOperator<input_count> kLoop##input_count##Operator;
+  CACHED_LOOP_LIST(CACHED_LOOP)
+#undef CACHED_LOOP
+
+  template <size_t kInputCount>
+  struct MergeOperator FINAL : public Operator {
+    MergeOperator()
+        : Operator(                                   // --
+              IrOpcode::kMerge, Operator::kFoldable,  // opcode
+              "Merge",                                // name
+              0, 0, kInputCount, 0, 0, 1) {}          // counts
+  };
+#define CACHED_MERGE(input_count) \
+  MergeOperator<input_count> kMerge##input_count##Operator;
+  CACHED_MERGE_LIST(CACHED_MERGE)
+#undef CACHED_MERGE
+
+  template <int kIndex>
+  struct ParameterOperator FINAL : public Operator1<int> {
+    ParameterOperator()
+        : Operator1<int>(                             // --
+              IrOpcode::kParameter, Operator::kPure,  // opcode
+              "Parameter",                            // name
+              1, 0, 0, 1, 0, 0,                       // counts
+              kIndex) {}                              // parameter
+  };
+#define CACHED_PARAMETER(index) \
+  ParameterOperator<index> kParameter##index##Operator;
+  CACHED_PARAMETER_LIST(CACHED_PARAMETER)
+#undef CACHED_PARAMETER
 };
 
 
@@ -145,8 +224,16 @@ CACHED_OP_LIST(CACHED)
 
 
 const Operator* CommonOperatorBuilder::Branch(BranchHint hint) {
-  return new (zone()) Operator1<BranchHint>(
-      IrOpcode::kBranch, Operator::kFoldable, "Branch", 1, 0, 1, 0, 0, 2, hint);
+  switch (hint) {
+    case BranchHint::kNone:
+      return &cache_.kBranchNoneOperator;
+    case BranchHint::kTrue:
+      return &cache_.kBranchTrueOperator;
+    case BranchHint::kFalse:
+      return &cache_.kBranchFalseOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
 }
 
 
@@ -160,19 +247,39 @@ const Operator* CommonOperatorBuilder::Start(int num_formal_parameters) {
 }
 
 
-const Operator* CommonOperatorBuilder::Merge(int controls) {
-  return new (zone()) Operator(               // --
-      IrOpcode::kMerge, Operator::kFoldable,  // opcode
-      "Merge",                                // name
-      0, 0, controls, 0, 0, 1);               // counts
-}
-
-
-const Operator* CommonOperatorBuilder::Loop(int controls) {
+const Operator* CommonOperatorBuilder::Loop(int control_input_count) {
+  switch (control_input_count) {
+#define CACHED_LOOP(input_count) \
+  case input_count:              \
+    return &cache_.kLoop##input_count##Operator;
+    CACHED_LOOP_LIST(CACHED_LOOP)
+#undef CACHED_LOOP
+    default:
+      break;
+  }
+  // Uncached.
   return new (zone()) Operator(              // --
       IrOpcode::kLoop, Operator::kFoldable,  // opcode
       "Loop",                                // name
-      0, 0, controls, 0, 0, 1);              // counts
+      0, 0, control_input_count, 0, 0, 1);   // counts
+}
+
+
+const Operator* CommonOperatorBuilder::Merge(int control_input_count) {
+  switch (control_input_count) {
+#define CACHED_MERGE(input_count) \
+  case input_count:               \
+    return &cache_.kMerge##input_count##Operator;
+    CACHED_MERGE_LIST(CACHED_MERGE)
+#undef CACHED_MERGE
+    default:
+      break;
+  }
+  // Uncached.
+  return new (zone()) Operator(               // --
+      IrOpcode::kMerge, Operator::kFoldable,  // opcode
+      "Merge",                                // name
+      0, 0, control_input_count, 0, 0, 1);    // counts
 }
 
 
@@ -185,6 +292,16 @@ const Operator* CommonOperatorBuilder::Terminate(int effects) {
 
 
 const Operator* CommonOperatorBuilder::Parameter(int index) {
+  switch (index) {
+#define CACHED_PARAMETER(index) \
+  case index:                   \
+    return &cache_.kParameter##index##Operator;
+    CACHED_PARAMETER_LIST(CACHED_PARAMETER)
+#undef CACHED_PARAMETER
+    default:
+      break;
+  }
+  // Uncached.
   return new (zone()) Operator1<int>(         // --
       IrOpcode::kParameter, Operator::kPure,  // opcode
       "Parameter",                            // name
@@ -339,7 +456,7 @@ const Operator* CommonOperatorBuilder::Call(const CallDescriptor* descriptor) {
               descriptor->ReturnCount(),
               Operator::ZeroIfPure(descriptor->properties()), 0, descriptor) {}
 
-    virtual void PrintParameter(std::ostream& os) const OVERRIDE {
+    void PrintParameter(std::ostream& os) const OVERRIDE {
       os << "[" << *parameter() << "]";
     }
   };
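The CACHED_*_LIST tables above stamp out one statically allocated operator per common shape (a Branch per hint, a Loop or Merge per small input count, a Parameter per small index), so the builder hands back a shared singleton for the common cases and only falls back to zone allocation for the rest. A sketch of that cache-with-fallback shape (plain heap allocation stands in for V8's zone, and is leaked here for brevity):

#include <cassert>

struct Op {
  int input_count;
};

// Shared singletons for the common small shapes, as in CACHED_MERGE_LIST.
Op kMerge1{1};
Op kMerge2{2};

const Op* Merge(int input_count) {
  switch (input_count) {
    case 1: return &kMerge1;
    case 2: return &kMerge2;
    default: break;
  }
  // Uncached shape: allocate a fresh operator.
  return new Op{input_count};
}

int main() {
  assert(Merge(2) == Merge(2));  // Cached: the same pointer every time.
  assert(Merge(9) != Merge(9));  // Uncached: a fresh allocation per call.
  return 0;
}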
index 9938989..af6066b 100644
@@ -169,8 +169,8 @@ class CommonOperatorBuilder FINAL : public ZoneObject {
   const Operator* Return();
 
   const Operator* Start(int num_formal_parameters);
-  const Operator* Merge(int controls);
-  const Operator* Loop(int controls);
+  const Operator* Loop(int control_input_count);
+  const Operator* Merge(int control_input_count);
   const Operator* Parameter(int index);
 
   const Operator* Int32Constant(int32_t);
index be24a88..8725244 100644
@@ -78,7 +78,6 @@ void SwitchBuilder::BeginSwitch() {
   body_environment_ = environment()->CopyAsUnreachable();
   label_environment_ = environment()->CopyAsUnreachable();
   break_environment_ = environment()->CopyAsUnreachable();
-  body_environments_.AddBlock(NULL, case_count(), zone());
 }
 
 
index 4b5fc3a..11adfdb 100644
@@ -40,7 +40,7 @@ class ControlBuilder {
 
 
 // Tracks control flow for a conditional statement.
-class IfBuilder : public ControlBuilder {
+class IfBuilder FINAL : public ControlBuilder {
  public:
   explicit IfBuilder(StructuredGraphBuilder* builder)
       : ControlBuilder(builder),
@@ -60,7 +60,7 @@ class IfBuilder : public ControlBuilder {
 
 
 // Tracks control flow for an iteration statement.
-class LoopBuilder : public ControlBuilder {
+class LoopBuilder FINAL : public ControlBuilder {
  public:
   explicit LoopBuilder(StructuredGraphBuilder* builder)
       : ControlBuilder(builder),
@@ -74,8 +74,8 @@ class LoopBuilder : public ControlBuilder {
   void EndLoop();
 
   // Primitive support for break and continue.
-  virtual void Continue();
-  virtual void Break();
+  void Continue() FINAL;
+  void Break() FINAL;
 
   // Compound control command for conditional break.
   void BreakUnless(Node* condition);
@@ -88,7 +88,7 @@ class LoopBuilder : public ControlBuilder {
 
 
 // Tracks control flow for a switch statement.
-class SwitchBuilder : public ControlBuilder {
+class SwitchBuilder FINAL : public ControlBuilder {
  public:
   explicit SwitchBuilder(StructuredGraphBuilder* builder, int case_count)
       : ControlBuilder(builder),
@@ -107,21 +107,21 @@ class SwitchBuilder : public ControlBuilder {
   void EndSwitch();
 
   // Primitive support for break.
-  virtual void Break();
+  void Break() FINAL;
 
   // The number of cases within a switch is statically known.
-  int case_count() const { return body_environments_.capacity(); }
+  size_t case_count() const { return body_environments_.size(); }
 
  private:
   Environment* body_environment_;   // Environment after last case body.
   Environment* label_environment_;  // Environment for next label condition.
   Environment* break_environment_;  // Environment after the switch exits.
-  ZoneList<Environment*> body_environments_;
+  ZoneVector<Environment*> body_environments_;
 };
 
 
 // Tracks control flow for a block statement.
-class BlockBuilder : public ControlBuilder {
+class BlockBuilder FINAL : public ControlBuilder {
  public:
   explicit BlockBuilder(StructuredGraphBuilder* builder)
       : ControlBuilder(builder), break_environment_(NULL) {}
@@ -131,13 +131,14 @@ class BlockBuilder : public ControlBuilder {
   void EndBlock();
 
   // Primitive support for break.
-  virtual void Break();
+  void Break() FINAL;
 
  private:
   Environment* break_environment_;  // Environment after the block exits.
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_CONTROL_BUILDERS_H_
diff --git a/deps/v8/src/compiler/control-equivalence.h b/deps/v8/src/compiler/control-equivalence.h
new file mode 100644
index 0000000..cca087f
--- /dev/null
@@ -0,0 +1,361 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_CONTROL_EQUIVALENCE_H_
+#define V8_COMPILER_CONTROL_EQUIVALENCE_H_
+
+#include "src/v8.h"
+
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Determines control dependence equivalence classes for control nodes. Any two
+// nodes having the same set of control dependences land in one class. These
+// classes can in turn be used to:
+//  - Build a program structure tree (PST) for controls in the graph.
+//  - Determine single-entry single-exit (SESE) regions within the graph.
+//
+// Note that this implementation actually uses cycle equivalence to establish
+// class numbers. Any two nodes are cycle equivalent if they occur in the same
+// set of cycles. It can be shown that control dependence equivalence reduces
+// to undirected cycle equivalence for strongly connected control flow graphs.
+//
+// The algorithm is based on the paper "The Program Structure Tree: Computing
+// Control Regions in Linear Time" by Johnson, Pearson & Pingali (PLDI '94),
+// which also contains proofs for the aforementioned equivalence. References
+// to line numbers in the algorithm from Figure 4 have been added [line:x].
+class ControlEquivalence : public ZoneObject {
+ public:
+  ControlEquivalence(Zone* zone, Graph* graph)
+      : zone_(zone),
+        graph_(graph),
+        dfs_number_(0),
+        class_number_(1),
+        node_data_(graph->NodeCount(), EmptyData(), zone) {}
+
+  // Run the main algorithm starting from the {exit} control node. This causes
+  // the following iterations over control edges of the graph:
+  //  1) A breadth-first backwards traversal to determine the set of nodes that
+  //     participate in the next step. Takes O(E) time and O(N) space.
+  //  2) An undirected depth-first backwards traversal that determines class
+  //     numbers for all participating nodes. Takes O(E) time and O(N) space.
+  void Run(Node* exit) {
+    if (GetClass(exit) != kInvalidClass) return;
+    DetermineParticipation(exit);
+    RunUndirectedDFS(exit);
+  }
+
+  // Retrieves a previously computed class number.
+  size_t ClassOf(Node* node) {
+    DCHECK(GetClass(node) != kInvalidClass);
+    return GetClass(node);
+  }
+
+ private:
+  static const size_t kInvalidClass = static_cast<size_t>(-1);
+  typedef enum { kInputDirection, kUseDirection } DFSDirection;
+
+  struct Bracket {
+    DFSDirection direction;  // Direction in which this bracket was added.
+    size_t recent_class;     // Cached class when bracket was topmost.
+    size_t recent_size;      // Cached set-size when bracket was topmost.
+    Node* from;              // Node that this bracket originates from.
+    Node* to;                // Node that this bracket points to.
+  };
+
+  // The set of brackets for each node during the DFS walk.
+  typedef ZoneLinkedList<Bracket> BracketList;
+
+  struct DFSStackEntry {
+    DFSDirection direction;            // Direction currently used in DFS walk.
+    Node::InputEdges::iterator input;  // Iterator used for "input" direction.
+    Node::UseEdges::iterator use;      // Iterator used for "use" direction.
+    Node* parent_node;                 // Parent node of entry during DFS walk.
+    Node* node;                        // Node that this stack entry belongs to.
+  };
+
+  // The stack is used during the undirected DFS walk.
+  typedef ZoneStack<DFSStackEntry> DFSStack;
+
+  struct NodeData {
+    size_t class_number;  // Equivalence class number assigned to node.
+    size_t dfs_number;    // Pre-order DFS number assigned to node.
+    bool visited;         // Indicates node has already been visited.
+    bool on_stack;        // Indicates node is on DFS stack during walk.
+    bool participates;    // Indicates node participates in DFS walk.
+    BracketList blist;    // List of brackets per node.
+  };
+
+  // The per-node data computed during the DFS walk.
+  typedef ZoneVector<NodeData> Data;
+
+  // Called at pre-visit during DFS walk.
+  void VisitPre(Node* node) {
+    Trace("CEQ: Pre-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+
+    // Dispense a new pre-order number.
+    SetNumber(node, NewDFSNumber());
+    Trace("  Assigned DFS number is %d\n", static_cast<int>(GetNumber(node)));
+  }
+
+  // Called at mid-visit during DFS walk.
+  void VisitMid(Node* node, DFSDirection direction) {
+    Trace("CEQ: Mid-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+    BracketList& blist = GetBracketList(node);
+
+    // Remove brackets pointing to this node [line:19].
+    BracketListDelete(blist, node, direction);
+
+    // Potentially introduce artificial dependency from start to end.
+    if (blist.empty()) {
+      DCHECK_EQ(kInputDirection, direction);
+      VisitBackedge(node, graph_->end(), kInputDirection);
+    }
+
+    // Potentially start a new equivalence class [line:37].
+    BracketListTrace(blist);
+    Bracket* recent = &blist.back();
+    if (recent->recent_size != blist.size()) {
+      recent->recent_size = blist.size();
+      recent->recent_class = NewClassNumber();
+    }
+
+    // Assign equivalence class to node.
+    SetClass(node, recent->recent_class);
+    Trace("  Assigned class number is %d\n", static_cast<int>(GetClass(node)));
+  }
+
+  // Called at post-visit during DFS walk.
+  void VisitPost(Node* node, Node* parent_node, DFSDirection direction) {
+    Trace("CEQ: Post-visit of #%d:%s\n", node->id(), node->op()->mnemonic());
+    BracketList& blist = GetBracketList(node);
+
+    // Remove brackets pointing to this node [line:19].
+    BracketListDelete(blist, node, direction);
+
+    // Propagate bracket list up the DFS tree [line:13].
+    if (parent_node != NULL) {
+      BracketList& parent_blist = GetBracketList(parent_node);
+      parent_blist.splice(parent_blist.end(), blist);
+    }
+  }
+
+  // Called when hitting a back edge in the DFS walk.
+  void VisitBackedge(Node* from, Node* to, DFSDirection direction) {
+    Trace("CEQ: Backedge from #%d:%s to #%d:%s\n", from->id(),
+          from->op()->mnemonic(), to->id(), to->op()->mnemonic());
+
+    // Push backedge onto the bracket list [line:25].
+    Bracket bracket = {direction, kInvalidClass, 0, from, to};
+    GetBracketList(from).push_back(bracket);
+  }
+
+  // Performs an undirected DFS walk of the graph. Conceptually all nodes are
+  // expanded, splitting "input" and "use" out into separate nodes. During the
+  // traversal, edges towards the representative nodes are preferred.
+  //
+  //   \ /        - Pre-visit: When N1 is visited in direction D the preferred
+  //    x   N1      edge towards N is taken next, calling VisitPre(N).
+  //    |         - Mid-visit: After all edges out of N2 in direction D have
+  //    |   N       been visited, we switch the direction and start considering
+  //    |           edges out of N1 now, and we call VisitMid(N).
+  //    x   N2    - Post-visit: After all edges out of N1 in direction opposite
+  //   / \          to D have been visited, we pop N and call VisitPost(N).
+  //
+  // This will yield a true spanning tree (without cross or forward edges) and
+  // also discover proper back edges in both directions.
+  void RunUndirectedDFS(Node* exit) {
+    ZoneStack<DFSStackEntry> stack(zone_);
+    DFSPush(stack, exit, NULL, kInputDirection);
+    VisitPre(exit);
+
+    while (!stack.empty()) {  // Undirected depth-first backwards traversal.
+      DFSStackEntry& entry = stack.top();
+      Node* node = entry.node;
+
+      if (entry.direction == kInputDirection) {
+        if (entry.input != node->input_edges().end()) {
+          Edge edge = *entry.input;
+          Node* input = edge.to();
+          ++(entry.input);
+          if (NodeProperties::IsControlEdge(edge) &&
+              NodeProperties::IsControl(input)) {
+            // Visit next control input.
+            if (!GetData(input)->participates) continue;
+            if (GetData(input)->visited) continue;
+            if (GetData(input)->on_stack) {
+              // Found backedge if input is on stack.
+              if (input != entry.parent_node) {
+                VisitBackedge(node, input, kInputDirection);
+              }
+            } else {
+              // Push input onto stack.
+              DFSPush(stack, input, node, kInputDirection);
+              VisitPre(input);
+            }
+          }
+          continue;
+        }
+        if (entry.use != node->use_edges().end()) {
+          // Switch direction to uses.
+          entry.direction = kUseDirection;
+          VisitMid(node, kInputDirection);
+          continue;
+        }
+      }
+
+      if (entry.direction == kUseDirection) {
+        if (entry.use != node->use_edges().end()) {
+          Edge edge = *entry.use;
+          Node* use = edge.from();
+          ++(entry.use);
+          if (NodeProperties::IsControlEdge(edge) &&
+              NodeProperties::IsControl(use)) {
+            // Visit next control use.
+            if (!GetData(use)->participates) continue;
+            if (GetData(use)->visited) continue;
+            if (GetData(use)->on_stack) {
+              // Found backedge if use is on stack.
+              if (use != entry.parent_node) {
+                VisitBackedge(node, use, kUseDirection);
+              }
+            } else {
+              // Push use onto stack.
+              DFSPush(stack, use, node, kUseDirection);
+              VisitPre(use);
+            }
+          }
+          continue;
+        }
+        if (entry.input != node->input_edges().end()) {
+          // Switch direction to inputs.
+          entry.direction = kInputDirection;
+          VisitMid(node, kUseDirection);
+          continue;
+        }
+      }
+
+      // Pop node from stack when done with all inputs and uses.
+      DCHECK(entry.input == node->input_edges().end());
+      DCHECK(entry.use == node->use_edges().end());
+      DFSPop(stack, node);
+      VisitPost(node, entry.parent_node, entry.direction);
+    }
+  }
+
+  void DetermineParticipationEnqueue(ZoneQueue<Node*>& queue, Node* node) {
+    if (!GetData(node)->participates) {
+      GetData(node)->participates = true;
+      queue.push(node);
+    }
+  }
+
+  void DetermineParticipation(Node* exit) {
+    ZoneQueue<Node*> queue(zone_);
+    DetermineParticipationEnqueue(queue, exit);
+    while (!queue.empty()) {  // Breadth-first backwards traversal.
+      Node* node = queue.front();
+      queue.pop();
+      int max = NodeProperties::PastControlIndex(node);
+      for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
+        DetermineParticipationEnqueue(queue, node->InputAt(i));
+      }
+    }
+  }
+
+ private:
+  NodeData* GetData(Node* node) { return &node_data_[node->id()]; }
+  int NewClassNumber() { return class_number_++; }
+  int NewDFSNumber() { return dfs_number_++; }
+
+  // Template used to initialize per-node data.
+  NodeData EmptyData() {
+    return {kInvalidClass, 0, false, false, false, BracketList(zone_)};
+  }
+
+  // Accessors for the DFS number stored within the per-node data.
+  size_t GetNumber(Node* node) { return GetData(node)->dfs_number; }
+  void SetNumber(Node* node, size_t number) {
+    GetData(node)->dfs_number = number;
+  }
+
+  // Accessors for the equivalence class stored within the per-node data.
+  size_t GetClass(Node* node) { return GetData(node)->class_number; }
+  void SetClass(Node* node, size_t number) {
+    GetData(node)->class_number = number;
+  }
+
+  // Accessors for the bracket list stored within the per-node data.
+  BracketList& GetBracketList(Node* node) { return GetData(node)->blist; }
+  void SetBracketList(Node* node, BracketList& list) {
+    GetData(node)->blist = list;
+  }
+
+  // Mutates the DFS stack by pushing an entry.
+  void DFSPush(DFSStack& stack, Node* node, Node* from, DFSDirection dir) {
+    DCHECK(GetData(node)->participates);
+    DCHECK(!GetData(node)->visited);
+    GetData(node)->on_stack = true;
+    Node::InputEdges::iterator input = node->input_edges().begin();
+    Node::UseEdges::iterator use = node->use_edges().begin();
+    stack.push({dir, input, use, from, node});
+  }
+
+  // Mutates the DFS stack by popping an entry.
+  void DFSPop(DFSStack& stack, Node* node) {
+    DCHECK_EQ(stack.top().node, node);
+    GetData(node)->on_stack = false;
+    GetData(node)->visited = true;
+    stack.pop();
+  }
+
+  // TODO(mstarzinger): Optimize this to avoid linear search.
+  void BracketListDelete(BracketList& blist, Node* to, DFSDirection direction) {
+    for (BracketList::iterator i = blist.begin(); i != blist.end(); /*nop*/) {
+      if (i->to == to && i->direction != direction) {
+        Trace("  BList erased: {%d->%d}\n", i->from->id(), i->to->id());
+        i = blist.erase(i);
+      } else {
+        ++i;
+      }
+    }
+  }
+
+  void BracketListTrace(BracketList& blist) {
+    if (FLAG_trace_turbo_scheduler) {
+      Trace("  BList: ");
+      for (Bracket bracket : blist) {
+        Trace("{%d->%d} ", bracket.from->id(), bracket.to->id());
+      }
+      Trace("\n");
+    }
+  }
+
+  void Trace(const char* msg, ...) {
+    if (FLAG_trace_turbo_scheduler) {
+      va_list arguments;
+      va_start(arguments, msg);
+      base::OS::VPrint(msg, arguments);
+      va_end(arguments);
+    }
+  }
+
+  Zone* zone_;
+  Graph* graph_;
+  int dfs_number_;    // Generates new DFS pre-order numbers on demand.
+  int class_number_;  // Generates new equivalence class numbers on demand.
+  Data node_data_;    // Per-node data stored as a side-table.
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_CONTROL_EQUIVALENCE_H_
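The heart of the class assignment above is the rule from VisitMid: a node takes the class cached on its topmost bracket, and a fresh class number is minted exactly when the bracket-set size has changed since that bracket was last on top. That rule, isolated as a toy sketch:

#include <cassert>
#include <cstddef>
#include <list>

struct Bracket {
  size_t recent_class;  // Class cached when this bracket was topmost.
  size_t recent_size;   // Bracket-set size cached at that time.
};

size_t next_class = 1;  // Generates new equivalence class numbers.

size_t AssignClass(std::list<Bracket>* blist) {
  Bracket* recent = &blist->back();
  if (recent->recent_size != blist->size()) {
    recent->recent_size = blist->size();
    recent->recent_class = next_class++;  // Set changed: mint a new class.
  }
  return recent->recent_class;
}

int main() {
  std::list<Bracket> blist = {{0, 0}};
  size_t c1 = AssignClass(&blist);
  assert(AssignClass(&blist) == c1);  // Same bracket set: same class.
  blist.push_back({0, 0});
  assert(AssignClass(&blist) != c1);  // Set grew: a new class is minted.
  return 0;
}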
index 1d83103..e738ccf 100644
@@ -15,7 +15,31 @@ namespace internal {
 namespace compiler {
 
 enum VisitState { kUnvisited = 0, kOnStack = 1, kRevisit = 2, kVisited = 3 };
-enum Reachability { kFromStart = 8 };
+enum Decision { kFalse, kUnknown, kTrue };
+
+class ReachabilityMarker : public NodeMarker<uint8_t> {
+ public:
+  explicit ReachabilityMarker(Graph* graph) : NodeMarker<uint8_t>(graph, 8) {}
+  bool SetReachableFromEnd(Node* node) {
+    uint8_t before = Get(node);
+    Set(node, before | kFromEnd);
+    return before & kFromEnd;
+  }
+  bool IsReachableFromEnd(Node* node) { return Get(node) & kFromEnd; }
+  bool SetReachableFromStart(Node* node) {
+    uint8_t before = Get(node);
+    Set(node, before | kFromStart);
+    return before & kFromStart;
+  }
+  bool IsReachableFromStart(Node* node) { return Get(node) & kFromStart; }
+  void Push(Node* node) { Set(node, Get(node) | kFwStack); }
+  void Pop(Node* node) { Set(node, Get(node) & ~kFwStack); }
+  bool IsOnStack(Node* node) { return Get(node) & kFwStack; }
+
+ private:
+  enum Bit { kFromEnd = 1, kFromStart = 2, kFwStack = 4 };
+};
+
 
 #define TRACE(x) \
   if (FLAG_trace_turbo_reduction) PrintF x
@@ -71,19 +95,19 @@ class ControlReducerImpl {
     // we have to be careful about proper loop detection during reduction.
 
     // Gather all nodes backwards-reachable from end (through inputs).
-    state_.assign(graph()->NodeCount(), kUnvisited);
+    ReachabilityMarker marked(graph());
     NodeVector nodes(zone_);
-    AddNodesReachableFromEnd(nodes);
+    AddNodesReachableFromEnd(marked, nodes);
 
     // Walk forward through control nodes, looking for back edges to nodes
     // that are not connected to end. Those are non-terminating loops (NTLs).
     Node* start = graph()->start();
-    ZoneVector<byte> fw_reachability(graph()->NodeCount(), 0, zone_);
-    fw_reachability[start->id()] = kFromStart | kOnStack;
+    marked.Push(start);
+    marked.SetReachableFromStart(start);
 
     // We use a stack of (Node, UseIter) pairs to avoid O(n^2) traversal.
     typedef std::pair<Node*, UseIter> FwIter;
-    ZoneDeque<FwIter> fw_stack(zone_);
+    ZoneVector<FwIter> fw_stack(zone_);
     fw_stack.push_back(FwIter(start, start->uses().begin()));
 
     while (!fw_stack.empty()) {
@@ -92,20 +116,26 @@ class ControlReducerImpl {
       bool pop = true;
       while (fw_stack.back().second != node->uses().end()) {
         Node* succ = *(fw_stack.back().second);
-        byte reach = fw_reachability[succ->id()];
-        if ((reach & kOnStack) != 0 && state_[succ->id()] != kVisited) {
+        if (marked.IsOnStack(succ) && !marked.IsReachableFromEnd(succ)) {
           // {succ} is on stack and not reachable from end.
-          ConnectNTL(nodes, succ);
-          fw_reachability.resize(graph()->NodeCount(), 0);
-          // The use list of {succ} might have changed.
-          fw_stack[fw_stack.size() - 1] = FwIter(succ, succ->uses().begin());
+          Node* added = ConnectNTL(succ);
+          nodes.push_back(added);
+          marked.SetReachableFromEnd(added);
+          AddBackwardsReachableNodes(marked, nodes, nodes.size() - 1);
+
+          // Reset the use iterators for the entire stack.
+          for (size_t i = 0; i < fw_stack.size(); i++) {
+            FwIter& iter = fw_stack[i];
+            fw_stack[i] = FwIter(iter.first, iter.first->uses().begin());
+          }
           pop = false;  // restart traversing successors of this node.
           break;
         }
-        if ((reach & kFromStart) == 0 &&
-            IrOpcode::IsControlOpcode(succ->opcode())) {
+        if (IrOpcode::IsControlOpcode(succ->opcode()) &&
+            !marked.IsReachableFromStart(succ)) {
           // {succ} is a control node and not yet reached from start.
-          fw_reachability[succ->id()] |= kFromStart | kOnStack;
+          marked.Push(succ);
+          marked.SetReachableFromStart(succ);
           fw_stack.push_back(FwIter(succ, succ->uses().begin()));
           pop = false;  // "recurse" into successor control node.
           break;
@@ -113,21 +143,20 @@ class ControlReducerImpl {
         ++fw_stack.back().second;
       }
       if (pop) {
-        fw_reachability[node->id()] &= ~kOnStack;
+        marked.Pop(node);
         fw_stack.pop_back();
       }
     }
 
     // Trim references from dead nodes to live nodes first.
     jsgraph_->GetCachedNodes(&nodes);
-    TrimNodes(nodes);
+    TrimNodes(marked, nodes);
 
     // Any control nodes not reachable from start are dead, even loops.
     for (size_t i = 0; i < nodes.size(); i++) {
       Node* node = nodes[i];
-      byte reach = fw_reachability[node->id()];
-      if ((reach & kFromStart) == 0 &&
-          IrOpcode::IsControlOpcode(node->opcode())) {
+      if (IrOpcode::IsControlOpcode(node->opcode()) &&
+          !marked.IsReachableFromStart(node)) {
         ReplaceNode(node, dead());  // uses will be added to revisit queue.
       }
     }
@@ -135,7 +164,7 @@ class ControlReducerImpl {
   }
 
   // Connect {loop}, the header of a non-terminating loop, to the end node.
-  void ConnectNTL(NodeVector& nodes, Node* loop) {
+  Node* ConnectNTL(Node* loop) {
     TRACE(("ConnectNTL: #%d:%s\n", loop->id(), loop->op()->mnemonic()));
 
     if (loop->opcode() != IrOpcode::kTerminate) {
@@ -172,27 +201,24 @@ class ControlReducerImpl {
       merge->AppendInput(graph()->zone(), loop);
       merge->set_op(common_->Merge(merge->InputCount()));
     }
-    nodes.push_back(to_add);
-    state_.resize(graph()->NodeCount(), kUnvisited);
-    state_[to_add->id()] = kVisited;
-    AddBackwardsReachableNodes(nodes, nodes.size() - 1);
+    return to_add;
   }
 
-  void AddNodesReachableFromEnd(NodeVector& nodes) {
+  void AddNodesReachableFromEnd(ReachabilityMarker& marked, NodeVector& nodes) {
     Node* end = graph()->end();
-    state_[end->id()] = kVisited;
+    marked.SetReachableFromEnd(end);
     if (!end->IsDead()) {
       nodes.push_back(end);
-      AddBackwardsReachableNodes(nodes, nodes.size() - 1);
+      AddBackwardsReachableNodes(marked, nodes, nodes.size() - 1);
     }
   }
 
-  void AddBackwardsReachableNodes(NodeVector& nodes, size_t cursor) {
+  void AddBackwardsReachableNodes(ReachabilityMarker& marked, NodeVector& nodes,
+                                  size_t cursor) {
     while (cursor < nodes.size()) {
       Node* node = nodes[cursor++];
       for (Node* const input : node->inputs()) {
-        if (state_[input->id()] != kVisited) {
-          state_[input->id()] = kVisited;
+        if (!marked.SetReachableFromEnd(input)) {
           nodes.push_back(input);
         }
       }
@@ -201,28 +227,26 @@ class ControlReducerImpl {
 
   void Trim() {
     // Gather all nodes backwards-reachable from end through inputs.
-    state_.assign(graph()->NodeCount(), kUnvisited);
+    ReachabilityMarker marked(graph());
     NodeVector nodes(zone_);
-    AddNodesReachableFromEnd(nodes);
+    AddNodesReachableFromEnd(marked, nodes);
 
     // Process cached nodes in the JSGraph too.
     jsgraph_->GetCachedNodes(&nodes);
-    TrimNodes(nodes);
+    TrimNodes(marked, nodes);
   }
 
-  void TrimNodes(NodeVector& nodes) {
+  void TrimNodes(ReachabilityMarker& marked, NodeVector& nodes) {
     // Remove dead->live edges.
     for (size_t j = 0; j < nodes.size(); j++) {
       Node* node = nodes[j];
-      for (UseIter i = node->uses().begin(); i != node->uses().end();) {
-        size_t id = static_cast<size_t>((*i)->id());
-        if (state_[id] != kVisited) {
-          TRACE(("DeadLink: #%d:%s(%d) -> #%d:%s\n", (*i)->id(),
-                 (*i)->op()->mnemonic(), i.index(), node->id(),
+      for (Edge edge : node->use_edges()) {
+        Node* use = edge.from();
+        if (!marked.IsReachableFromEnd(use)) {
+          TRACE(("DeadLink: #%d:%s(%d) -> #%d:%s\n", use->id(),
+                 use->op()->mnemonic(), edge.index(), node->id(),
                  node->op()->mnemonic()));
-          i.UpdateToAndIncrement(NULL);
-        } else {
-          ++i;
+          edge.UpdateTo(NULL);
         }
       }
     }
@@ -234,8 +258,7 @@ class ControlReducerImpl {
         CHECK_NE(NULL, input);
       }
       for (Node* const use : node->uses()) {
-        size_t id = static_cast<size_t>(use->id());
-        CHECK_EQ(kVisited, state_[id]);
+        CHECK(marked.IsReachableFromEnd(use));
       }
     }
 #endif
@@ -340,29 +363,37 @@ class ControlReducerImpl {
     }
   }
 
-  // Reduce redundant selects.
-  Node* ReduceSelect(Node* const node) {
-    Node* const cond = node->InputAt(0);
-    Node* const tvalue = node->InputAt(1);
-    Node* const fvalue = node->InputAt(2);
-    if (tvalue == fvalue) return tvalue;
+  // Try to statically fold a condition.
+  Decision DecideCondition(Node* cond) {
     switch (cond->opcode()) {
       case IrOpcode::kInt32Constant:
-        return Int32Matcher(cond).Is(0) ? fvalue : tvalue;
+        return Int32Matcher(cond).Is(0) ? kFalse : kTrue;
       case IrOpcode::kInt64Constant:
-        return Int64Matcher(cond).Is(0) ? fvalue : tvalue;
+        return Int64Matcher(cond).Is(0) ? kFalse : kTrue;
       case IrOpcode::kNumberConstant:
-        return NumberMatcher(cond).Is(0) ? fvalue : tvalue;
+        return NumberMatcher(cond).Is(0) ? kFalse : kTrue;
       case IrOpcode::kHeapConstant: {
         Handle<Object> object =
             HeapObjectMatcher<Object>(cond).Value().handle();
-        if (object->IsTrue()) return tvalue;
-        if (object->IsFalse()) return fvalue;
+        if (object->IsTrue()) return kTrue;
+        if (object->IsFalse()) return kFalse;
+        // TODO(turbofan): decide more conditions for heap constants.
         break;
       }
       default:
         break;
     }
+    return kUnknown;
+  }
+
+  // Reduce redundant selects.
+  Node* ReduceSelect(Node* const node) {
+    Node* const tvalue = node->InputAt(1);
+    Node* const fvalue = node->InputAt(2);
+    if (tvalue == fvalue) return tvalue;
+    Decision result = DecideCondition(node->InputAt(0));
+    if (result == kTrue) return tvalue;
+    if (result == kFalse) return fvalue;
     return node;
   }
 
@@ -436,50 +467,24 @@ class ControlReducerImpl {
 
   // Reduce branches if they have constant inputs.
   Node* ReduceBranch(Node* node) {
-    Node* cond = node->InputAt(0);
-    bool is_true;
-    switch (cond->opcode()) {
-      case IrOpcode::kInt32Constant:
-        is_true = !Int32Matcher(cond).Is(0);
-        break;
-      case IrOpcode::kInt64Constant:
-        is_true = !Int64Matcher(cond).Is(0);
-        break;
-      case IrOpcode::kNumberConstant:
-        is_true = !NumberMatcher(cond).Is(0);
-        break;
-      case IrOpcode::kHeapConstant: {
-        Handle<Object> object =
-            HeapObjectMatcher<Object>(cond).Value().handle();
-        if (object->IsTrue())
-          is_true = true;
-        else if (object->IsFalse())
-          is_true = false;
-        else
-          return node;  // TODO(turbofan): fold branches on strings, objects.
-        break;
-      }
-      default:
-        return node;
-    }
+    Decision result = DecideCondition(node->InputAt(0));
+    if (result == kUnknown) return node;
 
     TRACE(("BranchReduce: #%d:%s = %s\n", node->id(), node->op()->mnemonic(),
-           is_true ? "true" : "false"));
+           (result == kTrue) ? "true" : "false"));
 
     // Replace IfTrue and IfFalse projections from this branch.
     Node* control = NodeProperties::GetControlInput(node);
-    for (UseIter i = node->uses().begin(); i != node->uses().end();) {
-      Node* to = *i;
-      if (to->opcode() == IrOpcode::kIfTrue) {
-        TRACE(("  IfTrue: #%d:%s\n", to->id(), to->op()->mnemonic()));
-        i.UpdateToAndIncrement(NULL);
-        ReplaceNode(to, is_true ? control : dead());
-      } else if (to->opcode() == IrOpcode::kIfFalse) {
-        TRACE(("  IfFalse: #%d:%s\n", to->id(), to->op()->mnemonic()));
-        i.UpdateToAndIncrement(NULL);
-        ReplaceNode(to, is_true ? dead() : control);
-      } else {
-        ++i;
+    for (Edge edge : node->use_edges()) {
+      Node* use = edge.from();
+      if (use->opcode() == IrOpcode::kIfTrue) {
+        TRACE(("  IfTrue: #%d:%s\n", use->id(), use->op()->mnemonic()));
+        edge.UpdateTo(NULL);
+        ReplaceNode(use, (result == kTrue) ? control : dead());
+      } else if (use->opcode() == IrOpcode::kIfFalse) {
+        TRACE(("  IfFalse: #%d:%s\n", use->id(), use->op()->mnemonic()));
+        edge.UpdateTo(NULL);
+        ReplaceNode(use, (result == kTrue) ? dead() : control);
       }
     }
     return control;
@@ -534,7 +539,6 @@ void ControlReducer::ReduceGraph(Zone* zone, JSGraph* jsgraph,
                                  CommonOperatorBuilder* common) {
   ControlReducerImpl impl(zone, jsgraph, common);
   impl.Reduce();
-  impl.Trim();
 }
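Factoring DecideCondition out of ReduceBranch, as done above, lets branch and select reduction share one three-valued answer: fold to the known target when the condition is a constant, and leave the node alone otherwise. A sketch of that shared shape with a toy constant type:

#include <cassert>

enum Decision { kDecFalse, kDecUnknown, kDecTrue };

struct Cond {
  bool is_constant;
  int value;  // Only meaningful when is_constant is true.
};

Decision DecideCondition(const Cond& cond) {
  if (!cond.is_constant) return kDecUnknown;  // Cannot fold statically.
  return cond.value == 0 ? kDecFalse : kDecTrue;
}

// Select(c, tvalue, fvalue); kNoChange stands in for returning the node.
const int kNoChange = -1;

int ReduceSelect(const Cond& c, int tvalue, int fvalue) {
  if (tvalue == fvalue) return tvalue;  // Branches agree: condition is moot.
  switch (DecideCondition(c)) {
    case kDecTrue:
      return tvalue;
    case kDecFalse:
      return fvalue;
    default:
      return kNoChange;
  }
}

int main() {
  assert(ReduceSelect({true, 1}, 10, 20) == 10);
  assert(ReduceSelect({true, 0}, 10, 20) == 20);
  assert(ReduceSelect({false, 0}, 10, 20) == kNoChange);
  return 0;
}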
 
 
index df9a01f..f99d7bd 100644
@@ -17,7 +17,7 @@ namespace compiler {
 // registers for a compiled function. Frames are usually populated by the
 // register allocator and are used by Linkage to generate code for the prologue
 // and epilogue to compiled code.
-class Frame {
+class Frame : public ZoneObject {
  public:
   Frame()
       : register_save_area_size_(0),
@@ -69,6 +69,8 @@ class Frame {
   int double_spill_slot_count_;
   BitVector* allocated_registers_;
   BitVector* allocated_double_registers_;
+
+  DISALLOW_COPY_AND_ASSIGN(Frame);
 };
 
 
index 98aaab2..4f4f4e4 100644
@@ -39,8 +39,9 @@ class GapResolver FINAL {
   // Assembler used to emit moves and save registers.
   Assembler* const assembler_;
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_GAP_RESOLVER_H_
diff --git a/deps/v8/src/compiler/generic-algorithm-inl.h b/deps/v8/src/compiler/generic-algorithm-inl.h
deleted file mode 100644
index a25131f..0000000
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GENERIC_ALGORITHM_INL_H_
-#define V8_COMPILER_GENERIC_ALGORITHM_INL_H_
-
-#include <vector>
-
-#include "src/compiler/generic-algorithm.h"
-#include "src/compiler/generic-graph.h"
-#include "src/compiler/generic-node.h"
-#include "src/compiler/generic-node-inl.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <class N>
-class NodeInputIterationTraits {
- public:
-  typedef N Node;
-  typedef typename N::Inputs::iterator Iterator;
-
-  static Iterator begin(Node* node) { return node->inputs().begin(); }
-  static Iterator end(Node* node) { return node->inputs().end(); }
-  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
-  static Node* to(Iterator iterator) { return *iterator; }
-  static Node* from(Iterator iterator) { return iterator.edge().from(); }
-};
-
-template <class N>
-class NodeUseIterationTraits {
- public:
-  typedef N Node;
-  typedef typename N::Uses::iterator Iterator;
-
-  static Iterator begin(Node* node) { return node->uses().begin(); }
-  static Iterator end(Node* node) { return node->uses().end(); }
-  static int max_id(GenericGraphBase* graph) { return graph->NodeCount(); }
-  static Node* to(Iterator iterator) { return *iterator; }
-  static Node* from(Iterator iterator) { return iterator.edge().to(); }
-};
-}
-}
-}  // namespace v8::internal::compiler
-
-#endif  // V8_COMPILER_GENERIC_ALGORITHM_INL_H_
index 3442318..391757e 100644 (file)
@@ -6,52 +6,52 @@
 #define V8_COMPILER_GENERIC_ALGORITHM_H_
 
 #include <stack>
+#include <vector>
 
-#include "src/compiler/generic-graph.h"
-#include "src/compiler/generic-node.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+class Graph;
+class Node;
+
 // GenericGraphVisit allows visitation of graphs of nodes and edges in pre- and
 // post-order. Visitation uses an explicitly allocated stack rather than the
-// execution stack to avoid stack overflow. Although GenericGraphVisit is
-// primarily intended to traverse networks of nodes through their
-// dependencies and uses, it also can be used to visit any graph-like network
-// by specifying custom traits.
+// execution stack to avoid stack overflow.
 class GenericGraphVisit {
  public:
   // struct Visitor {
-  //   void Pre(Traits::Node* current);
-  //   void Post(Traits::Node* current);
-  //   void PreEdge(Traits::Node* from, int index, Traits::Node* to);
-  //   void PostEdge(Traits::Node* from, int index, Traits::Node* to);
+  //   void Pre(Node* current);
+  //   void Post(Node* current);
+  //   void PreEdge(Node* from, int index, Node* to);
+  //   void PostEdge(Node* from, int index, Node* to);
   // }
-  template <class Visitor, class Traits, class RootIterator>
-  static void Visit(GenericGraphBase* graph, Zone* zone,
-                    RootIterator root_begin, RootIterator root_end,
-                    Visitor* visitor) {
-    typedef typename Traits::Node Node;
-    typedef typename Traits::Iterator Iterator;
+  template <class Visitor>
+  static void Visit(Graph* graph, Zone* zone, Node** root_begin,
+                    Node** root_end, Visitor* visitor) {
+    typedef typename Node::InputEdges::iterator Iterator;
     typedef std::pair<Iterator, Iterator> NodeState;
     typedef std::stack<NodeState, ZoneDeque<NodeState> > NodeStateStack;
     NodeStateStack stack((ZoneDeque<NodeState>(zone)));
-    BoolVector visited(Traits::max_id(graph), false, zone);
+    BoolVector visited(graph->NodeCount(), false, zone);
     Node* current = *root_begin;
     while (true) {
       DCHECK(current != NULL);
       const int id = current->id();
       DCHECK(id >= 0);
-      DCHECK(id < Traits::max_id(graph));  // Must be a valid id.
+      DCHECK(id < graph->NodeCount());  // Must be a valid id.
       bool visit = !GetVisited(&visited, id);
       if (visit) {
         visitor->Pre(current);
         SetVisited(&visited, id);
       }
-      Iterator begin(visit ? Traits::begin(current) : Traits::end(current));
-      Iterator end(Traits::end(current));
+      Iterator begin(visit ? current->input_edges().begin()
+                           : current->input_edges().end());
+      Iterator end(current->input_edges().end());
       stack.push(NodeState(begin, end));
       Node* post_order_node = current;
       while (true) {
@@ -67,35 +67,33 @@ class GenericGraphVisit {
             current = *root_begin;
             break;
           }
-          post_order_node = Traits::from(stack.top().first);
+          post_order_node = (*stack.top().first).from();
           visit = true;
         } else {
-          visitor->PreEdge(Traits::from(top.first), top.first.edge().index(),
-                           Traits::to(top.first));
-          current = Traits::to(top.first);
+          visitor->PreEdge((*top.first).from(), (*top.first).index(),
+                           (*top.first).to());
+          current = (*top.first).to();
           if (!GetVisited(&visited, current->id())) break;
         }
         top = stack.top();
-        visitor->PostEdge(Traits::from(top.first), top.first.edge().index(),
-                          Traits::to(top.first));
+        visitor->PostEdge((*top.first).from(), (*top.first).index(),
+                          (*top.first).to());
         ++stack.top().first;
       }
     }
   }
 
-  template <class Visitor, class Traits>
-  static void Visit(GenericGraphBase* graph, Zone* zone,
-                    typename Traits::Node* current, Visitor* visitor) {
-    typename Traits::Node* array[] = {current};
-    Visit<Visitor, Traits>(graph, zone, &array[0], &array[1], visitor);
+  template <class Visitor>
+  static void Visit(Graph* graph, Zone* zone, Node* current, Visitor* visitor) {
+    Node* array[] = {current};
+    Visit<Visitor>(graph, zone, &array[0], &array[1], visitor);
   }
 
-  template <class B, class S>
   struct NullNodeVisitor {
-    void Pre(GenericNode<B, S>* node) {}
-    void Post(GenericNode<B, S>* node) {}
-    void PreEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
-    void PostEdge(GenericNode<B, S>* from, int index, GenericNode<B, S>* to) {}
+    void Pre(Node* node) {}
+    void Post(Node* node) {}
+    void PreEdge(Node* from, int index, Node* to) {}
+    void PostEdge(Node* from, int index, Node* to) {}
   };
 
  private:
@@ -112,8 +110,11 @@ class GenericGraphVisit {
     return visited->at(id);
   }
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+typedef GenericGraphVisit::NullNodeVisitor NullNodeVisitor;
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_GENERIC_ALGORITHM_H_
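
The Visitor protocol above is specified only in a comment. A concrete visitor
written against that interface, reusing the NullNodeVisitor defaults, might
look like this sketch (the counting logic is illustrative, not part of the
patch):

    // Count nodes reachable from a root through input edges, post-order.
    struct CountingVisitor : public NullNodeVisitor {
      CountingVisitor() : count(0) {}
      void Post(Node* node) { ++count; }  // runs once per reachable node
      int count;
    };

    // Assumed usage, given a Graph* graph and a Zone* zone:
    //   CountingVisitor v;
    //   GenericGraphVisit::Visit<CountingVisitor>(graph, zone, graph->end(), &v);
    //   // v.count == number of nodes reachable from end().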
diff --git a/deps/v8/src/compiler/generic-graph.h b/deps/v8/src/compiler/generic-graph.h
deleted file mode 100644 (file)
index 11f6aa2..0000000
+++ /dev/null
@@ -1,53 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GENERIC_GRAPH_H_
-#define V8_COMPILER_GENERIC_GRAPH_H_
-
-#include "src/compiler/generic-node.h"
-
-namespace v8 {
-namespace internal {
-
-class Zone;
-
-namespace compiler {
-
-class GenericGraphBase : public ZoneObject {
- public:
-  explicit GenericGraphBase(Zone* zone) : zone_(zone), next_node_id_(0) {}
-
-  Zone* zone() const { return zone_; }
-
-  NodeId NextNodeID() { return next_node_id_++; }
-  NodeId NodeCount() const { return next_node_id_; }
-
- private:
-  Zone* zone_;
-  NodeId next_node_id_;
-};
-
-template <class V>
-class GenericGraph : public GenericGraphBase {
- public:
-  explicit GenericGraph(Zone* zone)
-      : GenericGraphBase(zone), start_(NULL), end_(NULL) {}
-
-  V* start() const { return start_; }
-  V* end() const { return end_; }
-
-  void SetStart(V* start) { start_ = start; }
-  void SetEnd(V* end) { end_ = end; }
-
- private:
-  V* start_;
-  V* end_;
-
-  DISALLOW_COPY_AND_ASSIGN(GenericGraph);
-};
-}
-}
-}  // namespace v8::internal::compiler
-
-#endif  // V8_COMPILER_GENERIC_GRAPH_H_
diff --git a/deps/v8/src/compiler/generic-node-inl.h b/deps/v8/src/compiler/generic-node-inl.h
deleted file mode 100644 (file)
index afbd1f0..0000000
+++ /dev/null
@@ -1,267 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GENERIC_NODE_INL_H_
-#define V8_COMPILER_GENERIC_NODE_INL_H_
-
-#include "src/v8.h"
-
-#include "src/compiler/generic-graph.h"
-#include "src/compiler/generic-node.h"
-#include "src/zone.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-template <class B, class S>
-GenericNode<B, S>::GenericNode(GenericGraphBase* graph, int input_count,
-                               int reserve_input_count)
-    : BaseClass(graph->zone()),
-      input_count_(input_count),
-      reserve_input_count_(reserve_input_count),
-      has_appendable_inputs_(false),
-      use_count_(0),
-      first_use_(NULL),
-      last_use_(NULL) {
-  DCHECK(reserve_input_count <= kMaxReservedInputs);
-  inputs_.static_ = reinterpret_cast<Input*>(this + 1);
-  AssignUniqueID(graph);
-}
-
-template <class B, class S>
-inline void GenericNode<B, S>::AssignUniqueID(GenericGraphBase* graph) {
-  id_ = graph->NextNodeID();
-}
-
-template <class B, class S>
-inline typename GenericNode<B, S>::Inputs::iterator
-GenericNode<B, S>::Inputs::begin() {
-  return typename GenericNode<B, S>::Inputs::iterator(this->node_, 0);
-}
-
-template <class B, class S>
-inline typename GenericNode<B, S>::Inputs::iterator
-GenericNode<B, S>::Inputs::end() {
-  return typename GenericNode<B, S>::Inputs::iterator(
-      this->node_, this->node_->InputCount());
-}
-
-template <class B, class S>
-inline typename GenericNode<B, S>::Uses::iterator
-GenericNode<B, S>::Uses::begin() {
-  return typename GenericNode<B, S>::Uses::iterator(this->node_);
-}
-
-template <class B, class S>
-inline typename GenericNode<B, S>::Uses::iterator
-GenericNode<B, S>::Uses::end() {
-  return typename GenericNode<B, S>::Uses::iterator();
-}
-
-template <class B, class S>
-void GenericNode<B, S>::ReplaceUses(GenericNode* replace_to) {
-  for (Use* use = first_use_; use != NULL; use = use->next) {
-    use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
-  }
-  if (replace_to->last_use_ == NULL) {
-    DCHECK_EQ(NULL, replace_to->first_use_);
-    replace_to->first_use_ = first_use_;
-    replace_to->last_use_ = last_use_;
-  } else if (first_use_ != NULL) {
-    DCHECK_NE(NULL, replace_to->first_use_);
-    replace_to->last_use_->next = first_use_;
-    first_use_->prev = replace_to->last_use_;
-    replace_to->last_use_ = last_use_;
-  }
-  replace_to->use_count_ += use_count_;
-  use_count_ = 0;
-  first_use_ = NULL;
-  last_use_ = NULL;
-}
-
-template <class B, class S>
-template <class UnaryPredicate>
-void GenericNode<B, S>::ReplaceUsesIf(UnaryPredicate pred,
-                                      GenericNode* replace_to) {
-  for (Use* use = first_use_; use != NULL;) {
-    Use* next = use->next;
-    if (pred(static_cast<S*>(use->from))) {
-      RemoveUse(use);
-      replace_to->AppendUse(use);
-      use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
-    }
-    use = next;
-  }
-}
-
-template <class B, class S>
-void GenericNode<B, S>::RemoveAllInputs() {
-  for (typename Inputs::iterator iter(inputs().begin()); iter != inputs().end();
-       ++iter) {
-    iter.GetInput()->Update(NULL);
-  }
-}
-
-template <class B, class S>
-void GenericNode<B, S>::TrimInputCount(int new_input_count) {
-  if (new_input_count == input_count_) return;  // Nothing to do.
-
-  DCHECK(new_input_count < input_count_);
-
-  // Update inline inputs.
-  for (int i = new_input_count; i < input_count_; i++) {
-    typename GenericNode<B, S>::Input* input = GetInputRecordPtr(i);
-    input->Update(NULL);
-  }
-  input_count_ = new_input_count;
-}
-
-template <class B, class S>
-void GenericNode<B, S>::ReplaceInput(int index, GenericNode<B, S>* new_to) {
-  Input* input = GetInputRecordPtr(index);
-  input->Update(new_to);
-}
-
-template <class B, class S>
-void GenericNode<B, S>::Input::Update(GenericNode<B, S>* new_to) {
-  GenericNode* old_to = this->to;
-  if (new_to == old_to) return;  // Nothing to do.
-  // Snip out the use from where it used to be
-  if (old_to != NULL) {
-    old_to->RemoveUse(use);
-  }
-  to = new_to;
-  // And put it into the new node's use list.
-  if (new_to != NULL) {
-    new_to->AppendUse(use);
-  } else {
-    use->next = NULL;
-    use->prev = NULL;
-  }
-}
-
-template <class B, class S>
-void GenericNode<B, S>::EnsureAppendableInputs(Zone* zone) {
-  if (!has_appendable_inputs_) {
-    void* deque_buffer = zone->New(sizeof(InputDeque));
-    InputDeque* deque = new (deque_buffer) InputDeque(zone);
-    for (int i = 0; i < input_count_; ++i) {
-      deque->push_back(inputs_.static_[i]);
-    }
-    inputs_.appendable_ = deque;
-    has_appendable_inputs_ = true;
-  }
-}
-
-template <class B, class S>
-void GenericNode<B, S>::AppendInput(Zone* zone, GenericNode<B, S>* to_append) {
-  Use* new_use = new (zone) Use;
-  Input new_input;
-  new_input.to = to_append;
-  new_input.use = new_use;
-  if (reserve_input_count_ > 0) {
-    DCHECK(!has_appendable_inputs_);
-    reserve_input_count_--;
-    inputs_.static_[input_count_] = new_input;
-  } else {
-    EnsureAppendableInputs(zone);
-    inputs_.appendable_->push_back(new_input);
-  }
-  new_use->input_index = input_count_;
-  new_use->from = this;
-  to_append->AppendUse(new_use);
-  input_count_++;
-}
-
-template <class B, class S>
-void GenericNode<B, S>::InsertInput(Zone* zone, int index,
-                                    GenericNode<B, S>* to_insert) {
-  DCHECK(index >= 0 && index < InputCount());
-  // TODO(turbofan): Optimize this implementation!
-  AppendInput(zone, InputAt(InputCount() - 1));
-  for (int i = InputCount() - 1; i > index; --i) {
-    ReplaceInput(i, InputAt(i - 1));
-  }
-  ReplaceInput(index, to_insert);
-}
-
-template <class B, class S>
-void GenericNode<B, S>::RemoveInput(int index) {
-  DCHECK(index >= 0 && index < InputCount());
-  // TODO(turbofan): Optimize this implementation!
-  for (; index < InputCount() - 1; ++index) {
-    ReplaceInput(index, InputAt(index + 1));
-  }
-  TrimInputCount(InputCount() - 1);
-}
-
-template <class B, class S>
-void GenericNode<B, S>::AppendUse(Use* use) {
-  use->next = NULL;
-  use->prev = last_use_;
-  if (last_use_ == NULL) {
-    first_use_ = use;
-  } else {
-    last_use_->next = use;
-  }
-  last_use_ = use;
-  ++use_count_;
-}
-
-template <class B, class S>
-void GenericNode<B, S>::RemoveUse(Use* use) {
-  if (last_use_ == use) {
-    last_use_ = use->prev;
-  }
-  if (use->prev != NULL) {
-    use->prev->next = use->next;
-  } else {
-    first_use_ = use->next;
-  }
-  if (use->next != NULL) {
-    use->next->prev = use->prev;
-  }
-  --use_count_;
-}
-
-template <class B, class S>
-inline bool GenericNode<B, S>::OwnedBy(GenericNode* owner) const {
-  return first_use_ != NULL && first_use_->from == owner &&
-         first_use_->next == NULL;
-}
-
-template <class B, class S>
-S* GenericNode<B, S>::New(GenericGraphBase* graph, int input_count, S** inputs,
-                          bool has_extensible_inputs) {
-  size_t node_size = sizeof(GenericNode);
-  int reserve_input_count = has_extensible_inputs ? kDefaultReservedInputs : 0;
-  size_t inputs_size = (input_count + reserve_input_count) * sizeof(Input);
-  size_t uses_size = input_count * sizeof(Use);
-  int size = static_cast<int>(node_size + inputs_size + uses_size);
-  Zone* zone = graph->zone();
-  void* buffer = zone->New(size);
-  S* result = new (buffer) S(graph, input_count, reserve_input_count);
-  Input* input =
-      reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
-  Use* use =
-      reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
-
-  for (int current = 0; current < input_count; ++current) {
-    GenericNode* to = *inputs++;
-    input->to = to;
-    input->use = use;
-    use->input_index = current;
-    use->from = result;
-    to->AppendUse(use);
-    ++use;
-    ++input;
-  }
-  return result;
-}
-}
-}
-}  // namespace v8::internal::compiler
-
-#endif  // V8_COMPILER_GENERIC_NODE_INL_H_
diff --git a/deps/v8/src/compiler/generic-node.h b/deps/v8/src/compiler/generic-node.h
deleted file mode 100644 (file)
index 506a34f..0000000
+++ /dev/null
@@ -1,279 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_GENERIC_NODE_H_
-#define V8_COMPILER_GENERIC_NODE_H_
-
-#include "src/v8.h"
-
-#include "src/zone-containers.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-class GenericGraphBase;
-
-typedef int NodeId;
-
-// A GenericNode<> is the basic primitive of graphs. GenericNodes are
-// chained together by input/use chains but by default otherwise contain only an
-// identifying number which specific applications of graphs and nodes can use
-// to index auxiliary out-of-line data, especially transient data.
-// Specializations of the templatized GenericNode<> class must provide a base
-// class B that contains all of the members to be made available in each
-// specialized Node instance. GenericNode uses a mixin template pattern to
-// ensure that common accessors and methods expect the derived class S type
-// rather than the GenericNode<B, S> type.
-template <class B, class S>
-class GenericNode : public B {
- public:
-  typedef B BaseClass;
-  typedef S DerivedClass;
-
-  inline NodeId id() const { return id_; }
-
-  int InputCount() const { return input_count_; }
-  S* InputAt(int index) const {
-    return static_cast<S*>(GetInputRecordPtr(index)->to);
-  }
-  inline void ReplaceInput(int index, GenericNode* new_input);
-  inline void AppendInput(Zone* zone, GenericNode* new_input);
-  inline void InsertInput(Zone* zone, int index, GenericNode* new_input);
-  inline void RemoveInput(int index);
-
-  int UseCount() { return use_count_; }
-  S* UseAt(int index) {
-    DCHECK(index < use_count_);
-    Use* current = first_use_;
-    while (index-- != 0) {
-      current = current->next;
-    }
-    return static_cast<S*>(current->from);
-  }
-  inline void ReplaceUses(GenericNode* replace_to);
-  template <class UnaryPredicate>
-  inline void ReplaceUsesIf(UnaryPredicate pred, GenericNode* replace_to);
-  inline void RemoveAllInputs();
-
-  inline void TrimInputCount(int input_count);
-
-  class Inputs {
-   public:
-    class iterator;
-    iterator begin();
-    iterator end();
-
-    explicit Inputs(GenericNode* node) : node_(node) {}
-
-   private:
-    GenericNode* node_;
-  };
-
-  Inputs inputs() { return Inputs(this); }
-
-  class Uses {
-   public:
-    class iterator;
-    iterator begin();
-    iterator end();
-    bool empty() { return begin() == end(); }
-
-    explicit Uses(GenericNode* node) : node_(node) {}
-
-   private:
-    GenericNode* node_;
-  };
-
-  Uses uses() { return Uses(this); }
-
-  class Edge;
-
-  bool OwnedBy(GenericNode* owner) const;
-
-  static S* New(GenericGraphBase* graph, int input_count, S** inputs,
-                bool has_extensible_inputs);
-
- protected:
-  friend class GenericGraphBase;
-
-  class Use : public ZoneObject {
-   public:
-    GenericNode* from;
-    Use* next;
-    Use* prev;
-    int input_index;
-  };
-
-  class Input {
-   public:
-    GenericNode* to;
-    Use* use;
-
-    void Update(GenericNode* new_to);
-  };
-
-  void EnsureAppendableInputs(Zone* zone);
-
-  Input* GetInputRecordPtr(int index) const {
-    if (has_appendable_inputs_) {
-      return &((*inputs_.appendable_)[index]);
-    } else {
-      return inputs_.static_ + index;
-    }
-  }
-
-  inline void AppendUse(Use* use);
-  inline void RemoveUse(Use* use);
-
-  void* operator new(size_t, void* location) { return location; }
-
-  GenericNode(GenericGraphBase* graph, int input_count,
-              int reserved_input_count);
-
- private:
-  void AssignUniqueID(GenericGraphBase* graph);
-
-  typedef ZoneDeque<Input> InputDeque;
-
-  static const int kReservedInputCountBits = 2;
-  static const int kMaxReservedInputs = (1 << kReservedInputCountBits) - 1;
-  static const int kDefaultReservedInputs = kMaxReservedInputs;
-
-  NodeId id_;
-  int input_count_ : 29;
-  unsigned int reserve_input_count_ : kReservedInputCountBits;
-  bool has_appendable_inputs_ : 1;
-  union {
-    // When a node is initially allocated, it uses a static buffer to hold its
-    // inputs under the assumption that the number of inputs will not increase.
-    // When the first input is appended, the static buffer is converted into a
-    // deque to allow for space-efficient growing.
-    Input* static_;
-    InputDeque* appendable_;
-  } inputs_;
-  int use_count_;
-  Use* first_use_;
-  Use* last_use_;
-
-  DISALLOW_COPY_AND_ASSIGN(GenericNode);
-};
-
-// An encapsulation for information associated with a single use of a node as
-// an input from another node, allowing access to both the defining node and
-// the node having the input.
-template <class B, class S>
-class GenericNode<B, S>::Edge {
- public:
-  S* from() const { return static_cast<S*>(input_->use->from); }
-  S* to() const { return static_cast<S*>(input_->to); }
-  int index() const {
-    int index = input_->use->input_index;
-    DCHECK(index < input_->use->from->input_count_);
-    return index;
-  }
-
- private:
-  friend class GenericNode<B, S>::Uses::iterator;
-  friend class GenericNode<B, S>::Inputs::iterator;
-
-  explicit Edge(typename GenericNode<B, S>::Input* input) : input_(input) {}
-
-  typename GenericNode<B, S>::Input* input_;
-};
-
-// A forward iterator to visit the nodes which are depended upon by a node
-// in the order of input.
-template <class B, class S>
-class GenericNode<B, S>::Inputs::iterator {
- public:
-  iterator(const typename GenericNode<B, S>::Inputs::iterator& other)  // NOLINT
-      : node_(other.node_),
-        index_(other.index_) {}
-
-  S* operator*() { return static_cast<S*>(GetInput()->to); }
-  typename GenericNode<B, S>::Edge edge() {
-    return typename GenericNode::Edge(GetInput());
-  }
-  bool operator==(const iterator& other) const {
-    return other.index_ == index_ && other.node_ == node_;
-  }
-  bool operator!=(const iterator& other) const { return !(other == *this); }
-  iterator& operator++() {
-    DCHECK(node_ != NULL);
-    DCHECK(index_ < node_->input_count_);
-    ++index_;
-    return *this;
-  }
-  iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
-    typename GenericNode<B, S>::Input* input = GetInput();
-    input->Update(new_to);
-    index_++;
-    return *this;
-  }
-  int index() { return index_; }
-
- private:
-  friend class GenericNode;
-
-  explicit iterator(GenericNode* node, int index)
-      : node_(node), index_(index) {}
-
-  Input* GetInput() const { return node_->GetInputRecordPtr(index_); }
-
-  GenericNode* node_;
-  int index_;
-};
-
-// A forward iterator to visit the uses of a node. The uses are returned in
-// the order in which they were added as inputs.
-template <class B, class S>
-class GenericNode<B, S>::Uses::iterator {
- public:
-  iterator(const typename GenericNode<B, S>::Uses::iterator& other)  // NOLINT
-      : current_(other.current_),
-        index_(other.index_) {}
-
-  S* operator*() { return static_cast<S*>(current_->from); }
-  typename GenericNode<B, S>::Edge edge() {
-    return typename GenericNode::Edge(CurrentInput());
-  }
-
-  bool operator==(const iterator& other) { return other.current_ == current_; }
-  bool operator!=(const iterator& other) { return other.current_ != current_; }
-  iterator& operator++() {
-    DCHECK(current_ != NULL);
-    index_++;
-    current_ = current_->next;
-    return *this;
-  }
-  iterator& UpdateToAndIncrement(GenericNode<B, S>* new_to) {
-    DCHECK(current_ != NULL);
-    index_++;
-    typename GenericNode<B, S>::Input* input = CurrentInput();
-    current_ = current_->next;
-    input->Update(new_to);
-    return *this;
-  }
-  int index() const { return index_; }
-
- private:
-  friend class GenericNode<B, S>::Uses;
-
-  iterator() : current_(NULL), index_(0) {}
-  explicit iterator(GenericNode<B, S>* node)
-      : current_(node->first_use_), index_(0) {}
-
-  Input* CurrentInput() const {
-    return current_->from->GetInputRecordPtr(current_->input_index);
-  }
-
-  typename GenericNode<B, S>::Use* current_;
-  int index_;
-};
-}
-}
-}  // namespace v8::internal::compiler
-
-#endif  // V8_COMPILER_GENERIC_NODE_H_
index 65ce345..6321aaa 100644 (file)
@@ -4,15 +4,13 @@
 
 #include "src/compiler/graph-builder.h"
 
+#include "src/bit-vector.h"
 #include "src/compiler.h"
-#include "src/compiler/generic-graph.h"
-#include "src/compiler/generic-node.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph-visualizer.h"
+#include "src/compiler/node.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/operator-properties.h"
-#include "src/compiler/operator-properties-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -45,7 +43,7 @@ Node** StructuredGraphBuilder::EnsureInputBufferSize(int size) {
 Node* StructuredGraphBuilder::MakeNode(const Operator* op,
                                        int value_input_count,
                                        Node** value_inputs, bool incomplete) {
-  DCHECK(op->InputCount() == value_input_count);
+  DCHECK(op->ValueInputCount() == value_input_count);
 
   bool has_context = OperatorProperties::HasContextInput(op);
   bool has_framestate = OperatorProperties::HasFrameStateInput(op);
index 0fdd769..d88b125 100644 (file)
@@ -86,7 +86,7 @@ class StructuredGraphBuilder : public GraphBuilder {
  public:
   StructuredGraphBuilder(Zone* zone, Graph* graph,
                          CommonOperatorBuilder* common);
-  virtual ~StructuredGraphBuilder() {}
+  ~StructuredGraphBuilder() OVERRIDE {}
 
   // Creates a new Phi node having {count} input values.
   Node* NewPhi(int count, Node* input, Node* control);
@@ -114,8 +114,8 @@ class StructuredGraphBuilder : public GraphBuilder {
   // The following method creates a new node having the specified operator and
   // ensures effect and control dependencies are wired up. The dependencies
   // tracked by the environment might be mutated.
-  virtual Node* MakeNode(const Operator* op, int value_input_count,
-                         Node** value_inputs, bool incomplete) FINAL;
+  Node* MakeNode(const Operator* op, int value_input_count, Node** value_inputs,
+                 bool incomplete) FINAL;
 
   Environment* environment() const { return environment_; }
   void set_environment(Environment* env) { environment_ = env; }
index efebf7b..c135ae5 100644 (file)
@@ -5,7 +5,7 @@
 #ifndef V8_COMPILER_GRAPH_INL_H_
 #define V8_COMPILER_GRAPH_INL_H_
 
-#include "src/compiler/generic-algorithm-inl.h"
+#include "src/compiler/generic-algorithm.h"
 #include "src/compiler/graph.h"
 
 namespace v8 {
@@ -13,27 +13,13 @@ namespace internal {
 namespace compiler {
 
 template <class Visitor>
-void Graph::VisitNodeUsesFrom(Node* node, Visitor* visitor) {
-  Zone tmp_zone(zone()->isolate());
-  GenericGraphVisit::Visit<Visitor, NodeUseIterationTraits<Node> >(
-      this, &tmp_zone, node, visitor);
-}
-
-
-template <class Visitor>
-void Graph::VisitNodeUsesFromStart(Visitor* visitor) {
-  VisitNodeUsesFrom(start(), visitor);
-}
-
-
-template <class Visitor>
 void Graph::VisitNodeInputsFromEnd(Visitor* visitor) {
   Zone tmp_zone(zone()->isolate());
-  GenericGraphVisit::Visit<Visitor, NodeInputIterationTraits<Node> >(
-      this, &tmp_zone, end(), visitor);
+  GenericGraphVisit::Visit<Visitor>(this, &tmp_zone, end(), visitor);
 }
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_GRAPH_INL_H_
index f716b2a..9a6b121 100644 (file)
@@ -12,79 +12,190 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-GraphReducer::GraphReducer(Graph* graph)
-    : graph_(graph), reducers_(graph->zone()) {}
+enum class GraphReducer::State : uint8_t {
+  kUnvisited,
+  kRevisit,
+  kOnStack,
+  kVisited
+};
+
+
+GraphReducer::GraphReducer(Graph* graph, Zone* zone)
+    : graph_(graph),
+      state_(graph, 4),
+      reducers_(zone),
+      revisit_(zone),
+      stack_(zone) {}
 
 
-static bool NodeIdIsLessThan(const Node* node, NodeId id) {
-  return node->id() < id;
+void GraphReducer::AddReducer(Reducer* reducer) {
+  reducers_.push_back(reducer);
 }
 
 
 void GraphReducer::ReduceNode(Node* node) {
-  static const unsigned kMaxAttempts = 16;
-  bool reduce = true;
-  for (unsigned attempts = 0; attempts <= kMaxAttempts; ++attempts) {
-    if (!reduce) return;
-    reduce = false;  // Assume we don't need to rerun any reducers.
-    int before = graph_->NodeCount();
-    for (ZoneVector<Reducer*>::iterator i = reducers_.begin();
-         i != reducers_.end(); ++i) {
+  DCHECK(stack_.empty());
+  DCHECK(revisit_.empty());
+  Push(node);
+  for (;;) {
+    if (!stack_.empty()) {
+      // Process the node on the top of the stack, potentially pushing more or
+      // popping the node off the stack.
+      ReduceTop();
+    } else if (!revisit_.empty()) {
+      // If the stack becomes empty, revisit any nodes in the revisit queue.
+      Node* const node = revisit_.top();
+      revisit_.pop();
+      if (state_.Get(node) == State::kRevisit) {
+        // State can change while the node sits in the revisit queue.
+        Push(node);
+      }
+    } else {
+      break;
+    }
+  }
+  DCHECK(revisit_.empty());
+  DCHECK(stack_.empty());
+}
+
+
+void GraphReducer::ReduceGraph() { ReduceNode(graph()->end()); }
+
+
+Reduction GraphReducer::Reduce(Node* const node) {
+  auto skip = reducers_.end();
+  for (auto i = reducers_.begin(); i != reducers_.end();) {
+    if (i != skip) {
       Reduction reduction = (*i)->Reduce(node);
-      Node* replacement = reduction.replacement();
-      if (replacement == NULL) {
+      if (!reduction.Changed()) {
         // No change from this reducer.
-      } else if (replacement == node) {
-        // {replacement == node} represents an in-place reduction.
-        // Rerun all the reducers for this node, as now there may be more
+      } else if (reduction.replacement() == node) {
+        // {replacement} == {node} represents an in-place reduction. Rerun
+        // all the other reducers for this node, as now there may be more
         // opportunities for reduction.
-        reduce = true;
-        break;
+        skip = i;
+        i = reducers_.begin();
+        continue;
       } else {
-        if (node == graph_->start()) graph_->SetStart(replacement);
-        if (node == graph_->end()) graph_->SetEnd(replacement);
-        // If {node} was replaced by an old node, unlink {node} and assume that
-        // {replacement} was already reduced and finish.
-        if (replacement->id() < before) {
-          node->ReplaceUses(replacement);
-          node->Kill();
-          return;
-        }
-        // Otherwise, {node} was replaced by a new node. Replace all old uses of
-        // {node} with {replacement}. New nodes created by this reduction can
-        // use {node}.
-        node->ReplaceUsesIf(
-            std::bind2nd(std::ptr_fun(&NodeIdIsLessThan), before), replacement);
-        // Unlink {node} if it's no longer used.
-        if (node->uses().empty()) {
-          node->Kill();
-        }
-        // Rerun all the reductions on the {replacement}.
-        node = replacement;
-        reduce = true;
-        break;
+        // {node} was replaced by another node.
+        return reduction;
       }
     }
+    ++i;
+  }
+  if (skip == reducers_.end()) {
+    // No change from any reducer.
+    return Reducer::NoChange();
   }
+  // At least one reducer did some in-place reduction.
+  return Reducer::Changed(node);
 }
 
 
-// A helper class to reuse the node traversal algorithm.
-struct GraphReducerVisitor FINAL : public NullNodeVisitor {
-  explicit GraphReducerVisitor(GraphReducer* reducer) : reducer_(reducer) {}
-  void Post(Node* node) { reducer_->ReduceNode(node); }
-  GraphReducer* reducer_;
-};
+void GraphReducer::ReduceTop() {
+  NodeState& entry = stack_.top();
+  Node* node = entry.node;
+  DCHECK(state_.Get(node) == State::kOnStack);
+
+  if (node->IsDead()) return Pop();  // Node was killed while on stack.
+
+  // Recurse on an input if necessary.
+  int start = entry.input_index < node->InputCount() ? entry.input_index : 0;
+  for (int i = start; i < node->InputCount(); i++) {
+    Node* input = node->InputAt(i);
+    entry.input_index = i + 1;
+    if (input != node && Recurse(input)) return;
+  }
+  for (int i = 0; i < start; i++) {
+    Node* input = node->InputAt(i);
+    entry.input_index = i + 1;
+    if (input != node && Recurse(input)) return;
+  }
+
+  // Remember the node count before reduction.
+  const int node_count = graph()->NodeCount();
+
+  // All inputs should be visited or on stack. Apply reductions to node.
+  Reduction reduction = Reduce(node);
+
+  // If there was no reduction, pop {node} and continue.
+  if (!reduction.Changed()) return Pop();
+
+  // Check if the reduction is an in-place update of the {node}.
+  Node* const replacement = reduction.replacement();
+  if (replacement == node) {
+    // In-place update of {node}, may need to recurse on an input.
+    for (int i = 0; i < node->InputCount(); ++i) {
+      Node* input = node->InputAt(i);
+      entry.input_index = i + 1;
+      if (input != node && Recurse(input)) return;
+    }
+  }
+
+  // After reducing the node, pop it off the stack.
+  Pop();
+
+  // Revisit all uses of the node.
+  for (Node* const use : node->uses()) {
+    // Don't revisit this node if it refers to itself.
+    if (use != node) Revisit(use);
+  }
+
+  // Check if we have a new replacement.
+  if (replacement != node) {
+    if (node == graph()->start()) graph()->SetStart(replacement);
+    if (node == graph()->end()) graph()->SetEnd(replacement);
+    // If {node} was replaced by an old node, unlink {node} and assume that
+    // {replacement} was already reduced and finish.
+    if (replacement->id() < node_count) {
+      node->ReplaceUses(replacement);
+      node->Kill();
+    } else {
+      // Otherwise {node} was replaced by a new node. Replace all old uses of
+      // {node} with {replacement}. New nodes created by this reduction can
+      // use {node}.
+      node->ReplaceUsesIf(
+          [node_count](Node* const node) { return node->id() < node_count; },
+          replacement);
+      // Unlink {node} if it's no longer used.
+      if (node->uses().empty()) {
+        node->Kill();
+      }
+
+      // If there was a replacement, reduce it after popping {node}.
+      Recurse(replacement);
+    }
+  }
+}
+
+
+void GraphReducer::Pop() {
+  Node* node = stack_.top().node;
+  state_.Set(node, State::kVisited);
+  stack_.pop();
+}
 
 
-void GraphReducer::ReduceGraph() {
-  GraphReducerVisitor visitor(this);
-  // Perform a post-order reduction of all nodes starting from the end.
-  graph()->VisitNodeInputsFromEnd(&visitor);
+void GraphReducer::Push(Node* const node) {
+  DCHECK(state_.Get(node) != State::kOnStack);
+  state_.Set(node, State::kOnStack);
+  stack_.push({node, 0});
 }
 
 
-// TODO(titzer): partial graph reductions.
+bool GraphReducer::Recurse(Node* node) {
+  if (state_.Get(node) > State::kRevisit) return false;
+  Push(node);
+  return true;
+}
+
+
+void GraphReducer::Revisit(Node* node) {
+  if (state_.Get(node) == State::kVisited) {
+    state_.Set(node, State::kRevisit);
+    revisit_.push(node);
+  }
+}
 
 }  // namespace compiler
 }  // namespace internal
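
For orientation, a hedged sketch of a client of the new driver; Replace() is
assumed to be the usual Reducer convenience helper next to the NoChange() and
Changed() helpers used above, and IsAdditionWithZero() is a stand-in matcher:

    class FoldAddZeroReducer FINAL : public Reducer {
     public:
      Reduction Reduce(Node* node) OVERRIDE {
        if (!IsAdditionWithZero(node)) return NoChange();
        return Replace(node->InputAt(0));  // x + 0 => x
      }
    };

    // Assumed wiring:
    //   FoldAddZeroReducer fold;
    //   GraphReducer reducer(graph, zone);
    //   reducer.AddReducer(&fold);
    //   reducer.ReduceGraph();  // walks from graph->end(), revisiting uses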
index e0e4f7a..09a650c 100644 (file)
@@ -5,17 +5,13 @@
 #ifndef V8_COMPILER_GRAPH_REDUCER_H_
 #define V8_COMPILER_GRAPH_REDUCER_H_
 
+#include "src/compiler/graph.h"
 #include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-// Forward declarations.
-class Graph;
-class Node;
-
-
 // Represents the result of trying to reduce a node in the graph.
 class Reduction FINAL {
  public:
@@ -55,20 +51,42 @@ class Reducer {
 // Performs an iterative reduction of a node graph.
 class GraphReducer FINAL {
  public:
-  explicit GraphReducer(Graph* graph);
+  GraphReducer(Graph* graph, Zone* zone);
 
   Graph* graph() const { return graph_; }
 
-  void AddReducer(Reducer* reducer) { reducers_.push_back(reducer); }
+  void AddReducer(Reducer* reducer);
 
   // Reduce a single node.
-  void ReduceNode(Node* node);
+  void ReduceNode(Node* const);
   // Reduce the whole graph.
   void ReduceGraph();
 
  private:
+  enum class State : uint8_t;
+  struct NodeState {
+    Node* node;
+    int input_index;
+  };
+
+  // Reduce a single node.
+  Reduction Reduce(Node* const);
+  // Reduce the node on top of the stack.
+  void ReduceTop();
+
+  // Node stack operations.
+  void Pop();
+  void Push(Node* node);
+
+  // Revisit queue operations.
+  bool Recurse(Node* node);
+  void Revisit(Node* node);
+
   Graph* graph_;
+  NodeMarker<State> state_;
   ZoneVector<Reducer*> reducers_;
+  ZoneStack<Node*> revisit_;
+  ZoneStack<NodeState> stack_;
 
   DISALLOW_COPY_AND_ASSIGN(GraphReducer);
 };
index 296ffe4..3a0b783 100644 (file)
@@ -9,7 +9,7 @@
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
-#include "src/compiler/operator-properties-inl.h"
+#include "src/compiler/operator-properties.h"
 
 namespace v8 {
 namespace internal {
@@ -59,7 +59,7 @@ void GraphReplayPrinter::PrintReplayOpCreator(const Operator* op) {
       PrintF("unique_constant");
       break;
     case IrOpcode::kPhi:
-      PrintF("%d", op->InputCount());
+      PrintF("%d", op->ValueInputCount());
       break;
     case IrOpcode::kEffectPhi:
       PrintF("%d", op->EffectInputCount());
index a66af22..f41311e 100644 (file)
@@ -5,6 +5,7 @@
 #ifndef V8_COMPILER_GRAPH_REPLAY_H_
 #define V8_COMPILER_GRAPH_REPLAY_H_
 
+#include "src/compiler/generic-algorithm.h"
 #include "src/compiler/node.h"
 
 namespace v8 {
index 19f24cf..e018c7a 100644 (file)
@@ -8,8 +8,6 @@
 #include <string>
 
 #include "src/code-stubs.h"
-#include "src/compiler/generic-node.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node.h"
@@ -27,6 +25,9 @@ namespace internal {
 namespace compiler {
 
 static int SafeId(Node* node) { return node == NULL ? -1 : node->id(); }
+static const char* SafeMnemonic(Node* node) {
+  return node == NULL ? "null" : node->op()->mnemonic();
+}
 
 #define DEAD_COLOR "#999999"
 
@@ -227,7 +228,7 @@ class GraphVisualizer {
   void PrintNode(Node* node, bool gray);
 
  private:
-  void PrintEdge(Node::Edge edge);
+  void PrintEdge(Edge edge);
 
   AllNodes all_;
   std::ostream& os_;
@@ -284,26 +285,26 @@ void GraphVisualizer::PrintNode(Node* node, bool gray) {
   label << *node->op();
   os_ << "    label=\"{{#" << SafeId(node) << ":" << Escaped(label);
 
-  InputIter i = node->inputs().begin();
+  auto i = node->input_edges().begin();
   for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
-    os_ << "|<I" << i.index() << ">#" << SafeId(*i);
+    os_ << "|<I" << (*i).index() << ">#" << SafeId((*i).to());
   }
   for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
        ++i, j--) {
-    os_ << "|<I" << i.index() << ">X #" << SafeId(*i);
+    os_ << "|<I" << (*i).index() << ">X #" << SafeId((*i).to());
   }
   for (int j = OperatorProperties::GetFrameStateInputCount(node->op()); j > 0;
        ++i, j--) {
-    os_ << "|<I" << i.index() << ">F #" << SafeId(*i);
+    os_ << "|<I" << (*i).index() << ">F #" << SafeId((*i).to());
   }
   for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
-    os_ << "|<I" << i.index() << ">E #" << SafeId(*i);
+    os_ << "|<I" << (*i).index() << ">E #" << SafeId((*i).to());
   }
 
   if (OperatorProperties::IsBasicBlockBegin(node->op()) ||
       GetControlCluster(node) == NULL) {
     for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
-      os_ << "|<I" << i.index() << ">C #" << SafeId(*i);
+      os_ << "|<I" << (*i).index() << ">C #" << SafeId((*i).to());
     }
   }
   os_ << "}";
@@ -337,7 +338,7 @@ static bool IsLikelyBackEdge(Node* from, int index, Node* to) {
 }
 
 
-void GraphVisualizer::PrintEdge(Node::Edge edge) {
+void GraphVisualizer::PrintEdge(Edge edge) {
   Node* from = edge.from();
   int index = edge.index();
   Node* to = edge.to();
@@ -381,8 +382,8 @@ void GraphVisualizer::Print() {
 
   // With all the nodes written, add the edges.
   for (Node* const node : all_.live) {
-    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
-      PrintEdge(i.edge());
+    for (Edge edge : node->use_edges()) {
+      PrintEdge(edge);
     }
   }
   os_ << "}\n";
@@ -530,7 +531,7 @@ void GraphC1Visualizer::PrintInputs(InputIter* i, int count,
 
 
 void GraphC1Visualizer::PrintInputs(Node* node) {
-  InputIter i = node->inputs().begin();
+  auto i = node->inputs().begin();
   PrintInputs(&i, node->op()->ValueInputCount(), " ");
   PrintInputs(&i, OperatorProperties::GetContextInputCount(node->op()),
               " Ctx:");
@@ -723,14 +724,18 @@ void GraphC1Visualizer::PrintLiveRange(LiveRange* range, const char* type) {
         os_ << " \"" << Register::AllocationIndexToString(assigned_reg) << "\"";
       }
     } else if (range->IsSpilled()) {
-      InstructionOperand* op = range->TopLevel()->GetSpillOperand();
-      if (op->IsDoubleStackSlot()) {
-        os_ << " \"double_stack:" << op->index() << "\"";
-      } else if (op->IsStackSlot()) {
-        os_ << " \"stack:" << op->index() << "\"";
+      int index = -1;
+      if (range->TopLevel()->HasSpillRange()) {
+        index = kMaxInt;  // This hasn't been set yet.
+      } else {
+        index = range->TopLevel()->GetSpillOperand()->index();
+      }
+      if (range->TopLevel()->Kind() == DOUBLE_REGISTERS) {
+        os_ << " \"double_stack:" << index << "\"";
+      } else if (range->TopLevel()->Kind() == GENERAL_REGISTERS) {
+        os_ << " \"stack:" << index << "\"";
       } else {
-        DCHECK(op->IsConstant());
-        os_ << " \"const(nostack):" << op->index() << "\"";
+        os_ << " \"const(nostack):" << index << "\"";
       }
     }
     int parent_index = -1;
@@ -785,6 +790,43 @@ std::ostream& operator<<(std::ostream& os, const AsC1VAllocator& ac) {
   GraphC1Visualizer(os, &tmp_zone).PrintAllocator(ac.phase_, ac.allocator_);
   return os;
 }
+
+const int kUnvisited = 0;
+const int kOnStack = 1;
+const int kVisited = 2;
+
+std::ostream& operator<<(std::ostream& os, const AsRPO& ar) {
+  Zone local_zone(ar.graph.zone()->isolate());
+  ZoneVector<byte> state(ar.graph.NodeCount(), kUnvisited, &local_zone);
+  ZoneStack<Node*> stack(&local_zone);
+
+  stack.push(ar.graph.end());
+  state[ar.graph.end()->id()] = kOnStack;
+  while (!stack.empty()) {
+    Node* n = stack.top();
+    bool pop = true;
+    for (Node* const i : n->inputs()) {
+      if (state[i->id()] == kUnvisited) {
+        state[i->id()] = kOnStack;
+        stack.push(i);
+        pop = false;
+        break;
+      }
+    }
+    if (pop) {
+      state[n->id()] = kVisited;
+      stack.pop();
+      os << "#" << SafeId(n) << ":" << SafeMnemonic(n) << "(";
+      int j = 0;
+      for (Node* const i : n->inputs()) {
+        if (j++ > 0) os << ", ";
+        os << "#" << SafeId(i) << ":" << SafeMnemonic(i);
+      }
+      os << ")" << std::endl;
+    }
+  }
+  return os;
+}
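+// Assumed usage of the printer above, e.g. from a debugging session:
+//   OFStream os(stdout);
+//   os << AsRPO(*graph);  // one "#id:mnemonic(inputs...)" line per node,
+//                         // each node printed after its inputs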
 }
 }
 }  // namespace v8::internal::compiler
index 7212a4f..3dd66ea 100644 (file)
@@ -36,6 +36,14 @@ struct AsJSON {
 
 std::ostream& operator<<(std::ostream& os, const AsJSON& ad);
 
+struct AsRPO {
+  explicit AsRPO(const Graph& g) : graph(g) {}
+  const Graph& graph;
+};
+
+std::ostream& operator<<(std::ostream& os, const AsRPO& ad);
+
+
 struct AsC1VCompilation {
   explicit AsC1VCompilation(const CompilationInfo* info) : info_(info) {}
   const CompilationInfo* info_;
index 1de712e..995046b 100644 (file)
@@ -5,7 +5,6 @@
 #include "src/compiler/graph.h"
 
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/node-aux-data-inl.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator-properties.h"
-#include "src/compiler/operator-properties-inl.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-Graph::Graph(Zone* zone) : GenericGraph<Node>(zone), decorators_(zone) {}
+Graph::Graph(Zone* zone)
+    : zone_(zone),
+      start_(NULL),
+      end_(NULL),
+      mark_max_(0),
+      next_node_id_(0),
+      decorators_(zone) {}
 
 
 void Graph::Decorate(Node* node) {
@@ -32,7 +36,7 @@ void Graph::Decorate(Node* node) {
 
 Node* Graph::NewNode(const Operator* op, int input_count, Node** inputs,
                      bool incomplete) {
-  DCHECK_LE(op->InputCount(), input_count);
+  DCHECK_LE(op->ValueInputCount(), input_count);
   Node* result = Node::New(this, input_count, inputs, incomplete);
   result->Initialize(op);
   if (!incomplete) {
index 9b0d234..d619da2 100644 (file)
@@ -8,7 +8,6 @@
 #include <map>
 #include <set>
 
-#include "src/compiler/generic-algorithm.h"
 #include "src/compiler/node.h"
 #include "src/compiler/node-aux-data.h"
 #include "src/compiler/source-position.h"
@@ -17,10 +16,11 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
 class GraphDecorator;
 
 
-class Graph : public GenericGraph<Node> {
+class Graph : public ZoneObject {
  public:
   explicit Graph(Zone* zone);
 
@@ -30,7 +30,7 @@ class Graph : public GenericGraph<Node> {
 
   // Factories for nodes with static input counts.
   Node* NewNode(const Operator* op) {
-    return NewNode(op, 0, static_cast<Node**>(NULL));
+    return NewNode(op, 0, static_cast<Node**>(nullptr));
   }
   Node* NewNode(const Operator* op, Node* n1) { return NewNode(op, 1, &n1); }
   Node* NewNode(const Operator* op, Node* n1, Node* n2) {
@@ -62,13 +62,17 @@ class Graph : public GenericGraph<Node> {
   }
 
   template <class Visitor>
-  void VisitNodeUsesFrom(Node* node, Visitor* visitor);
+  inline void VisitNodeInputsFromEnd(Visitor* visitor);
 
-  template <class Visitor>
-  void VisitNodeUsesFromStart(Visitor* visitor);
+  Zone* zone() const { return zone_; }
+  Node* start() const { return start_; }
+  Node* end() const { return end_; }
 
-  template <class Visitor>
-  void VisitNodeInputsFromEnd(Visitor* visitor);
+  void SetStart(Node* start) { start_ = start; }
+  void SetEnd(Node* end) { end_ = end; }
+
+  NodeId NextNodeID() { return next_node_id_++; }
+  NodeId NodeCount() const { return next_node_id_; }
 
   void Decorate(Node* node);
 
@@ -84,10 +88,56 @@ class Graph : public GenericGraph<Node> {
   }
 
  private:
+  template <typename State>
+  friend class NodeMarker;
+
+  Zone* zone_;
+  Node* start_;
+  Node* end_;
+  Mark mark_max_;
+  NodeId next_node_id_;
   ZoneVector<GraphDecorator*> decorators_;
+
+  DISALLOW_COPY_AND_ASSIGN(Graph);
+};
+
+
+// A NodeMarker uses monotonically increasing marks to assign local "states"
+// to nodes. Only one NodeMarker per graph is valid at a given time.
+template <typename State>
+class NodeMarker BASE_EMBEDDED {
+ public:
+  NodeMarker(Graph* graph, uint32_t num_states)
+      : mark_min_(graph->mark_max_), mark_max_(graph->mark_max_ += num_states) {
+    DCHECK(num_states > 0);         // user error!
+    DCHECK(mark_max_ > mark_min_);  // check for wraparound.
+  }
+
+  State Get(Node* node) {
+    Mark mark = node->mark();
+    if (mark < mark_min_) {
+      mark = mark_min_;
+      node->set_mark(mark_min_);
+    }
+    DCHECK_LT(mark, mark_max_);
+    return static_cast<State>(mark - mark_min_);
+  }
+
+  void Set(Node* node, State state) {
+    Mark local = static_cast<Mark>(state);
+    DCHECK(local < (mark_max_ - mark_min_));
+    DCHECK_LT(node->mark(), mark_max_);
+    node->set_mark(local + mark_min_);
+  }
+
+ private:
+  Mark mark_min_;
+  Mark mark_max_;
 };
 
 
+// A graph decorator can be used to add behavior to the creation of nodes
+// in a graph.
 class GraphDecorator : public ZoneObject {
  public:
   virtual ~GraphDecorator() {}
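
A usage sketch for the NodeMarker added above, under its stated contract (only
one marker may be live per graph, and num_states must cover every value you
Set). The Color enum is illustrative, not part of the patch:

    enum class Color : uint8_t { kWhite, kGrey, kBlack };

    // Allocating the marker advances the graph's mark space, so every node
    // implicitly starts out in state 0 (kWhite here); no side table needed.
    NodeMarker<Color> marker(graph, 3);
    if (marker.Get(node) == Color::kWhite) {
      marker.Set(node, Color::kGrey);
    }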
index dda4696..55f7426 100644 (file)
@@ -69,6 +69,8 @@ class IA32OperandConverter : public InstructionOperandConverter {
         return Immediate(constant.ToHeapObject());
       case Constant::kInt64:
         break;
+      case Constant::kRpoNumber:
+        return Immediate::CodeRelativeOffset(ToLabel(operand));
     }
     UNREACHABLE();
     return Immediate(-1);
@@ -153,18 +155,130 @@ class IA32OperandConverter : public InstructionOperandConverter {
     return Operand(no_reg, 0);
   }
 
-  Operand MemoryOperand() {
-    int first_input = 0;
+  Operand MemoryOperand(int first_input = 0) {
     return MemoryOperand(&first_input);
   }
 };
 
 
-static bool HasImmediateInput(Instruction* instr, int index) {
+namespace {
+
+bool HasImmediateInput(Instruction* instr, int index) {
   return instr->InputAt(index)->IsImmediate();
 }
 
 
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ xor_(result_, result_); }
+
+ private:
+  Register const result_;
+};
+
+
+class OutOfLineLoadFloat FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ pcmpeqd(result_, result_); }
+
+ private:
+  XMMRegister const result_;
+};
+
+
+class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
+ public:
+  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
+                             XMMRegister input)
+      : OutOfLineCode(gen), result_(result), input_(input) {}
+
+  void Generate() FINAL {
+    __ sub(esp, Immediate(kDoubleSize));
+    __ movsd(MemOperand(esp, 0), input_);
+    __ SlowTruncateToI(result_, esp, 0);
+    __ add(esp, Immediate(kDoubleSize));
+  }
+
+ private:
+  Register const result_;
+  XMMRegister const input_;
+};
+
+}  // namespace
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                          \
+  do {                                                                  \
+    auto result = i.OutputDoubleRegister();                             \
+    auto offset = i.InputRegister(0);                                   \
+    if (instr->InputAt(1)->IsRegister()) {                              \
+      __ cmp(offset, i.InputRegister(1));                               \
+    } else {                                                            \
+      __ cmp(offset, i.InputImmediate(1));                              \
+    }                                                                   \
+    OutOfLineCode* ool = new (zone()) OutOfLineLoadFloat(this, result); \
+    __ j(above_equal, ool->entry());                                    \
+    __ asm_instr(result, i.MemoryOperand(2));                           \
+    __ bind(ool->exit());                                               \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                          \
+  do {                                                                    \
+    auto result = i.OutputRegister();                                     \
+    auto offset = i.InputRegister(0);                                     \
+    if (instr->InputAt(1)->IsRegister()) {                                \
+      __ cmp(offset, i.InputRegister(1));                                 \
+    } else {                                                              \
+      __ cmp(offset, i.InputImmediate(1));                                \
+    }                                                                     \
+    OutOfLineCode* ool = new (zone()) OutOfLineLoadInteger(this, result); \
+    __ j(above_equal, ool->entry());                                      \
+    __ asm_instr(result, i.MemoryOperand(2));                             \
+    __ bind(ool->exit());                                                 \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                 \
+  do {                                                          \
+    auto offset = i.InputRegister(0);                           \
+    if (instr->InputAt(1)->IsRegister()) {                      \
+      __ cmp(offset, i.InputRegister(1));                       \
+    } else {                                                    \
+      __ cmp(offset, i.InputImmediate(1));                      \
+    }                                                           \
+    Label done;                                                 \
+    __ j(above_equal, &done, Label::kNear);                     \
+    __ asm_instr(i.MemoryOperand(3), i.InputDoubleRegister(2)); \
+    __ bind(&done);                                             \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)            \
+  do {                                                       \
+    auto offset = i.InputRegister(0);                        \
+    if (instr->InputAt(1)->IsRegister()) {                   \
+      __ cmp(offset, i.InputRegister(1));                    \
+    } else {                                                 \
+      __ cmp(offset, i.InputImmediate(1));                   \
+    }                                                        \
+    Label done;                                              \
+    __ j(above_equal, &done, Label::kNear);                  \
+    if (instr->InputAt(2)->IsRegister()) {                   \
+      __ asm_instr(i.MemoryOperand(3), i.InputRegister(2));  \
+    } else {                                                 \
+      __ asm_instr(i.MemoryOperand(3), i.InputImmediate(2)); \
+    }                                                        \
+    __ bind(&done);                                          \
+  } while (false)
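+
+// For reference, each ASSEMBLE_CHECKED_* macro above expands roughly to:
+//     cmp offset, length       ; unsigned bounds check (inputs 0 and 1)
+//     jae out_of_line_or_done
+//     <asm_instr> ...          ; the guarded load or store
+// An out-of-bounds load branches to out-of-line code that materializes a
+// default result (zero via xor for integers, an all-ones NaN pattern via
+// pcmpeqd for floats); an out-of-bounds store is simply skipped.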
+
+
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   IA32OperandConverter i(this, instr);
@@ -195,7 +309,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
     }
     case kArchJmp:
-      __ jmp(GetLabel(i.InputRpo(0)));
+      AssembleArchJump(i.InputRpo(0));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -206,9 +320,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArchStackPointer:
       __ mov(i.OutputRegister(), esp);
       break;
-    case kArchTruncateDoubleToI:
-      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+    case kArchTruncateDoubleToI: {
+      auto result = i.OutputRegister();
+      auto input = i.InputDoubleRegister(0);
+      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
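+      // cvttsd2si produces 0x80000000 (the "integer indefinite" value) for
+      // NaN and out-of-range inputs; cmp with 1 sets the overflow flag for
+      // exactly that value, so the jo below catches precisely the inputs
+      // that need the slow out-of-line truncation.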
+      __ cvttsd2si(result, Operand(input));
+      __ cmp(result, 1);
+      __ j(overflow, ool->entry());
+      __ bind(ool->exit());
       break;
+    }
     case kIA32Add:
       if (HasImmediateInput(instr, 1)) {
         __ add(i.InputOperand(0), i.InputImmediate(1));
@@ -397,6 +518,30 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kSSEUint32ToFloat64:
       __ LoadUint32(i.OutputDoubleRegister(), i.InputOperand(0));
       break;
+    case kAVXFloat64Add: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vaddsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
+    case kAVXFloat64Sub: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vsubsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
+    case kAVXFloat64Mul: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vmulsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
+    case kAVXFloat64Div: {
+      CpuFeatureScope avx_scope(masm(), AVX);
+      __ vdivsd(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+                i.InputOperand(1));
+      break;
+    }
     case kIA32Movsxbl:
       __ movsx_b(i.OutputRegister(), i.MemoryOperand());
       break;
@@ -460,9 +605,41 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         __ movss(operand, i.InputDoubleRegister(index));
       }
       break;
-    case kIA32Lea:
-      __ lea(i.OutputRegister(), i.MemoryOperand());
+    case kIA32Lea: {
+      AddressingMode mode = AddressingModeField::decode(instr->opcode());
+      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
+      // and addressing mode just happen to work out. The "addl"/"subl" forms
+      // in these cases are faster based on measurements.
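+      // Examples of the rewrites below, when the output register aliases the
+      // lea input register:
+      //   lea eax, [eax + 4]     => add eax, 4    (kMode_MRI)
+      //   lea eax, [eax - 4]     => sub eax, 4    (kMode_MRI, negative)
+      //   lea eax, [eax + eax]   => shl eax, 1    (kMode_MR1, same index)
+      //   lea eax, [eax * 4]     => shl eax, 2    (kMode_M4)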
+      if (mode == kMode_MI) {
+        __ Move(i.OutputRegister(), Immediate(i.InputInt32(0)));
+      } else if (i.InputRegister(0).is(i.OutputRegister())) {
+        if (mode == kMode_MRI) {
+          int32_t constant_summand = i.InputInt32(1);
+          if (constant_summand > 0) {
+            __ add(i.OutputRegister(), Immediate(constant_summand));
+          } else if (constant_summand < 0) {
+            __ sub(i.OutputRegister(), Immediate(-constant_summand));
+          }
+        } else if (mode == kMode_MR1) {
+          if (i.InputRegister(1).is(i.OutputRegister())) {
+            __ shl(i.OutputRegister(), 1);
+          } else {
+            __ lea(i.OutputRegister(), i.MemoryOperand());
+          }
+        } else if (mode == kMode_M2) {
+          __ shl(i.OutputRegister(), 1);
+        } else if (mode == kMode_M4) {
+          __ shl(i.OutputRegister(), 2);
+        } else if (mode == kMode_M8) {
+          __ shl(i.OutputRegister(), 3);
+        } else {
+          __ lea(i.OutputRegister(), i.MemoryOperand());
+        }
+      } else {
+        __ lea(i.OutputRegister(), i.MemoryOperand());
+      }
       break;
+    }
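
Concretely, the peephole above turns e.g. "lea eax, [eax + 8]" into
"add eax, 8" and "lea eax, [eax*4]" into "shl eax, 2". A hand-written check
of the arithmetic identities that make the rewrites safe (unsigned, matching
address arithmetic):

    #include <cassert>
    #include <cstdint>

    inline void CheckLeaStrengthReduction(uint32_t r) {
      assert(r + r == r << 1);  // kMode_MR1 when index aliases the output
      assert(r * 2 == r << 1);  // kMode_M2
      assert(r * 4 == r << 2);  // kMode_M4
      assert(r * 8 == r << 3);  // kMode_M8
    }
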
     case kIA32Push:
       if (HasImmediateInput(instr, 0)) {
         __ push(i.InputImmediate(0));
@@ -481,27 +658,54 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ RecordWrite(object, index, value, mode);
       break;
     }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_b);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_b);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movsx_w);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movzx_w);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(mov);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(mov_b);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(mov_w);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(mov);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+      break;
   }
 }
 
 
-// Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
+// Assembles a branch after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   IA32OperandConverter i(this, instr);
-  Label done;
-
-  // Emit a branch. The true and false targets are always the last two inputs
-  // to the instruction.
-  BasicBlock::RpoNumber tblock =
-      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
-  BasicBlock::RpoNumber fblock =
-      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
-  bool fallthru = IsNextInAssemblyOrder(fblock);
-  Label* tlabel = GetLabel(tblock);
-  Label* flabel = fallthru ? &done : GetLabel(fblock);
-  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
-  switch (condition) {
+  Label::Distance flabel_distance =
+      branch->fallthru ? Label::kNear : Label::kFar;
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  switch (branch->condition) {
     case kUnorderedEqual:
       __ j(parity_even, flabel, flabel_distance);
     // Fall through.
@@ -557,8 +761,13 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
       __ j(no_overflow, tlabel);
       break;
   }
-  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
-  __ bind(&done);
+  // Add a jump if not falling through to the next block.
+  if (!branch->fallthru) __ jmp(flabel);
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
 
 
@@ -818,24 +1027,6 @@ void CodeGenerator::AssemblePrologue() {
     __ Prologue(info->IsCodePreAgingActive());
     frame->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
-
-    // Sloppy mode functions and builtins need to replace the receiver with the
-    // global proxy when called as functions (without an explicit receiver
-    // object).
-    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
-    if (info->strict_mode() == SLOPPY && !info->is_native()) {
-      Label ok;
-      // +2 for return address and saved frame pointer.
-      int receiver_slot = info->scope()->num_parameters() + 2;
-      __ mov(ecx, Operand(ebp, receiver_slot * kPointerSize));
-      __ cmp(ecx, isolate()->factory()->undefined_value());
-      __ j(not_equal, &ok, Label::kNear);
-      __ mov(ecx, GlobalObjectOperand());
-      __ mov(ecx, FieldOperand(ecx, GlobalObject::kGlobalProxyOffset));
-      __ mov(Operand(ebp, receiver_slot * kPointerSize), ecx);
-      __ bind(&ok);
-    }
-
   } else {
     __ StubPrologue();
     frame->SetRegisterSaveAreaSize(
index f72a1ca..ec9fd18 100644 (file)
@@ -46,6 +46,10 @@ namespace compiler {
   V(SSEFloat64ToUint32)            \
   V(SSEInt32ToFloat64)             \
   V(SSEUint32ToFloat64)            \
+  V(AVXFloat64Add)                 \
+  V(AVXFloat64Sub)                 \
+  V(AVXFloat64Mul)                 \
+  V(AVXFloat64Div)                 \
   V(IA32Movsxbl)                   \
   V(IA32Movzxbl)                   \
   V(IA32Movb)                      \
index c242fb4..16063ab 100644 (file)
@@ -38,279 +38,83 @@ class IA32OperandGenerator FINAL : public OperandGenerator {
     }
   }
 
-  bool CanBeBetterLeftOperand(Node* node) const {
-    return !selector()->IsLive(node);
-  }
-};
-
-
-// Get the AddressingMode of scale factor N from the AddressingMode of scale
-// factor 1.
-static AddressingMode AdjustAddressingMode(AddressingMode base_mode,
-                                           int power) {
-  DCHECK(0 <= power && power < 4);
-  return static_cast<AddressingMode>(static_cast<int>(base_mode) + power);
-}
-
-
-// Fairly Intel-specific node matcher used for matching scale factors in
-// addressing modes.
-// Matches nodes of the form [x * N] for N in {1,2,4,8}.
-class ScaleFactorMatcher : public NodeMatcher {
- public:
-  static const int kMatchedFactors[4];
-
-  explicit ScaleFactorMatcher(Node* node);
-
-  bool Matches() const { return left_ != NULL; }
-  int Power() const {
-    DCHECK(Matches());
-    return power_;
-  }
-  Node* Left() const {
-    DCHECK(Matches());
-    return left_;
-  }
-
- private:
-  Node* left_;
-  int power_;
-};
-
-
-// Fairly Intel-specific node matcher used for matching index and displacement
-// operands in addressing modes.
-// Matches nodes of form:
-//  [x * N]
-//  [x * N + K]
-//  [x + K]
-//  [x] -- fallback case
-// for N in {1,2,4,8} and K int32_t
-class IndexAndDisplacementMatcher : public NodeMatcher {
- public:
-  explicit IndexAndDisplacementMatcher(Node* node);
-
-  Node* index_node() const { return index_node_; }
-  int displacement() const { return displacement_; }
-  int power() const { return power_; }
-
- private:
-  Node* index_node_;
-  int displacement_;
-  int power_;
-};
-
-
-// Fairly Intel-specific node matcher used for matching multiplies that can be
-// transformed to lea instructions.
-// Matches nodes of form:
-//  [x * N]
-// for N in {1,2,3,4,5,8,9}
-class LeaMultiplyMatcher : public NodeMatcher {
- public:
-  static const int kMatchedFactors[7];
-
-  explicit LeaMultiplyMatcher(Node* node);
-
-  bool Matches() const { return left_ != NULL; }
-  int Power() const {
-    DCHECK(Matches());
-    return power_;
-  }
-  Node* Left() const {
-    DCHECK(Matches());
-    return left_;
-  }
-  // Displacement will be either 0 or 1.
-  int32_t Displacement() const {
-    DCHECK(Matches());
-    return displacement_;
-  }
-
- private:
-  Node* left_;
-  int power_;
-  int displacement_;
-};
-
-
-const int ScaleFactorMatcher::kMatchedFactors[] = {1, 2, 4, 8};
-
-
-ScaleFactorMatcher::ScaleFactorMatcher(Node* node)
-    : NodeMatcher(node), left_(NULL), power_(0) {
-  if (opcode() != IrOpcode::kInt32Mul) return;
-  // TODO(dcarney): should test 64 bit ints as well.
-  Int32BinopMatcher m(this->node());
-  if (!m.right().HasValue()) return;
-  int32_t value = m.right().Value();
-  switch (value) {
-    case 8:
-      power_++;  // Fall through.
-    case 4:
-      power_++;  // Fall through.
-    case 2:
-      power_++;  // Fall through.
-    case 1:
-      break;
-    default:
-      return;
-  }
-  left_ = m.left().node();
-}
-
-
-IndexAndDisplacementMatcher::IndexAndDisplacementMatcher(Node* node)
-    : NodeMatcher(node), index_node_(node), displacement_(0), power_(0) {
-  if (opcode() == IrOpcode::kInt32Add) {
-    Int32BinopMatcher m(this->node());
-    if (m.right().HasValue()) {
-      displacement_ = m.right().Value();
-      index_node_ = m.left().node();
-    }
-  }
-  // Test scale factor.
-  ScaleFactorMatcher scale_matcher(index_node_);
-  if (scale_matcher.Matches()) {
-    index_node_ = scale_matcher.Left();
-    power_ = scale_matcher.Power();
-  }
-}
-
-
-const int LeaMultiplyMatcher::kMatchedFactors[7] = {1, 2, 3, 4, 5, 8, 9};
-
-
-LeaMultiplyMatcher::LeaMultiplyMatcher(Node* node)
-    : NodeMatcher(node), left_(NULL), power_(0), displacement_(0) {
-  if (opcode() != IrOpcode::kInt32Mul && opcode() != IrOpcode::kInt64Mul) {
-    return;
-  }
-  int64_t value;
-  Node* left = NULL;
-  {
-    Int32BinopMatcher m(this->node());
-    if (m.right().HasValue()) {
-      value = m.right().Value();
-      left = m.left().node();
-    } else {
-      Int64BinopMatcher m(this->node());
-      if (m.right().HasValue()) {
-        value = m.right().Value();
-        left = m.left().node();
-      } else {
-        return;
+  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale, Node* base,
+                                             Node* displacement_node,
+                                             InstructionOperand* inputs[],
+                                             size_t* input_count) {
+    AddressingMode mode = kMode_MRI;
+    int32_t displacement = (displacement_node == NULL)
+                               ? 0
+                               : OpParameter<int32_t>(displacement_node);
+    if (base != NULL) {
+      if (base->opcode() == IrOpcode::kInt32Constant) {
+        displacement += OpParameter<int32_t>(base);
+        base = NULL;
       }
     }
-  }
-  switch (value) {
-    case 9:
-    case 8:
-      power_++;  // Fall through.
-    case 5:
-    case 4:
-      power_++;  // Fall through.
-    case 3:
-    case 2:
-      power_++;  // Fall through.
-    case 1:
-      break;
-    default:
-      return;
-  }
-  if (!base::bits::IsPowerOfTwo64(value)) {
-    displacement_ = 1;
-  }
-  left_ = left;
-}
-
-
-class AddressingModeMatcher {
- public:
-  AddressingModeMatcher(IA32OperandGenerator* g, Node* base, Node* index)
-      : base_operand_(NULL),
-        index_operand_(NULL),
-        displacement_operand_(NULL),
-        mode_(kMode_None) {
-    Int32Matcher index_imm(index);
-    if (index_imm.HasValue()) {
-      int32_t displacement = index_imm.Value();
-      // Compute base operand and fold base immediate into displacement.
-      Int32Matcher base_imm(base);
-      if (!base_imm.HasValue()) {
-        base_operand_ = g->UseRegister(base);
-      } else {
-        displacement += base_imm.Value();
-      }
-      if (displacement != 0 || base_operand_ == NULL) {
-        displacement_operand_ = g->TempImmediate(displacement);
-      }
-      if (base_operand_ == NULL) {
-        mode_ = kMode_MI;
+    if (base != NULL) {
+      inputs[(*input_count)++] = UseRegister(base);
+      if (index != NULL) {
+        DCHECK(scale >= 0 && scale <= 3);
+        inputs[(*input_count)++] = UseRegister(index);
+        if (displacement != 0) {
+          inputs[(*input_count)++] = TempImmediate(displacement);
+          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
+                                                       kMode_MR4I, kMode_MR8I};
+          mode = kMRnI_modes[scale];
+        } else {
+          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
+                                                      kMode_MR4, kMode_MR8};
+          mode = kMRn_modes[scale];
+        }
       } else {
         if (displacement == 0) {
-          mode_ = kMode_MR;
+          mode = kMode_MR;
         } else {
-          mode_ = kMode_MRI;
+          inputs[(*input_count)++] = TempImmediate(displacement);
+          mode = kMode_MRI;
         }
       }
     } else {
-      // Compute index and displacement.
-      IndexAndDisplacementMatcher matcher(index);
-      index_operand_ = g->UseRegister(matcher.index_node());
-      int32_t displacement = matcher.displacement();
-      // Compute base operand and fold base immediate into displacement.
-      Int32Matcher base_imm(base);
-      if (!base_imm.HasValue()) {
-        base_operand_ = g->UseRegister(base);
-      } else {
-        displacement += base_imm.Value();
-      }
-      // Compute displacement operand.
-      if (displacement != 0) {
-        displacement_operand_ = g->TempImmediate(displacement);
-      }
-      // Compute mode with scale factor one.
-      if (base_operand_ == NULL) {
-        if (displacement_operand_ == NULL) {
-          mode_ = kMode_M1;
+      DCHECK(scale >= 0 && scale <= 3);
+      if (index != NULL) {
+        inputs[(*input_count)++] = UseRegister(index);
+        if (displacement != 0) {
+          inputs[(*input_count)++] = TempImmediate(displacement);
+          static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+                                                      kMode_M4I, kMode_M8I};
+          mode = kMnI_modes[scale];
         } else {
-          mode_ = kMode_M1I;
+          static const AddressingMode kMn_modes[] = {kMode_MR, kMode_M2,
+                                                     kMode_M4, kMode_M8};
+          mode = kMn_modes[scale];
         }
       } else {
-        if (displacement_operand_ == NULL) {
-          mode_ = kMode_MR1;
-        } else {
-          mode_ = kMode_MR1I;
-        }
+        inputs[(*input_count)++] = TempImmediate(displacement);
+        return kMode_MI;
       }
-      // Adjust mode to actual scale factor.
-      mode_ = AdjustAddressingMode(mode_, matcher.power());
     }
-    DCHECK_NE(kMode_None, mode_);
+    return mode;
   }
 
-  size_t SetInputs(InstructionOperand** inputs) {
-    size_t input_count = 0;
-    // Compute inputs_ and input_count.
-    if (base_operand_ != NULL) {
-      inputs[input_count++] = base_operand_;
-    }
-    if (index_operand_ != NULL) {
-      inputs[input_count++] = index_operand_;
-    }
-    if (displacement_operand_ != NULL) {
-      inputs[input_count++] = displacement_operand_;
+  AddressingMode GetEffectiveAddressMemoryOperand(Node* node,
+                                                  InstructionOperand* inputs[],
+                                                  size_t* input_count) {
+    BaseWithIndexAndDisplacement32Matcher m(node, true);
+    DCHECK(m.matches());
+    if (m.displacement() == NULL || CanBeImmediate(m.displacement())) {
+      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
+                                         m.displacement(), inputs, input_count);
+    } else {
+      inputs[(*input_count)++] = UseRegister(node->InputAt(0));
+      inputs[(*input_count)++] = UseRegister(node->InputAt(1));
+      return kMode_MR1;
     }
-    DCHECK_NE(input_count, 0);
-    return input_count;
   }
 
-  static const int kMaxInputCount = 3;
-  InstructionOperand* base_operand_;
-  InstructionOperand* index_operand_;
-  InstructionOperand* displacement_operand_;
-  AddressingMode mode_;
+  bool CanBeBetterLeftOperand(Node* node) const {
+    return !selector()->IsLive(node);
+  }
 };
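
GenerateMemoryOperandInputs above packs register operands first and the
immediate displacement last; a constant base is folded into the displacement
before the mode is chosen. A summary of the selection, derived from the code
(n stands for the scale factor 1, 2, 4 or 8):

    base  index  displacement  mode         inputs[]
    yes   yes    0             kMode_MRn    base, index
    yes   yes    K             kMode_MRnI   base, index, K
    yes   no     0             kMode_MR     base
    yes   no     K             kMode_MRI    base, K
    no    yes    0             kMode_Mn     index      (n = 1 folds to MR)
    no    yes    K             kMode_MnI    index, K   (n = 1 folds to MRI)
    no    no     any           kMode_MI     K
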
 
 
@@ -325,8 +129,6 @@ static void VisitRRFloat64(InstructionSelector* selector, ArchOpcode opcode,
 void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
-  Node* base = node->InputAt(0);
-  Node* index = node->InputAt(1);
 
   ArchOpcode opcode;
   // TODO(titzer): signed/unsigned small loads
@@ -354,11 +156,13 @@ void InstructionSelector::VisitLoad(Node* node) {
   }
 
   IA32OperandGenerator g(this);
-  AddressingModeMatcher matcher(&g, base, index);
-  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount];
-  size_t input_count = matcher.SetInputs(inputs);
+  InstructionOperand* outputs[1];
+  outputs[0] = g.DefineAsRegister(node);
+  InstructionOperand* inputs[3];
+  size_t input_count = 0;
+  AddressingMode mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(mode);
   Emit(code, 1, outputs, input_count, inputs);
 }
 
@@ -417,15 +221,107 @@ void InstructionSelector::VisitStore(Node* node) {
     val = g.UseRegister(value);
   }
 
-  AddressingModeMatcher matcher(&g, base, index);
-  InstructionCode code = opcode | AddressingModeField::encode(matcher.mode_);
-  InstructionOperand* inputs[AddressingModeMatcher::kMaxInputCount + 1];
-  size_t input_count = matcher.SetInputs(inputs);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  AddressingMode mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(mode);
   inputs[input_count++] = val;
   Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
 }
 
 
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  IA32OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.UseRegister(offset);
+  InstructionOperand* length_operand =
+      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+  if (g.CanBeImmediate(buffer)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), offset_operand, length_operand,
+         offset_operand, g.UseImmediate(buffer));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MR1),
+         g.DefineAsRegister(node), offset_operand, length_operand,
+         g.UseRegister(buffer), offset_operand);
+  }
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  IA32OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* value_operand =
+      g.CanBeImmediate(value)
+          ? g.UseImmediate(value)
+          : ((rep == kRepWord8 || rep == kRepBit) ? g.UseByteRegister(value)
+                                                  : g.UseRegister(value));
+  InstructionOperand* offset_operand = g.UseRegister(offset);
+  InstructionOperand* length_operand =
+      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+  if (g.CanBeImmediate(buffer)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
+         offset_operand, length_operand, value_operand, offset_operand,
+         g.UseImmediate(buffer));
+  } else {
+    Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
+         offset_operand, length_operand, value_operand, g.UseRegister(buffer),
+         offset_operand);
+  }
+}
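
Both visitors above follow the operand layout that the checked-access macros
in the code generator decode: input 0 is the offset to bounds-check, input 1
the length, then (for stores) the value, with the remaining inputs forming
the memory operand. That is why offset_operand appears twice in each Emit
call, once for the compare and once inside the address. Annotated shape of
the register-base load case (operand names abbreviated):

    Emit(opcode | AddressingModeField::encode(kMode_MR1),
         g.DefineAsRegister(node),
         offset,   // input 0: compared against the length
         length,   // input 1: the limit
         buffer,   // input 2: memory operand base (MemoryOperand decodes from here)
         offset);  // input 3: memory operand index
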
+
+
 // Shared routine for multiple binary operations.
 static void VisitBinop(InstructionSelector* selector, Node* node,
                        InstructionCode opcode, FlagsContinuation* cont) {
@@ -524,20 +420,69 @@ static inline void VisitShift(InstructionSelector* selector, Node* node,
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    Int32BinopMatcher m(node);
-    if (m.right().IsWord32And()) {
-      Int32BinopMatcher mright(right);
-      if (mright.right().Is(0x1F)) {
-        right = mright.left().node();
-      }
-    }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, ecx));
   }
 }
 
 
+namespace {
+
+void VisitMulHigh(InstructionSelector* selector, Node* node,
+                  ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsFixed(node, edx),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUniqueRegister(node->InputAt(1)));
+}
+
+
+void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  InstructionOperand* temps[] = {g.TempRegister(edx)};
+  selector->Emit(opcode, g.DefineAsFixed(node, eax),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
+}
+
+
+void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
+  IA32OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsFixed(node, edx),
+                 g.UseFixed(node->InputAt(0), eax),
+                 g.UseUnique(node->InputAt(1)));
+}
+
+void EmitLea(InstructionSelector* selector, Node* result, Node* index,
+             int scale, Node* base, Node* displacement) {
+  IA32OperandGenerator g(selector);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  AddressingMode mode = g.GenerateMemoryOperandInputs(
+      index, scale, base, displacement, inputs, &input_count);
+
+  DCHECK_NE(0, static_cast<int>(input_count));
+  DCHECK_GE(arraysize(inputs), input_count);
+
+  InstructionOperand* outputs[1];
+  outputs[0] = g.DefineAsRegister(result);
+
+  InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
+
+  selector->Emit(opcode, 1, outputs, input_count, inputs);
+}
+
+}  // namespace
+
+
 void InstructionSelector::VisitWord32Shl(Node* node) {
+  Int32ScaleMatcher m(node, true);
+  if (m.matches()) {
+    Node* index = node->InputAt(0);
+    Node* base = m.power_of_two_plus_one() ? index : NULL;
+    EmitLea(this, node, index, m.scale(), base, NULL);
+    return;
+  }
   VisitShift(this, node, kIA32Shl);
 }
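
The power_of_two_plus_one case matters when the matcher sees a multiply by
3, 5 or 9 (see also VisitInt32Mul further down): such factors are 2^n + 1,
so setting base = index lets a single lea compute index + index * 2^n, e.g.
x * 9 becomes "lea out, [x + x*8]". A hand-written sanity check:

    #include <cassert>
    #include <cstdint>

    inline void CheckPowerOfTwoPlusOne(uint32_t x) {
      assert(x * 3 == x + (x << 1));
      assert(x * 5 == x + (x << 2));
      assert(x * 9 == x + (x << 3));
    }
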
 
@@ -557,37 +502,30 @@ void InstructionSelector::VisitWord32Ror(Node* node) {
 }
 
 
-static bool TryEmitLeaMultAdd(InstructionSelector* selector, Node* node) {
-  Int32BinopMatcher m(node);
-  if (!m.right().HasValue()) return false;
-  int32_t displacement_value = m.right().Value();
-  Node* left = m.left().node();
-  LeaMultiplyMatcher lmm(left);
-  if (!lmm.Matches()) return false;
-  AddressingMode mode;
-  size_t input_count;
-  IA32OperandGenerator g(selector);
-  InstructionOperand* index = g.UseRegister(lmm.Left());
-  InstructionOperand* displacement = g.TempImmediate(displacement_value);
-  InstructionOperand* inputs[] = {index, displacement, displacement};
-  if (lmm.Displacement() != 0) {
-    input_count = 3;
-    inputs[1] = index;
-    mode = kMode_MR1I;
-  } else {
-    input_count = 2;
-    mode = kMode_M1I;
-  }
-  mode = AdjustAddressingMode(mode, lmm.Power());
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  selector->Emit(kIA32Lea | AddressingModeField::encode(mode), 1, outputs,
-                 input_count, inputs);
-  return true;
-}
+void InstructionSelector::VisitInt32Add(Node* node) {
+  IA32OperandGenerator g(this);
+
+  // Try to match the Add to a lea pattern.
+  BaseWithIndexAndDisplacement32Matcher m(node);
+  if (m.matches() &&
+      (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+    InstructionOperand* inputs[4];
+    size_t input_count = 0;
+    AddressingMode mode = g.GenerateMemoryOperandInputs(
+        m.index(), m.scale(), m.base(), m.displacement(), inputs, &input_count);
 
+    DCHECK_NE(0, static_cast<int>(input_count));
+    DCHECK_GE(arraysize(inputs), input_count);
 
-void InstructionSelector::VisitInt32Add(Node* node) {
-  if (TryEmitLeaMultAdd(this, node)) return;
+    InstructionOperand* outputs[1];
+    outputs[0] = g.DefineAsRegister(node);
+
+    InstructionCode opcode = AddressingModeField::encode(mode) | kIA32Lea;
+    Emit(opcode, 1, outputs, input_count, inputs);
+    return;
+  }
+
+  // No lea pattern matched; fall back to add.
   VisitBinop(this, node, kIA32Add);
 }
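
Illustrative effect of the match above (hand-written, not V8 code): an
expression such as a + b*4 + 12 is selected as one "lea out, [a + b*4 + 12]"
(kMode_MR4I) instead of a shift plus two adds, and neither input register is
clobbered:

    #include <cstdint>

    inline uint32_t LeaMR4IModel(uint32_t a, uint32_t b, int32_t disp) {
      return a + b * 4 + static_cast<uint32_t>(disp);
    }
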
 
@@ -603,36 +541,17 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
 }
 
 
-static bool TryEmitLeaMult(InstructionSelector* selector, Node* node) {
-  LeaMultiplyMatcher lea(node);
-  // Try to match lea.
-  if (!lea.Matches()) return false;
-  AddressingMode mode;
-  size_t input_count;
-  IA32OperandGenerator g(selector);
-  InstructionOperand* left = g.UseRegister(lea.Left());
-  InstructionOperand* inputs[] = {left, left};
-  if (lea.Displacement() != 0) {
-    input_count = 2;
-    mode = kMode_MR1;
-  } else {
-    input_count = 1;
-    mode = kMode_M1;
-  }
-  mode = AdjustAddressingMode(mode, lea.Power());
-  InstructionOperand* outputs[] = {g.DefineAsRegister(node)};
-  selector->Emit(kIA32Lea | AddressingModeField::encode(mode), 1, outputs,
-                 input_count, inputs);
-  return true;
-}
-
-
 void InstructionSelector::VisitInt32Mul(Node* node) {
-  if (TryEmitLeaMult(this, node)) return;
+  Int32ScaleMatcher m(node, true);
+  if (m.matches()) {
+    Node* index = node->InputAt(0);
+    Node* base = m.power_of_two_plus_one() ? index : NULL;
+    EmitLea(this, node, index, m.scale(), base, NULL);
+    return;
+  }
   IA32OperandGenerator g(this);
-  Int32BinopMatcher m(node);
-  Node* left = m.left().node();
-  Node* right = m.right().node();
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
   if (g.CanBeImmediate(right)) {
     Emit(kIA32Imul, g.DefineAsRegister(node), g.Use(left),
          g.UseImmediate(right));
@@ -646,36 +565,6 @@ void InstructionSelector::VisitInt32Mul(Node* node) {
 }
 
 
-namespace {
-
-void VisitMulHigh(InstructionSelector* selector, Node* node,
-                  ArchOpcode opcode) {
-  IA32OperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsFixed(node, edx),
-                 g.UseFixed(node->InputAt(0), eax),
-                 g.UseUniqueRegister(node->InputAt(1)));
-}
-
-
-void VisitDiv(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
-  IA32OperandGenerator g(selector);
-  InstructionOperand* temps[] = {g.TempRegister(edx)};
-  selector->Emit(opcode, g.DefineAsFixed(node, eax),
-                 g.UseFixed(node->InputAt(0), eax),
-                 g.UseUnique(node->InputAt(1)), arraysize(temps), temps);
-}
-
-
-void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
-  IA32OperandGenerator g(selector);
-  selector->Emit(opcode, g.DefineAsFixed(node, edx),
-                 g.UseFixed(node->InputAt(0), eax),
-                 g.UseUnique(node->InputAt(1)));
-}
-
-}  // namespace
-
-
 void InstructionSelector::VisitInt32MulHigh(Node* node) {
   VisitMulHigh(this, node, kIA32ImulHigh);
 }
@@ -744,29 +633,49 @@ void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
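
The same AVX/SSE split repeats for Sub, Mul and Div below. SSE addsd is
destructive (the destination doubles as the first source), hence
DefineSameAsFirst; AVX vaddsd takes a separate destination, so
DefineAsRegister leaves the register allocator a free choice. A hand-written
intrinsics analogue, illustrative only:

    #include <emmintrin.h>

    // Compiles to three-operand vaddsd when AVX is enabled, otherwise to the
    // two-operand addsd, which forces the result into register a.
    __m128d AddScalarDouble(__m128d a, __m128d b) { return _mm_add_sd(a, b); }
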
 
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Mul(Node* node) {
   IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Div(Node* node) {
   IA32OperandGenerator g(this);
-  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Div, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
@@ -810,7 +719,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
 
 void InstructionSelector::VisitCall(Node* node) {
   IA32OperandGenerator g(this);
-  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
 
   FrameStateDescriptor* frame_state_descriptor = NULL;
 
@@ -1010,10 +919,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-  if (IsNextInAssemblyOrder(tbranch)) {  // We can fallthru to the true block.
-    cont.Negate();
-    cont.SwapBlocks();
-  }
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
@@ -1096,7 +1001,8 @@ InstructionSelector::SupportedMachineOperatorFlags() {
   if (CpuFeatures::IsSupported(SSE4_1)) {
     return MachineOperatorBuilder::kFloat64Floor |
            MachineOperatorBuilder::kFloat64Ceil |
-           MachineOperatorBuilder::kFloat64RoundTruncate;
+           MachineOperatorBuilder::kFloat64RoundTruncate |
+           MachineOperatorBuilder::kWord32ShiftIsSafe;
   }
   return MachineOperatorBuilder::Flag::kNoFlags;
 }
index 2d10f6f..12cc34f 100644 (file)
@@ -46,9 +46,9 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
     const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
-                                   flags);
+                                   flags, properties);
 }
 
 
index 3bd12fe..ea17854 100644 (file)
@@ -15,6 +15,8 @@
 #include "src/compiler/ia32/instruction-codes-ia32.h"
 #elif V8_TARGET_ARCH_MIPS
 #include "src/compiler/mips/instruction-codes-mips.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/compiler/mips64/instruction-codes-mips64.h"
 #elif V8_TARGET_ARCH_X64
 #include "src/compiler/x64/instruction-codes-x64.h"
 #else
@@ -37,6 +39,18 @@ namespace compiler {
   V(ArchRet)                \
   V(ArchStackPointer)       \
   V(ArchTruncateDoubleToI)  \
+  V(CheckedLoadInt8)        \
+  V(CheckedLoadUint8)       \
+  V(CheckedLoadInt16)       \
+  V(CheckedLoadUint16)      \
+  V(CheckedLoadWord32)      \
+  V(CheckedLoadFloat32)     \
+  V(CheckedLoadFloat64)     \
+  V(CheckedStoreWord8)      \
+  V(CheckedStoreWord16)     \
+  V(CheckedStoreWord32)     \
+  V(CheckedStoreFloat32)    \
+  V(CheckedStoreFloat64)    \
   TARGET_ARCH_OPCODE_LIST(V)
 
 enum ArchOpcode {
@@ -96,6 +110,10 @@ enum FlagsCondition {
   kNotOverflow
 };
 
+inline FlagsCondition NegateFlagsCondition(FlagsCondition condition) {
+  return static_cast<FlagsCondition>(condition ^ 1);
+}
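
The XOR relies on FlagsCondition declaring each condition directly next to
its complement (e.g. kOverflow/kNotOverflow above), so toggling bit 0 is
negation; the FlagsContinuation::Negate change further down reuses this
helper. A stand-in illustration of the pairing assumption:

    enum SketchCondition { kSketchEqual = 0, kSketchNotEqual = 1 };
    static_assert((kSketchEqual ^ 1) == kSketchNotEqual,
                  "paired declaration order makes XOR-by-1 a negation");
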
+
 std::ostream& operator<<(std::ostream& os, const FlagsCondition& fc);
 
 // The InstructionCode is an opaque, target-specific integer that encodes
index 53e288d..bdcd952 100644 (file)
@@ -5,7 +5,6 @@
 #ifndef V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
 #define V8_COMPILER_INSTRUCTION_SELECTOR_IMPL_H_
 
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/linkage.h"
@@ -137,8 +136,8 @@ class OperandGenerator {
   }
 
   InstructionOperand* Label(BasicBlock* block) {
-    // TODO(bmeurer): We misuse ImmediateOperand here.
-    return TempImmediate(block->rpo_number());
+    int index = sequence()->AddImmediate(Constant(block->GetRpoNumber()));
+    return ImmediateOperand::Create(index, zone());
   }
 
  protected:
@@ -257,7 +256,7 @@ class FlagsContinuation FINAL {
 
   void Negate() {
     DCHECK(!IsNone());
-    condition_ = static_cast<FlagsCondition>(condition_ ^ 1);
+    condition_ = NegateFlagsCondition(condition_);
   }
 
   void Commute() {
@@ -317,8 +316,6 @@ class FlagsContinuation FINAL {
     if (negate) Negate();
   }
 
-  void SwapBlocks() { std::swap(true_block_, false_block_); }
-
   // Encodes this flags continuation into the given opcode.
   InstructionCode Encode(InstructionCode opcode) {
     opcode |= FlagsModeField::encode(mode_);
@@ -341,10 +338,10 @@ class FlagsContinuation FINAL {
 // TODO(bmeurer): Get rid of the CallBuffer business and make
 // InstructionSelector::VisitCall platform independent instead.
 struct CallBuffer {
-  CallBuffer(Zone* zone, CallDescriptor* descriptor,
+  CallBuffer(Zone* zone, const CallDescriptor* descriptor,
              FrameStateDescriptor* frame_state);
 
-  CallDescriptor* descriptor;
+  const CallDescriptor* descriptor;
   FrameStateDescriptor* frame_state_descriptor;
   NodeVector output_nodes;
   InstructionOperandVector outputs;
index e601b2c..ffb8f9f 100644 (file)
@@ -47,9 +47,8 @@ void InstructionSelector::SelectInstructions() {
       if (phi->opcode() != IrOpcode::kPhi) continue;
 
       // Mark all inputs as used.
-      Node::Inputs inputs = phi->inputs();
-      for (InputIter k = inputs.begin(); k != inputs.end(); ++k) {
-        MarkAsUsed(*k);
+      for (Node* const k : phi->inputs()) {
+        MarkAsUsed(k);
       }
     }
   }
@@ -133,6 +132,31 @@ Instruction* InstructionSelector::Emit(
 
 
 Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+    InstructionOperand* e, size_t temp_count, InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c, d, e};
+  size_t input_count = arraysize(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
+    InstructionCode opcode, InstructionOperand* output, InstructionOperand* a,
+    InstructionOperand* b, InstructionOperand* c, InstructionOperand* d,
+    InstructionOperand* e, InstructionOperand* f, size_t temp_count,
+    InstructionOperand** temps) {
+  size_t output_count = output == NULL ? 0 : 1;
+  InstructionOperand* inputs[] = {a, b, c, d, e, f};
+  size_t input_count = arraysize(inputs);
+  return Emit(opcode, output_count, &output, input_count, inputs, temp_count,
+              temps);
+}
+
+
+Instruction* InstructionSelector::Emit(
     InstructionCode opcode, size_t output_count, InstructionOperand** outputs,
     size_t input_count, InstructionOperand** inputs, size_t temp_count,
     InstructionOperand** temps) {
@@ -149,11 +173,6 @@ Instruction* InstructionSelector::Emit(Instruction* instr) {
 }
 
 
-bool InstructionSelector::IsNextInAssemblyOrder(const BasicBlock* block) const {
-  return current_block_->GetAoNumber().IsNext(block->GetAoNumber());
-}
-
-
 bool InstructionSelector::CanCover(Node* user, Node* node) const {
   return node->OwnedBy(user) &&
          schedule()->block(node) == schedule()->block(user);
@@ -274,7 +293,7 @@ void InstructionSelector::MarkAsRepresentation(MachineType rep, Node* node) {
 
 // TODO(bmeurer): Get rid of the CallBuffer business and make
 // InstructionSelector::VisitCall platform independent instead.
-CallBuffer::CallBuffer(Zone* zone, CallDescriptor* d,
+CallBuffer::CallBuffer(Zone* zone, const CallDescriptor* d,
                        FrameStateDescriptor* frame_desc)
     : descriptor(d),
       frame_state_descriptor(frame_desc),
@@ -295,7 +314,7 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
                                                bool call_code_immediate,
                                                bool call_address_immediate) {
   OperandGenerator g(this);
-  DCHECK_EQ(call->op()->OutputCount(),
+  DCHECK_EQ(call->op()->ValueOutputCount(),
             static_cast<int>(buffer->descriptor->ReturnCount()));
   DCHECK_EQ(
       call->op()->ValueInputCount(),
@@ -384,11 +403,10 @@ void InstructionSelector::InitializeCallBuffer(Node* call, CallBuffer* buffer,
   // arguments require an explicit push instruction before the call and do
   // not appear as arguments to the call. Everything else ends up
   // as an InstructionOperand argument to the call.
-  InputIter iter(call->inputs().begin());
+  auto iter(call->inputs().begin());
   int pushed_count = 0;
   for (size_t index = 0; index < input_count; ++iter, ++index) {
     DCHECK(iter != call->inputs().end());
-    DCHECK(index == static_cast<size_t>(iter.index()));
     DCHECK((*iter)->op()->opcode() != IrOpcode::kFrameState);
     if (index == 0) continue;  // The first argument (callee) is already done.
     InstructionOperand* op =
@@ -538,6 +556,10 @@ MachineType InstructionSelector::GetMachineType(Node* node) {
       return OpParameter<LoadRepresentation>(node);
     case IrOpcode::kStore:
       return kMachNone;
+    case IrOpcode::kCheckedLoad:
+      return OpParameter<MachineType>(node);
+    case IrOpcode::kCheckedStore:
+      return kMachNone;
     case IrOpcode::kWord32And:
     case IrOpcode::kWord32Or:
     case IrOpcode::kWord32Xor:
@@ -808,6 +830,13 @@ void InstructionSelector::VisitNode(Node* node) {
       return MarkAsDouble(node), VisitFloat64RoundTiesAway(node);
     case IrOpcode::kLoadStackPointer:
       return VisitLoadStackPointer(node);
+    case IrOpcode::kCheckedLoad: {
+      MachineType rep = OpParameter<MachineType>(node);
+      MarkAsRepresentation(rep, node);
+      return VisitCheckedLoad(node);
+    }
+    case IrOpcode::kCheckedStore:
+      return VisitCheckedStore(node);
     default:
       V8_Fatal(__FILE__, __LINE__, "Unexpected operator #%d:%s @ node #%d",
                node->opcode(), node->op()->mnemonic(), node->id());
@@ -925,16 +954,15 @@ void InstructionSelector::VisitParameter(Node* node) {
 
 
 void InstructionSelector::VisitPhi(Node* node) {
-  // TODO(bmeurer): Emit a PhiInstruction here.
+  const int input_count = node->op()->ValueInputCount();
   PhiInstruction* phi = new (instruction_zone())
-      PhiInstruction(instruction_zone(), GetVirtualRegister(node));
+      PhiInstruction(instruction_zone(), GetVirtualRegister(node),
+                     static_cast<size_t>(input_count));
   sequence()->InstructionBlockAt(current_block_->GetRpoNumber())->AddPhi(phi);
-  const int input_count = node->op()->InputCount();
-  phi->operands().reserve(static_cast<size_t>(input_count));
   for (int i = 0; i < input_count; ++i) {
     Node* const input = node->InputAt(i);
     MarkAsUsed(input);
-    phi->operands().push_back(GetVirtualRegister(input));
+    phi->Extend(instruction_zone(), GetVirtualRegister(input));
   }
 }
 
@@ -967,14 +995,9 @@ void InstructionSelector::VisitConstant(Node* node) {
 
 
 void InstructionSelector::VisitGoto(BasicBlock* target) {
-  if (IsNextInAssemblyOrder(target)) {
-    // fall through to the next block.
-    Emit(kArchNop, NULL)->MarkAsControl();
-  } else {
-    // jump to the next block.
-    OperandGenerator g(this);
-    Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
-  }
+  // Emit an unconditional jump; it is elided at code-generation time when
+  // the target is next in assembly order (see AssembleArchJump).
+  OperandGenerator g(this);
+  Emit(kArchJmp, NULL, g.Label(target))->MarkAsControl();
 }
 
 
index 4e916be..5e3c52f 100644 (file)
@@ -59,6 +59,16 @@ class InstructionSelector FINAL {
                     InstructionOperand* a, InstructionOperand* b,
                     InstructionOperand* c, InstructionOperand* d,
                     size_t temp_count = 0, InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    InstructionOperand* c, InstructionOperand* d,
+                    InstructionOperand* e, size_t temp_count = 0,
+                    InstructionOperand* *temps = NULL);
+  Instruction* Emit(InstructionCode opcode, InstructionOperand* output,
+                    InstructionOperand* a, InstructionOperand* b,
+                    InstructionOperand* c, InstructionOperand* d,
+                    InstructionOperand* e, InstructionOperand* f,
+                    size_t temp_count = 0, InstructionOperand* *temps = NULL);
   Instruction* Emit(InstructionCode opcode, size_t output_count,
                     InstructionOperand** outputs, size_t input_count,
                     InstructionOperand** inputs, size_t temp_count = 0,
@@ -123,10 +133,6 @@ class InstructionSelector FINAL {
  private:
   friend class OperandGenerator;
 
-  // Checks if {block} will appear directly after {current_block_} when
-  // assembling code, in which case, a fall-through can be used.
-  bool IsNextInAssemblyOrder(const BasicBlock* block) const;
-
   // Inform the instruction selection that {node} was just defined.
   void MarkAsDefined(Node* node);
 
index d575be4..f83cdeb 100644 (file)
@@ -3,7 +3,6 @@
 // found in the LICENSE file.
 
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/instruction.h"
 
@@ -16,8 +15,6 @@ std::ostream& operator<<(std::ostream& os,
   const InstructionOperand& op = *printable.op_;
   const RegisterConfiguration* conf = printable.register_configuration_;
   switch (op.kind()) {
-    case InstructionOperand::INVALID:
-      return os << "(0)";
     case InstructionOperand::UNALLOCATED: {
       const UnallocatedOperand* unalloc = UnallocatedOperand::cast(&op);
       os << "v" << unalloc->virtual_register();
@@ -120,6 +117,16 @@ bool ParallelMove::IsRedundant() const {
 }
 
 
+bool GapInstruction::IsRedundant() const {
+  for (int i = GapInstruction::FIRST_INNER_POSITION;
+       i <= GapInstruction::LAST_INNER_POSITION; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant())
+      return false;
+  }
+  return true;
+}
+
+
 std::ostream& operator<<(std::ostream& os,
                          const PrintableParallelMove& printable) {
   const ParallelMove& pm = *printable.parallel_move_;
@@ -332,6 +339,8 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
                        constant.ToExternalReference().address());
     case Constant::kHeapObject:
       return os << Brief(*constant.ToHeapObject());
+    case Constant::kRpoNumber:
+      return os << "RPO" << constant.ToRpoNumber().ToInt();
   }
   UNREACHABLE();
   return os;
@@ -339,7 +348,6 @@ std::ostream& operator<<(std::ostream& os, const Constant& constant) {
 
 
 InstructionBlock::InstructionBlock(Zone* zone, BasicBlock::Id id,
-                                   BasicBlock::RpoNumber ao_number,
                                    BasicBlock::RpoNumber rpo_number,
                                    BasicBlock::RpoNumber loop_header,
                                    BasicBlock::RpoNumber loop_end,
@@ -348,7 +356,7 @@ InstructionBlock::InstructionBlock(Zone* zone, BasicBlock::Id id,
       predecessors_(zone),
       phis_(zone),
       id_(id),
-      ao_number_(ao_number),
+      ao_number_(rpo_number),
       rpo_number_(rpo_number),
       loop_header_(loop_header),
       loop_end_(loop_end),
@@ -383,8 +391,8 @@ static BasicBlock::RpoNumber GetLoopEndRpo(const BasicBlock* block) {
 static InstructionBlock* InstructionBlockFor(Zone* zone,
                                              const BasicBlock* block) {
   InstructionBlock* instr_block = new (zone) InstructionBlock(
-      zone, block->id(), block->GetAoNumber(), block->GetRpoNumber(),
-      GetRpo(block->loop_header()), GetLoopEndRpo(block), block->deferred());
+      zone, block->id(), block->GetRpoNumber(), GetRpo(block->loop_header()),
+      GetLoopEndRpo(block), block->deferred());
   // Map successors and predecessors.
   instr_block->successors().reserve(block->SuccessorCount());
   for (auto it = block->successors_begin(); it != block->successors_end();
@@ -412,10 +420,26 @@ InstructionBlocks* InstructionSequence::InstructionBlocksFor(
     DCHECK((*it)->GetRpoNumber().ToSize() == rpo_number);
     (*blocks)[rpo_number] = InstructionBlockFor(zone, *it);
   }
+  ComputeAssemblyOrder(blocks);
   return blocks;
 }
 
 
+void InstructionSequence::ComputeAssemblyOrder(InstructionBlocks* blocks) {
+  int ao = 0;
+  for (auto const block : *blocks) {
+    if (!block->IsDeferred()) {
+      block->set_ao_number(BasicBlock::RpoNumber::FromInt(ao++));
+    }
+  }
+  for (auto const block : *blocks) {
+    if (block->IsDeferred()) {
+      block->set_ao_number(BasicBlock::RpoNumber::FromInt(ao++));
+    }
+  }
+}
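
ComputeAssemblyOrder replaces the ao_number previously supplied by the
scheduler: non-deferred blocks are numbered first in RPO order, deferred
(cold) blocks afterwards, so ao_number_ and rpo_number_ now diverge exactly
when deferred code is present. A hand-written model of the numbering:

    #include <vector>

    std::vector<int> AssemblyOrderModel(const std::vector<bool>& deferred) {
      std::vector<int> ao(deferred.size());
      int next = 0;
      for (size_t i = 0; i < deferred.size(); ++i)
        if (!deferred[i]) ao[i] = next++;
      for (size_t i = 0; i < deferred.size(); ++i)
        if (deferred[i]) ao[i] = next++;
      return ao;  // e.g. {false, true, false} -> {0, 2, 1}
    }
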
+
+
 InstructionSequence::InstructionSequence(Zone* instruction_zone,
                                          InstructionBlocks* instruction_blocks)
     : zone_(instruction_zone),
@@ -435,8 +459,8 @@ InstructionSequence::InstructionSequence(Zone* instruction_zone,
 
 
 BlockStartInstruction* InstructionSequence::GetBlockStart(
-    BasicBlock::RpoNumber rpo) {
-  InstructionBlock* block = InstructionBlockAt(rpo);
+    BasicBlock::RpoNumber rpo) const {
+  const InstructionBlock* block = InstructionBlockAt(rpo);
   return BlockStartInstruction::cast(InstructionAt(block->code_start()));
 }
 
@@ -644,9 +668,12 @@ std::ostream& operator<<(std::ostream& os,
     os << "\n";
 
     for (auto phi : block->phis()) {
-      os << "     phi: v" << phi->virtual_register() << " =";
-      for (auto op_vreg : phi->operands()) {
-        os << " v" << op_vreg;
+      PrintableInstructionOperand printable_op = {
+          printable.register_configuration_, phi->output()};
+      os << "     phi: " << printable_op << " =";
+      for (auto input : phi->inputs()) {
+        printable_op.op_ = input;
+        os << " " << printable_op;
       }
       os << "\n";
     }
index b6fcb3c..daa83f2 100644 (file)
@@ -39,7 +39,6 @@ const InstructionCode kSourcePositionInstruction = -3;
 class InstructionOperand : public ZoneObject {
  public:
   enum Kind {
-    INVALID,
     UNALLOCATED,
     CONSTANT,
     IMMEDIATE,
@@ -49,7 +48,6 @@ class InstructionOperand : public ZoneObject {
     DOUBLE_REGISTER
   };
 
-  InstructionOperand() : value_(KindField::encode(INVALID)) {}
   InstructionOperand(Kind kind, int index) { ConvertTo(kind, index); }
 
   Kind kind() const { return KindField::decode(value_); }
@@ -58,16 +56,15 @@ class InstructionOperand : public ZoneObject {
   bool Is##name() const { return kind() == type; }
   INSTRUCTION_OPERAND_LIST(INSTRUCTION_OPERAND_PREDICATE)
   INSTRUCTION_OPERAND_PREDICATE(Unallocated, UNALLOCATED, 0)
-  INSTRUCTION_OPERAND_PREDICATE(Ignored, INVALID, 0)
 #undef INSTRUCTION_OPERAND_PREDICATE
-  bool Equals(InstructionOperand* other) const {
+  bool Equals(const InstructionOperand* other) const {
     return value_ == other->value_;
   }
 
   void ConvertTo(Kind kind, int index) {
     if (kind == REGISTER || kind == DOUBLE_REGISTER) DCHECK(index >= 0);
     value_ = KindField::encode(kind);
-    value_ |= index << KindField::kSize;
+    value_ |= bit_cast<unsigned>(index << KindField::kSize);
     DCHECK(this->index() == index);
   }
 
@@ -76,9 +73,9 @@ class InstructionOperand : public ZoneObject {
   static void TearDownCaches();
 
  protected:
-  typedef BitField<Kind, 0, 3> KindField;
+  typedef BitField64<Kind, 0, 3> KindField;
 
-  unsigned value_;
+  uint64_t value_;
 };
 
 typedef ZoneVector<InstructionOperand*> InstructionOperandVector;
@@ -120,6 +117,7 @@ class UnallocatedOperand : public InstructionOperand {
 
   explicit UnallocatedOperand(ExtendedPolicy policy)
       : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(USED_AT_END);
@@ -128,14 +126,16 @@ class UnallocatedOperand : public InstructionOperand {
   UnallocatedOperand(BasicPolicy policy, int index)
       : InstructionOperand(UNALLOCATED, 0) {
     DCHECK(policy == FIXED_SLOT);
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(policy);
-    value_ |= index << FixedSlotIndexField::kShift;
+    value_ |= static_cast<int64_t>(index) << FixedSlotIndexField::kShift;
     DCHECK(this->fixed_slot_index() == index);
   }
 
   UnallocatedOperand(ExtendedPolicy policy, int index)
       : InstructionOperand(UNALLOCATED, 0) {
     DCHECK(policy == FIXED_REGISTER || policy == FIXED_DOUBLE_REGISTER);
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(USED_AT_END);
@@ -144,6 +144,7 @@ class UnallocatedOperand : public InstructionOperand {
 
   UnallocatedOperand(ExtendedPolicy policy, Lifetime lifetime)
       : InstructionOperand(UNALLOCATED, 0) {
+    value_ |= VirtualRegisterField::encode(kInvalidVirtualRegister);
     value_ |= BasicPolicyField::encode(EXTENDED_POLICY);
     value_ |= ExtendedPolicyField::encode(policy);
     value_ |= LifetimeField::encode(lifetime);
@@ -181,24 +182,25 @@ class UnallocatedOperand : public InstructionOperand {
   //     +------------------------------------------+    P ... Policy
   //
   // The slot index is a signed value which requires us to decode it manually
-  // instead of using the BitField utility class.
+  // instead of using the BitField64 utility class.
 
   // The superclass has a KindField.
   STATIC_ASSERT(KindField::kSize == 3);
 
   // BitFields for all unallocated operands.
-  class BasicPolicyField : public BitField<BasicPolicy, 3, 1> {};
-  class VirtualRegisterField : public BitField<unsigned, 4, 18> {};
+  class BasicPolicyField : public BitField64<BasicPolicy, 3, 1> {};
+  class VirtualRegisterField : public BitField64<unsigned, 4, 30> {};
 
   // BitFields specific to BasicPolicy::FIXED_SLOT.
-  class FixedSlotIndexField : public BitField<int, 22, 10> {};
+  class FixedSlotIndexField : public BitField64<int, 34, 30> {};
 
   // BitFields specific to BasicPolicy::EXTENDED_POLICY.
-  class ExtendedPolicyField : public BitField<ExtendedPolicy, 22, 3> {};
-  class LifetimeField : public BitField<Lifetime, 25, 1> {};
-  class FixedRegisterField : public BitField<int, 26, 6> {};
+  class ExtendedPolicyField : public BitField64<ExtendedPolicy, 34, 3> {};
+  class LifetimeField : public BitField64<Lifetime, 37, 1> {};
+  class FixedRegisterField : public BitField64<int, 38, 6> {};
 
-  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax + 1;
+  static const int kInvalidVirtualRegister = VirtualRegisterField::kMax;
+  static const int kMaxVirtualRegisters = VirtualRegisterField::kMax;
   static const int kFixedSlotIndexWidth = FixedSlotIndexField::kSize;
   static const int kMaxFixedSlotIndex = (1 << (kFixedSlotIndexWidth - 1)) - 1;
   static const int kMinFixedSlotIndex = -(1 << (kFixedSlotIndexWidth - 1));
@@ -242,7 +244,8 @@ class UnallocatedOperand : public InstructionOperand {
   // [fixed_slot_index]: Only for FIXED_SLOT.
   int fixed_slot_index() const {
     DCHECK(HasFixedSlotPolicy());
-    return static_cast<int>(value_) >> FixedSlotIndexField::kShift;
+    return static_cast<int>(bit_cast<int64_t>(value_) >>
+                            FixedSlotIndexField::kShift);
   }
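
The decode above works because the slot index lives in the topmost bits of
the now 64-bit payload: reinterpreting value_ as int64_t makes the right
shift arithmetic, so the sign extends for free. A standalone equivalent
under the same top-bits assumption:

    #include <cstdint>
    #include <cstring>

    inline int DecodeTopSignedField(uint64_t payload, int shift) {
      int64_t bits;
      std::memcpy(&bits, &payload, sizeof bits);  // bit_cast
      return static_cast<int>(bits >> shift);     // arithmetic shift keeps sign
    }
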
 
   // [fixed_register_index]: Only for FIXED_REGISTER or FIXED_DOUBLE_REGISTER.
@@ -286,16 +289,12 @@ class MoveOperands FINAL {
   }
 
   // A move is redundant if it's been eliminated, if its source and
-  // destination are the same, or if its destination is unneeded or constant.
+  // destination are the same, or if its destination is constant.
   bool IsRedundant() const {
-    return IsEliminated() || source_->Equals(destination_) || IsIgnored() ||
+    return IsEliminated() || source_->Equals(destination_) ||
            (destination_ != NULL && destination_->IsConstant());
   }
 
-  bool IsIgnored() const {
-    return destination_ != NULL && destination_->IsIgnored();
-  }
-
   // We clear both operands to indicate move that's been eliminated.
   void Eliminate() { source_ = destination_ = NULL; }
   bool IsEliminated() const {
@@ -332,13 +331,18 @@ class SubKindOperand FINAL : public InstructionOperand {
     return reinterpret_cast<SubKindOperand*>(op);
   }
 
+  static const SubKindOperand* cast(const InstructionOperand* op) {
+    DCHECK(op->kind() == kOperandKind);
+    return reinterpret_cast<const SubKindOperand*>(op);
+  }
+
   static void SetUpCache();
   static void TearDownCache();
 
  private:
   static SubKindOperand* cache;
 
-  SubKindOperand() : InstructionOperand() {}
+  SubKindOperand() : InstructionOperand(kOperandKind, 0) {}  // For the caches.
   explicit SubKindOperand(int index)
       : InstructionOperand(kOperandKind, index) {}
 };
@@ -431,6 +435,10 @@ class Instruction : public ZoneObject {
     DCHECK(i < InputCount());
     return operands_[OutputCount() + i];
   }
+  void SetInputAt(size_t i, InstructionOperand* operand) {
+    DCHECK(i < InputCount());
+    operands_[OutputCount() + i] = operand;
+  }
 
   size_t TempCount() const { return TempCountField::decode(bit_field_); }
   InstructionOperand* TempAt(size_t i) const {
@@ -510,6 +518,17 @@ class Instruction : public ZoneObject {
 
   void operator delete(void* pointer, void* location) { UNREACHABLE(); }
 
+  void OverwriteWithNop() {
+    opcode_ = ArchOpcodeField::encode(kArchNop);
+    bit_field_ = 0;
+    pointer_map_ = NULL;
+  }
+
+  bool IsNop() const {
+    return arch_opcode() == kArchNop && InputCount() == 0 &&
+           OutputCount() == 0 && TempCount() == 0;
+  }
+
  protected:
   explicit Instruction(InstructionCode opcode)
       : opcode_(opcode),
@@ -585,6 +604,14 @@ class GapInstruction : public Instruction {
     return parallel_moves_[pos];
   }
 
+  const ParallelMove* GetParallelMove(InnerPosition pos) const {
+    return parallel_moves_[pos];
+  }
+
+  bool IsRedundant() const;
+
+  ParallelMove** parallel_moves() { return parallel_moves_; }
+
   static GapInstruction* New(Zone* zone) {
     void* buffer = zone->New(sizeof(GapInstruction));
     return new (buffer) GapInstruction(kGapInstruction);
@@ -629,6 +656,11 @@ class BlockStartInstruction FINAL : public GapInstruction {
     return static_cast<BlockStartInstruction*>(instr);
   }
 
+  static const BlockStartInstruction* cast(const Instruction* instr) {
+    DCHECK(instr->IsBlockStart());
+    return static_cast<const BlockStartInstruction*>(instr);
+  }
+
  private:
   BlockStartInstruction() : GapInstruction(kBlockStartInstruction) {}
 };
@@ -673,7 +705,8 @@ class Constant FINAL {
     kFloat32,
     kFloat64,
     kExternalReference,
-    kHeapObject
+    kHeapObject,
+    kRpoNumber
   };
 
   explicit Constant(int32_t v) : type_(kInt32), value_(v) {}
@@ -684,6 +717,8 @@ class Constant FINAL {
       : type_(kExternalReference), value_(bit_cast<intptr_t>(ref)) {}
   explicit Constant(Handle<HeapObject> obj)
       : type_(kHeapObject), value_(bit_cast<intptr_t>(obj)) {}
+  explicit Constant(BasicBlock::RpoNumber rpo)
+      : type_(kRpoNumber), value_(rpo.ToInt()) {}
 
   Type type() const { return type_; }
 
@@ -716,6 +751,11 @@ class Constant FINAL {
     return bit_cast<ExternalReference>(static_cast<intptr_t>(value_));
   }
 
+  BasicBlock::RpoNumber ToRpoNumber() const {
+    DCHECK_EQ(kRpoNumber, type());
+    return BasicBlock::RpoNumber::FromInt(static_cast<int>(value_));
+  }
+
   Handle<HeapObject> ToHeapObject() const {
     DCHECK_EQ(kHeapObject, type());
     return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
@@ -768,19 +808,45 @@ class FrameStateDescriptor : public ZoneObject {
 std::ostream& operator<<(std::ostream& os, const Constant& constant);
 
 
-// TODO(dcarney): this is a temporary hack.  turn into an actual instruction.
 class PhiInstruction FINAL : public ZoneObject {
  public:
-  PhiInstruction(Zone* zone, int virtual_register)
-      : virtual_register_(virtual_register), operands_(zone) {}
+  typedef ZoneVector<InstructionOperand*> Inputs;
+
+  PhiInstruction(Zone* zone, int virtual_register, size_t reserved_input_count)
+      : virtual_register_(virtual_register),
+        operands_(zone),
+        output_(nullptr),
+        inputs_(zone) {
+    UnallocatedOperand* output =
+        new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
+    output->set_virtual_register(virtual_register);
+    output_ = output;
+    inputs_.reserve(reserved_input_count);
+    operands_.reserve(reserved_input_count);
+  }
 
   int virtual_register() const { return virtual_register_; }
   const IntVector& operands() const { return operands_; }
-  IntVector& operands() { return operands_; }
+
+  void Extend(Zone* zone, int virtual_register) {
+    UnallocatedOperand* input =
+        new (zone) UnallocatedOperand(UnallocatedOperand::ANY);
+    input->set_virtual_register(virtual_register);
+    operands_.push_back(virtual_register);
+    inputs_.push_back(input);
+  }
+
+  InstructionOperand* output() const { return output_; }
+  const Inputs& inputs() const { return inputs_; }
+  Inputs& inputs() { return inputs_; }
 
  private:
+  // TODO(dcarney): some of these fields are only for verification, move them to
+  // verifier.
   const int virtual_register_;
   IntVector operands_;
+  InstructionOperand* output_;
+  Inputs inputs_;
 };
 
 
@@ -788,7 +854,6 @@ class PhiInstruction FINAL : public ZoneObject {
 class InstructionBlock FINAL : public ZoneObject {
  public:
   InstructionBlock(Zone* zone, BasicBlock::Id id,
-                   BasicBlock::RpoNumber ao_number,
                    BasicBlock::RpoNumber rpo_number,
                    BasicBlock::RpoNumber loop_header,
                    BasicBlock::RpoNumber loop_end, bool deferred);
@@ -840,13 +905,16 @@ class InstructionBlock FINAL : public ZoneObject {
   const PhiInstructions& phis() const { return phis_; }
   void AddPhi(PhiInstruction* phi) { phis_.push_back(phi); }
 
+  void set_ao_number(BasicBlock::RpoNumber ao_number) {
+    ao_number_ = ao_number;
+  }
+
  private:
   Successors successors_;
   Predecessors predecessors_;
   PhiInstructions phis_;
   const BasicBlock::Id id_;
-  const BasicBlock::RpoNumber ao_number_;  // Assembly order number.
-  // TODO(dcarney): probably dont't need this.
+  BasicBlock::RpoNumber ao_number_;  // Assembly order number.
   const BasicBlock::RpoNumber rpo_number_;
   const BasicBlock::RpoNumber loop_header_;
   const BasicBlock::RpoNumber loop_end_;
@@ -870,10 +938,12 @@ struct PrintableInstructionSequence;
 // Represents architecture-specific generated code before, during, and after
 // register allocation.
 // TODO(titzer): s/IsDouble/IsFloat64/
-class InstructionSequence FINAL {
+class InstructionSequence FINAL : public ZoneObject {
  public:
   static InstructionBlocks* InstructionBlocksFor(Zone* zone,
                                                  const Schedule* schedule);
+  // Puts the deferred blocks last.
+  static void ComputeAssemblyOrder(InstructionBlocks* blocks);
 
   InstructionSequence(Zone* zone, InstructionBlocks* instruction_blocks);
 
@@ -912,11 +982,12 @@ class InstructionSequence FINAL {
 
   void AddGapMove(int index, InstructionOperand* from, InstructionOperand* to);
 
-  BlockStartInstruction* GetBlockStart(BasicBlock::RpoNumber rpo);
+  BlockStartInstruction* GetBlockStart(BasicBlock::RpoNumber rpo) const;
 
   typedef InstructionDeque::const_iterator const_iterator;
   const_iterator begin() const { return instructions_.begin(); }
   const_iterator end() const { return instructions_.end(); }
+  const InstructionDeque& instructions() const { return instructions_; }
 
   GapInstruction* GapAt(int index) const {
     return GapInstruction::cast(InstructionAt(index));
@@ -938,6 +1009,8 @@ class InstructionSequence FINAL {
   void EndBlock(BasicBlock::RpoNumber rpo);
 
   int AddConstant(int virtual_register, Constant constant) {
+    // TODO(titzer): allow RPO numbers as constants?
+    DCHECK(constant.type() != Constant::kRpoNumber);
     DCHECK(virtual_register >= 0 && virtual_register < next_virtual_register_);
     DCHECK(constants_.find(virtual_register) == constants_.end());
     constants_.insert(std::make_pair(virtual_register, constant));
@@ -950,8 +1023,8 @@ class InstructionSequence FINAL {
     return it->second;
   }
 
-  typedef ConstantDeque Immediates;
-  const Immediates& immediates() const { return immediates_; }
+  typedef ZoneVector<Constant> Immediates;
+  Immediates& immediates() { return immediates_; }
 
   int AddImmediate(Constant constant) {
     int index = static_cast<int>(immediates_.size());
@@ -978,6 +1051,13 @@ class InstructionSequence FINAL {
   FrameStateDescriptor* GetFrameStateDescriptor(StateId deoptimization_id);
   int GetFrameStateDescriptorCount();
 
+  BasicBlock::RpoNumber InputRpo(Instruction* instr, size_t index) {
+    InstructionOperand* operand = instr->InputAt(index);
+    Constant constant = operand->IsImmediate() ? GetImmediate(operand->index())
+                                               : GetConstant(operand->index());
+    return constant.ToRpoNumber();
+  }
+
  private:
   friend std::ostream& operator<<(std::ostream& os,
                                   const PrintableInstructionSequence& code);
@@ -988,13 +1068,15 @@ class InstructionSequence FINAL {
   InstructionBlocks* const instruction_blocks_;
   IntVector block_starts_;
   ConstantMap constants_;
-  ConstantDeque immediates_;
+  Immediates immediates_;
   InstructionDeque instructions_;
   int next_virtual_register_;
   PointerMapDeque pointer_maps_;
   VirtualRegisterSet doubles_;
   VirtualRegisterSet references_;
   DeoptimizationVector deoptimization_entries_;
+
+  DISALLOW_COPY_AND_ASSIGN(InstructionSequence);
 };
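
The kRpoNumber additions above let a branch target travel through the
instruction stream as an ordinary immediate that InputRpo later decodes back
into a block number. A minimal standalone sketch of that round trip, with
hypothetical Constant/Instruction types rather than V8's:

#include <cassert>
#include <vector>

struct Constant {
  enum Type { kInt32, kRpoNumber };
  Type type;
  int value;  // For kRpoNumber: the target block's reverse-post-order index.
};

struct Instruction {
  std::vector<int> immediate_indices;  // Indices into the immediates pool.
};

int AddImmediate(std::vector<Constant>* pool, Constant c) {
  pool->push_back(c);
  return static_cast<int>(pool->size() - 1);
}

// Plays the role of InstructionSequence::InputRpo: fetch an input immediate
// and interpret it as a block number.
int InputRpo(const std::vector<Constant>& pool, const Instruction& instr,
             size_t index) {
  const Constant& c = pool[instr.immediate_indices[index]];
  assert(c.type == Constant::kRpoNumber);
  return c.value;
}

int main() {
  std::vector<Constant> immediates;
  Instruction branch;
  branch.immediate_indices.push_back(
      AddImmediate(&immediates, Constant{Constant::kRpoNumber, 7}));
  assert(InputRpo(immediates, branch, 0) == 7);  // Branch targets block 7.
}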
 
 
index 28580a1..263b0fe 100644 (file)
@@ -5,6 +5,7 @@
 #include "src/compiler/diamond.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-graph.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/types.h"
@@ -96,6 +97,10 @@ class JSCallReduction {
 };
 
 
+JSBuiltinReducer::JSBuiltinReducer(JSGraph* jsgraph)
+    : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
+
+
 // ECMA-262, section 15.8.2.1.
 Reduction JSBuiltinReducer::ReduceMathAbs(Node* node) {
   JSCallReduction r(node);
@@ -232,6 +237,19 @@ Reduction JSBuiltinReducer::Reduce(Node* node) {
   return NoChange();
 }
 
+
+Graph* JSBuiltinReducer::graph() const { return jsgraph()->graph(); }
+
+
+CommonOperatorBuilder* JSBuiltinReducer::common() const {
+  return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* JSBuiltinReducer::machine() const {
+  return jsgraph()->machine();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index 4b3be29..ac6f266 100644 (file)
@@ -6,30 +6,26 @@
 #define V8_COMPILER_JS_BUILTIN_REDUCER_H_
 
 #include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
 #include "src/compiler/simplified-operator.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class MachineOperatorBuilder;
+
+
 class JSBuiltinReducer FINAL : public Reducer {
  public:
-  explicit JSBuiltinReducer(JSGraph* jsgraph)
-      : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {}
-  virtual ~JSBuiltinReducer() {}
+  explicit JSBuiltinReducer(JSGraph* jsgraph);
+  ~JSBuiltinReducer() FINAL {}
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
+  Reduction Reduce(Node* node) FINAL;
 
  private:
-  JSGraph* jsgraph() const { return jsgraph_; }
-  Graph* graph() const { return jsgraph_->graph(); }
-  CommonOperatorBuilder* common() const { return jsgraph_->common(); }
-  MachineOperatorBuilder* machine() const { return jsgraph_->machine(); }
-  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-
   Reduction ReduceMathAbs(Node* node);
   Reduction ReduceMathSqrt(Node* node);
   Reduction ReduceMathMax(Node* node);
@@ -38,6 +34,12 @@ class JSBuiltinReducer FINAL : public Reducer {
   Reduction ReduceMathFloor(Node* node);
   Reduction ReduceMathCeil(Node* node);
 
+  JSGraph* jsgraph() const { return jsgraph_; }
+  Graph* graph() const;
+  CommonOperatorBuilder* common() const;
+  MachineOperatorBuilder* machine() const;
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
   JSGraph* jsgraph_;
   SimplifiedOperatorBuilder simplified_;
 };
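
The header above now forward-declares CommonOperatorBuilder, JSGraph, and
MachineOperatorBuilder and moves the accessor bodies into the .cc file, so
including js-builtin-reducer.h no longer pulls in js-graph.h and friends. A
minimal sketch of the same pattern, with hypothetical Widget/Engine names in
one translation unit (comments mark the would-be file boundaries):

#include <iostream>

// --- widget.h ----------------------------------------------------------
class Engine;  // Forward declaration: enough for pointer members.

class Widget {
 public:
  explicit Widget(Engine* engine) : engine_(engine) {}
  Engine* engine() const { return engine_; }
  int EngineId() const;  // Out of line: needs Engine's full definition.
 private:
  Engine* engine_;
};

// --- widget.cc (would #include "engine.h") ------------------------------
class Engine {
 public:
  int id() const { return 42; }
};

int Widget::EngineId() const { return engine()->id(); }

int main() {
  Engine e;
  Widget w(&e);
  std::cout << w.EngineId() << "\n";  // 42
}
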
index 8c4bb17..a700b47 100644 (file)
@@ -2,12 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/js-context-specialization.h"
+
+#include "src/compiler.h"
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph-inl.h"
-#include "src/compiler/js-context-specialization.h"
 #include "src/compiler/js-operator.h"
-#include "src/compiler/node-aux-data-inl.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
 
@@ -15,45 +15,19 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-class ContextSpecializationVisitor : public NullNodeVisitor {
- public:
-  explicit ContextSpecializationVisitor(JSContextSpecializer* spec)
-      : spec_(spec) {}
-
-  void Post(Node* node) {
-    switch (node->opcode()) {
-      case IrOpcode::kJSLoadContext: {
-        Reduction r = spec_->ReduceJSLoadContext(node);
-        if (r.Changed() && r.replacement() != node) {
-          NodeProperties::ReplaceWithValue(node, r.replacement());
-          node->RemoveAllInputs();
-        }
-        break;
-      }
-      case IrOpcode::kJSStoreContext: {
-        Reduction r = spec_->ReduceJSStoreContext(node);
-        if (r.Changed() && r.replacement() != node) {
-          NodeProperties::ReplaceWithValue(node, r.replacement());
-          node->RemoveAllInputs();
-        }
-        break;
-      }
-      default:
-        break;
-    }
+Reduction JSContextSpecializer::Reduce(Node* node) {
+  if (node == context_) {
+    Node* constant = jsgraph_->Constant(info_->context());
+    NodeProperties::ReplaceWithValue(node, constant);
+    return Replace(constant);
   }
-
- private:
-  JSContextSpecializer* spec_;
-};
-
-
-void JSContextSpecializer::SpecializeToContext() {
-  NodeProperties::ReplaceWithValue(context_,
-                                   jsgraph_->Constant(info_->context()));
-
-  ContextSpecializationVisitor visitor(this);
-  jsgraph_->graph()->VisitNodeInputsFromEnd(&visitor);
+  if (node->opcode() == IrOpcode::kJSLoadContext) {
+    return ReduceJSLoadContext(node);
+  }
+  if (node->opcode() == IrOpcode::kJSStoreContext) {
+    return ReduceJSStoreContext(node);
+  }
+  return NoChange();
 }
 
 
@@ -63,7 +37,7 @@ Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
   HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
   // If the context is not constant, no reduction can occur.
   if (!m.HasValue()) {
-    return Reducer::NoChange();
+    return NoChange();
   }
 
   const ContextAccess& access = ContextAccessOf(node->op());
@@ -78,14 +52,14 @@ Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
   if (!access.immutable()) {
     // The access does not have to look up a parent, nothing to fold.
     if (access.depth() == 0) {
-      return Reducer::NoChange();
+      return NoChange();
     }
     const Operator* op = jsgraph_->javascript()->LoadContext(
         0, access.index(), access.immutable());
     node->set_op(op);
     Handle<Object> context_handle = Handle<Object>(context, info_->isolate());
     node->ReplaceInput(0, jsgraph_->Constant(context_handle));
-    return Reducer::Changed(node);
+    return Changed(node);
   }
   Handle<Object> value = Handle<Object>(
       context->get(static_cast<int>(access.index())), info_->isolate());
@@ -95,13 +69,15 @@ Reduction JSContextSpecializer::ReduceJSLoadContext(Node* node) {
   // We must be conservative and check if the value in the slot is currently the
   // hole or undefined. If it is neither of these, then it must be initialized.
   if (value->IsUndefined() || value->IsTheHole()) {
-    return Reducer::NoChange();
+    return NoChange();
   }
 
   // Success. The context load can be replaced with the constant.
   // TODO(titzer): record the specialization for sharing code across multiple
   // contexts that have the same value in the corresponding context slot.
-  return Reducer::Replace(jsgraph_->Constant(value));
+  Node* constant = jsgraph_->Constant(value);
+  NodeProperties::ReplaceWithValue(node, constant);
+  return Replace(constant);
 }
 
 
@@ -111,14 +87,14 @@ Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
   HeapObjectMatcher<Context> m(NodeProperties::GetValueInput(node, 0));
   // If the context is not constant, no reduction can occur.
   if (!m.HasValue()) {
-    return Reducer::NoChange();
+    return NoChange();
   }
 
   const ContextAccess& access = ContextAccessOf(node->op());
 
   // The access does not have to look up a parent, nothing to fold.
   if (access.depth() == 0) {
-    return Reducer::NoChange();
+    return NoChange();
   }
 
   // Find the right parent context.
@@ -132,7 +108,7 @@ Reduction JSContextSpecializer::ReduceJSStoreContext(Node* node) {
   Handle<Object> new_context_handle = Handle<Object>(context, info_->isolate());
   node->ReplaceInput(0, jsgraph_->Constant(new_context_handle));
 
-  return Reducer::Changed(node);
+  return Changed(node);
 }
 
 }  // namespace compiler
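
The reduction above folds a JSLoadContext to a constant only when the context
chain is statically known, the access walks to the right parent, the slot is
immutable, and the stored value is neither the hole nor undefined. A
standalone sketch of that decision, using a hypothetical Context struct where
a slot value of -1 stands in for the hole:

#include <cassert>
#include <vector>

struct Context {
  const Context* previous = nullptr;  // Parent in the context chain.
  std::vector<int> slots;             // Slot values; -1 means "the hole".
};

// Walk up `depth` parents, then read `index`; succeeds only when the value
// is known and initialized (mirrors the hole/undefined check above).
bool TryFoldContextLoad(const Context* ctx, int depth, int index, int* out) {
  for (int i = 0; i < depth; ++i) {
    if (ctx == nullptr) return false;
    ctx = ctx->previous;
  }
  if (ctx == nullptr || index >= static_cast<int>(ctx->slots.size())) {
    return false;
  }
  int value = ctx->slots[index];
  if (value == -1) return false;  // Uninitialized: keep the runtime load.
  *out = value;
  return true;
}

int main() {
  Context outer;
  outer.slots = {10, 20};
  Context inner;
  inner.previous = &outer;
  int v = 0;
  assert(TryFoldContextLoad(&inner, 1, 1, &v) && v == 20);
}
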
index b8b50ed..298d3a3 100644 (file)
@@ -16,12 +16,14 @@ namespace compiler {
 
 // Specializes a given JSGraph to a given context, potentially constant folding
 // some {LoadContext} nodes or strength reducing some {StoreContext} nodes.
-class JSContextSpecializer {
+class JSContextSpecializer : public Reducer {
  public:
   JSContextSpecializer(CompilationInfo* info, JSGraph* jsgraph, Node* context)
       : info_(info), jsgraph_(jsgraph), context_(context) {}
 
-  void SpecializeToContext();
+  Reduction Reduce(Node* node) OVERRIDE;
+
+  // Visible for unit testing.
   Reduction ReduceJSLoadContext(Node* node);
   Reduction ReduceJSStoreContext(Node* node);
 
index fb18ba1..4886442 100644 (file)
@@ -33,40 +33,25 @@ void JSGenericLowering::PatchInsertInput(Node* node, int index, Node* input) {
 }
 
 
-Node* JSGenericLowering::SmiConstant(int32_t immediate) {
-  return jsgraph()->SmiConstant(immediate);
-}
-
-
-Node* JSGenericLowering::Int32Constant(int immediate) {
-  return jsgraph()->Int32Constant(immediate);
-}
-
-
-Node* JSGenericLowering::CodeConstant(Handle<Code> code) {
-  return jsgraph()->HeapConstant(code);
-}
-
-
-Node* JSGenericLowering::FunctionConstant(Handle<JSFunction> function) {
-  return jsgraph()->HeapConstant(function);
-}
-
-
-Node* JSGenericLowering::ExternalConstant(ExternalReference ref) {
-  return jsgraph()->ExternalConstant(ref);
-}
-
-
 Reduction JSGenericLowering::Reduce(Node* node) {
   switch (node->opcode()) {
-#define DECLARE_CASE(x) \
-  case IrOpcode::k##x:  \
-    Lower##x(node);     \
-    break;
-    DECLARE_CASE(Branch)
+#define DECLARE_CASE(x)  \
+    case IrOpcode::k##x: \
+      Lower##x(node);    \
+      break;
     JS_OP_LIST(DECLARE_CASE)
 #undef DECLARE_CASE
+    case IrOpcode::kBranch:
+      // TODO(mstarzinger): If typing is enabled then simplified lowering will
+      // have inserted the correct ChangeBoolToBit, otherwise we need to perform
+      // poor man's representation inference here and insert a manual change.
+      if (!info()->is_typing_enabled()) {
+        Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
+                                      jsgraph()->TrueConstant());
+        node->ReplaceInput(0, test);
+        break;
+      }
+      // Fall-through.
     default:
       // Nothing to see.
       return NoChange();
@@ -94,18 +79,18 @@ REPLACE_BINARY_OP_IC_CALL(JSModulus, Token::MOD)
 #undef REPLACE_BINARY_OP_IC_CALL
 
 
-#define REPLACE_COMPARE_IC_CALL(op, token, pure)  \
+#define REPLACE_COMPARE_IC_CALL(op, token)        \
   void JSGenericLowering::Lower##op(Node* node) { \
-    ReplaceWithCompareIC(node, token, pure);      \
+    ReplaceWithCompareIC(node, token);            \
   }
-REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ, false)
-REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE, false)
-REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT, true)
-REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT, true)
-REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT, false)
-REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT, false)
-REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE, false)
-REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE, false)
+REPLACE_COMPARE_IC_CALL(JSEqual, Token::EQ)
+REPLACE_COMPARE_IC_CALL(JSNotEqual, Token::NE)
+REPLACE_COMPARE_IC_CALL(JSStrictEqual, Token::EQ_STRICT)
+REPLACE_COMPARE_IC_CALL(JSStrictNotEqual, Token::NE_STRICT)
+REPLACE_COMPARE_IC_CALL(JSLessThan, Token::LT)
+REPLACE_COMPARE_IC_CALL(JSGreaterThan, Token::GT)
+REPLACE_COMPARE_IC_CALL(JSLessThanOrEqual, Token::LTE)
+REPLACE_COMPARE_IC_CALL(JSGreaterThanOrEqual, Token::GTE)
 #undef REPLACE_COMPARE_IC_CALL
 
 
@@ -120,7 +105,7 @@ REPLACE_RUNTIME_CALL(JSCreateCatchContext, Runtime::kPushCatchContext)
 REPLACE_RUNTIME_CALL(JSCreateWithContext, Runtime::kPushWithContext)
 REPLACE_RUNTIME_CALL(JSCreateBlockContext, Runtime::kPushBlockContext)
 REPLACE_RUNTIME_CALL(JSCreateModuleContext, Runtime::kPushModuleContext)
-REPLACE_RUNTIME_CALL(JSCreateGlobalContext, Runtime::kAbort)
+REPLACE_RUNTIME_CALL(JSCreateScriptContext, Runtime::kAbort)
 #undef REPLACE_RUNTIME
 
 
@@ -141,8 +126,7 @@ static CallDescriptor::Flags FlagsForNode(Node* node) {
 }
 
 
-void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
-                                             bool pure) {
+void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token) {
   Callable callable = CodeFactory::CompareIC(isolate(), token);
   bool has_frame_state = OperatorProperties::HasFrameStateInput(node->op());
   CallDescriptor* desc_compare = linkage()->GetStubCallDescriptor(
@@ -150,11 +134,11 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
       CallDescriptor::kPatchableCallSiteWithNop | FlagsForNode(node));
   NodeVector inputs(zone());
   inputs.reserve(node->InputCount() + 1);
-  inputs.push_back(CodeConstant(callable.code()));
+  inputs.push_back(jsgraph()->HeapConstant(callable.code()));
   inputs.push_back(NodeProperties::GetValueInput(node, 0));
   inputs.push_back(NodeProperties::GetValueInput(node, 1));
   inputs.push_back(NodeProperties::GetContextInput(node));
-  if (pure) {
+  if (node->op()->HasProperty(Operator::kPure)) {
     // A pure (strict) comparison doesn't have an effect, control or frame
     // state.  But for the graph, we need to add control and effect inputs.
     DCHECK(!has_frame_state);
@@ -173,7 +157,7 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
                        static_cast<int>(inputs.size()), &inputs.front());
 
   node->ReplaceInput(0, compare);
-  node->ReplaceInput(1, SmiConstant(token));
+  node->ReplaceInput(1, jsgraph()->SmiConstant(token));
 
   if (has_frame_state) {
     // Remove the frame state from inputs.
@@ -186,9 +170,10 @@ void JSGenericLowering::ReplaceWithCompareIC(Node* node, Token::Value token,
 
 void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
                                             CallDescriptor::Flags flags) {
+  Operator::Properties properties = node->op()->properties();
   CallDescriptor* desc = linkage()->GetStubCallDescriptor(
-      callable.descriptor(), 0, flags | FlagsForNode(node));
-  Node* stub_code = CodeConstant(callable.code());
+      callable.descriptor(), 0, flags | FlagsForNode(node), properties);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
   PatchInsertInput(node, 0, stub_code);
   PatchOperator(node, common()->Call(desc));
 }
@@ -197,16 +182,17 @@ void JSGenericLowering::ReplaceWithStubCall(Node* node, Callable callable,
 void JSGenericLowering::ReplaceWithBuiltinCall(Node* node,
                                                Builtins::JavaScript id,
                                                int nargs) {
+  Operator::Properties properties = node->op()->properties();
   Callable callable =
       CodeFactory::CallFunction(isolate(), nargs - 1, NO_CALL_FUNCTION_FLAGS);
   CallDescriptor* desc = linkage()->GetStubCallDescriptor(
-      callable.descriptor(), nargs, FlagsForNode(node));
+      callable.descriptor(), nargs, FlagsForNode(node), properties);
   // TODO(mstarzinger): Accessing the builtins object this way prevents sharing
   // of code across native contexts. Fix this by loading from given context.
   Handle<JSFunction> function(
       JSFunction::cast(info()->context()->builtins()->javascript_builtin(id)));
-  Node* stub_code = CodeConstant(callable.code());
-  Node* function_node = FunctionConstant(function);
+  Node* stub_code = jsgraph()->HeapConstant(callable.code());
+  Node* function_node = jsgraph()->HeapConstant(function);
   PatchInsertInput(node, 0, stub_code);
   PatchInsertInput(node, 1, function_node);
   PatchOperator(node, common()->Call(desc));
@@ -221,27 +207,15 @@ void JSGenericLowering::ReplaceWithRuntimeCall(Node* node,
   int nargs = (nargs_override < 0) ? fun->nargs : nargs_override;
   CallDescriptor* desc =
       linkage()->GetRuntimeCallDescriptor(f, nargs, properties);
-  Node* ref = ExternalConstant(ExternalReference(f, isolate()));
-  Node* arity = Int32Constant(nargs);
-  PatchInsertInput(node, 0, jsgraph()->CEntryStubConstant());
+  Node* ref = jsgraph()->ExternalConstant(ExternalReference(f, isolate()));
+  Node* arity = jsgraph()->Int32Constant(nargs);
+  PatchInsertInput(node, 0, jsgraph()->CEntryStubConstant(fun->result_size));
   PatchInsertInput(node, nargs + 1, ref);
   PatchInsertInput(node, nargs + 2, arity);
   PatchOperator(node, common()->Call(desc));
 }
 
 
-void JSGenericLowering::LowerBranch(Node* node) {
-  if (!info()->is_typing_enabled()) {
-    // TODO(mstarzinger): If typing is enabled then simplified lowering will
-    // have inserted the correct ChangeBoolToBit, otherwise we need to perform
-    // poor-man's representation inference here and insert manual change.
-    Node* test = graph()->NewNode(machine()->WordEqual(), node->InputAt(0),
-                                  jsgraph()->TrueConstant());
-    node->ReplaceInput(0, test);
-  }
-}
-
-
 void JSGenericLowering::LowerJSUnaryNot(Node* node) {
   Callable callable = CodeFactory::ToBoolean(
       isolate(), ToBooleanStub::RESULT_AS_INVERSE_ODDBALL);
@@ -313,7 +287,7 @@ void JSGenericLowering::LowerJSStoreNamed(Node* node) {
 
 void JSGenericLowering::LowerJSDeleteProperty(Node* node) {
   StrictMode strict_mode = OpParameter<StrictMode>(node);
-  PatchInsertInput(node, 2, SmiConstant(strict_mode));
+  PatchInsertInput(node, 2, jsgraph()->SmiConstant(strict_mode));
   ReplaceWithBuiltinCall(node, Builtins::DELETE, 3);
 }
 
@@ -331,7 +305,7 @@ void JSGenericLowering::LowerJSInstanceOf(Node* node) {
   CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
   CallDescriptor* desc =
       linkage()->GetStubCallDescriptor(d, 0, FlagsForNode(node));
-  Node* stub_code = CodeConstant(stub.GetCode());
+  Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
   PatchInsertInput(node, 0, stub_code);
   PatchOperator(node, common()->Call(desc));
 }
@@ -341,14 +315,15 @@ void JSGenericLowering::LowerJSLoadContext(Node* node) {
   const ContextAccess& access = ContextAccessOf(node->op());
   for (size_t i = 0; i < access.depth(); ++i) {
     node->ReplaceInput(
-        0, graph()->NewNode(
-               machine()->Load(kMachAnyTagged),
-               NodeProperties::GetValueInput(node, 0),
-               Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
-               NodeProperties::GetEffectInput(node), graph()->start()));
+        0, graph()->NewNode(machine()->Load(kMachAnyTagged),
+                            NodeProperties::GetValueInput(node, 0),
+                            jsgraph()->Int32Constant(
+                                Context::SlotOffset(Context::PREVIOUS_INDEX)),
+                            NodeProperties::GetEffectInput(node),
+                            graph()->start()));
   }
-  node->ReplaceInput(
-      1, Int32Constant(Context::SlotOffset(static_cast<int>(access.index()))));
+  node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
+                            static_cast<int>(access.index()))));
   node->AppendInput(zone(), graph()->start());
   PatchOperator(node, machine()->Load(kMachAnyTagged));
 }
@@ -358,15 +333,16 @@ void JSGenericLowering::LowerJSStoreContext(Node* node) {
   const ContextAccess& access = ContextAccessOf(node->op());
   for (size_t i = 0; i < access.depth(); ++i) {
     node->ReplaceInput(
-        0, graph()->NewNode(
-               machine()->Load(kMachAnyTagged),
-               NodeProperties::GetValueInput(node, 0),
-               Int32Constant(Context::SlotOffset(Context::PREVIOUS_INDEX)),
-               NodeProperties::GetEffectInput(node), graph()->start()));
+        0, graph()->NewNode(machine()->Load(kMachAnyTagged),
+                            NodeProperties::GetValueInput(node, 0),
+                            jsgraph()->Int32Constant(
+                                Context::SlotOffset(Context::PREVIOUS_INDEX)),
+                            NodeProperties::GetEffectInput(node),
+                            graph()->start()));
   }
   node->ReplaceInput(2, NodeProperties::GetValueInput(node, 1));
-  node->ReplaceInput(
-      1, Int32Constant(Context::SlotOffset(static_cast<int>(access.index()))));
+  node->ReplaceInput(1, jsgraph()->Int32Constant(Context::SlotOffset(
+                            static_cast<int>(access.index()))));
   PatchOperator(node, machine()->Store(StoreRepresentation(kMachAnyTagged,
                                                            kFullWriteBarrier)));
 }
@@ -378,10 +354,10 @@ void JSGenericLowering::LowerJSCallConstruct(Node* node) {
   CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
   CallDescriptor* desc =
       linkage()->GetStubCallDescriptor(d, arity, FlagsForNode(node));
-  Node* stub_code = CodeConstant(stub.GetCode());
+  Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
   Node* construct = NodeProperties::GetValueInput(node, 0);
   PatchInsertInput(node, 0, stub_code);
-  PatchInsertInput(node, 1, Int32Constant(arity - 1));
+  PatchInsertInput(node, 1, jsgraph()->Int32Constant(arity - 1));
   PatchInsertInput(node, 2, construct);
   PatchInsertInput(node, 3, jsgraph()->UndefinedConstant());
   PatchOperator(node, common()->Call(desc));
@@ -436,7 +412,7 @@ void JSGenericLowering::LowerJSCallFunction(Node* node) {
   CallInterfaceDescriptor d = stub.GetCallInterfaceDescriptor();
   CallDescriptor* desc = linkage()->GetStubCallDescriptor(
       d, static_cast<int>(p.arity() - 1), FlagsForNode(node));
-  Node* stub_code = CodeConstant(stub.GetCode());
+  Node* stub_code = jsgraph()->HeapConstant(stub.GetCode());
   PatchInsertInput(node, 0, stub_code);
   PatchOperator(node, common()->Call(desc));
 }
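
ReplaceWithRuntimeCall and its siblings all follow one shape: splice the
callee and the argument count into the node's inputs, then swap the operator
for a generic Call. A much-simplified sketch of that in-place patching, with
string-labeled inputs instead of real nodes (the real code also threads in
the CEntry stub, context, effect, and control inputs):

#include <iostream>
#include <string>
#include <vector>

struct Node {
  std::string op;                   // "JSAdd" before lowering, "Call" after.
  std::vector<std::string> inputs;  // Stand-ins for real input nodes.
};

// Prepend the runtime function reference, append the arity, retag as a call.
void ReplaceWithRuntimeCall(Node* node, const std::string& function) {
  int nargs = static_cast<int>(node->inputs.size());
  node->inputs.insert(node->inputs.begin(), "ExternalRef:" + function);
  node->inputs.push_back("Int32:" + std::to_string(nargs));
  node->op = "Call";
}

int main() {
  Node add{"JSAdd", {"x", "y"}};
  ReplaceWithRuntimeCall(&add, "Runtime_Add");
  std::cout << add.op << "(";
  for (const std::string& in : add.inputs) std::cout << in << " ";
  std::cout << ")\n";  // Call(ExternalRef:Runtime_Add x y Int32:2 )
}
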
index 63d8e93..f626338 100644 (file)
@@ -22,37 +22,34 @@ class CommonOperatorBuilder;
 class MachineOperatorBuilder;
 class Linkage;
 
+
 // Lowers JS-level operators to runtime and IC calls in the "generic" case.
-class JSGenericLowering : public Reducer {
+class JSGenericLowering FINAL : public Reducer {
  public:
   JSGenericLowering(CompilationInfo* info, JSGraph* graph);
-  virtual ~JSGenericLowering() {}
+  ~JSGenericLowering() FINAL {}
 
-  virtual Reduction Reduce(Node* node);
+  Reduction Reduce(Node* node) FINAL;
 
  protected:
 #define DECLARE_LOWER(x) void Lower##x(Node* node);
   // Dispatched depending on opcode.
-  ALL_OP_LIST(DECLARE_LOWER)
+  JS_OP_LIST(DECLARE_LOWER)
 #undef DECLARE_LOWER
 
-  // Helpers to create new constant nodes.
-  Node* SmiConstant(int immediate);
-  Node* Int32Constant(int immediate);
-  Node* CodeConstant(Handle<Code> code);
-  Node* FunctionConstant(Handle<JSFunction> function);
-  Node* ExternalConstant(ExternalReference ref);
-
   // Helpers to patch existing nodes in the graph.
   void PatchOperator(Node* node, const Operator* new_op);
   void PatchInsertInput(Node* node, int index, Node* input);
 
   // Helpers to replace existing nodes with a generic call.
-  void ReplaceWithCompareIC(Node* node, Token::Value token, bool pure);
+  void ReplaceWithCompareIC(Node* node, Token::Value token);
   void ReplaceWithStubCall(Node* node, Callable c, CallDescriptor::Flags flags);
   void ReplaceWithBuiltinCall(Node* node, Builtins::JavaScript id, int args);
   void ReplaceWithRuntimeCall(Node* node, Runtime::FunctionId f, int args = -1);
 
+  // Helper for optimization of JSCallFunction.
+  bool TryLowerDirectJSCall(Node* node);
+
   Zone* zone() const { return graph()->zone(); }
   Isolate* isolate() const { return zone()->isolate(); }
   JSGraph* jsgraph() const { return jsgraph_; }
@@ -66,8 +63,6 @@ class JSGenericLowering : public Reducer {
   CompilationInfo* info_;
   JSGraph* jsgraph_;
   Linkage* linkage_;
-
-  bool TryLowerDirectJSCall(Node* node);
 };
 
 }  // namespace compiler
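
The DECLARE_CASE/JS_OP_LIST dispatch above relies on a single X-macro list
generating both the opcode enumerators and the switch arms, so a newly added
opcode cannot silently miss its Lower##x handler. A self-contained sketch
with a hypothetical two-entry list:

#include <iostream>

#define DEMO_OP_LIST(V) \
  V(Add)                \
  V(Sub)

enum Opcode {
#define DECLARE_ENUM(x) k##x,
  DEMO_OP_LIST(DECLARE_ENUM)
#undef DECLARE_ENUM
};

void LowerAdd() { std::cout << "lowering Add\n"; }
void LowerSub() { std::cout << "lowering Sub\n"; }

// The same list expands into one case per opcode.
void Lower(Opcode opcode) {
  switch (opcode) {
#define DECLARE_CASE(x) \
  case k##x:            \
    Lower##x();         \
    break;
    DEMO_OP_LIST(DECLARE_CASE)
#undef DECLARE_CASE
  }
}

int main() {
  Lower(kAdd);
  Lower(kSub);
}
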
index da6d66d..7759ba1 100644 (file)
@@ -17,12 +17,16 @@ Node* JSGraph::ImmovableHeapConstant(Handle<HeapObject> object) {
 }
 
 
-Node* JSGraph::CEntryStubConstant() {
-  if (!c_entry_stub_constant_.is_set()) {
-    c_entry_stub_constant_.set(
-        ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+Node* JSGraph::CEntryStubConstant(int result_size) {
+  if (result_size == 1) {
+    if (!c_entry_stub_constant_.is_set()) {
+      c_entry_stub_constant_.set(
+          ImmovableHeapConstant(CEntryStub(isolate(), 1).GetCode()));
+    }
+    return c_entry_stub_constant_.get();
   }
-  return c_entry_stub_constant_.get();
+
+  return ImmovableHeapConstant(CEntryStub(isolate(), result_size).GetCode());
 }
 
 
@@ -170,8 +174,11 @@ Node* JSGraph::NumberConstant(double value) {
 
 
 Node* JSGraph::Float32Constant(float value) {
-  // TODO(turbofan): cache float32 constants.
-  return graph()->NewNode(common()->Float32Constant(value));
+  Node** loc = cache_.FindFloat32Constant(value);
+  if (*loc == NULL) {
+    *loc = graph()->NewNode(common()->Float32Constant(value));
+  }
+  return *loc;
 }
 
 
index 83e103d..040a745 100644 (file)
@@ -32,7 +32,7 @@ class JSGraph : public ZoneObject {
         cache_(zone()) {}
 
   // Canonicalized global constants.
-  Node* CEntryStubConstant();
+  Node* CEntryStubConstant(int result_size);
   Node* UndefinedConstant();
   Node* TheHoleConstant();
   Node* TrueConstant();
@@ -86,6 +86,10 @@ class JSGraph : public ZoneObject {
     return machine()->Is32() ? Int32Constant(static_cast<int32_t>(value))
                              : Int64Constant(static_cast<int64_t>(value));
   }
+  template <typename T>
+  Node* PointerConstant(T* value) {
+    return IntPtrConstant(bit_cast<intptr_t>(value));
+  }
 
   // Creates a Float32Constant node, usually canonicalized.
   Node* Float32Constant(float value);
@@ -101,12 +105,17 @@ class JSGraph : public ZoneObject {
     return Constant(immediate);
   }
 
+  // Creates a dummy Constant node, used to satisfy calling conventions of
+  // stubs and runtime functions that do not require a context.
+  Node* NoContextConstant() { return ZeroConstant(); }
+
   JSOperatorBuilder* javascript() { return javascript_; }
   CommonOperatorBuilder* common() { return common_; }
   MachineOperatorBuilder* machine() { return machine_; }
   Graph* graph() { return graph_; }
   Zone* zone() { return graph()->zone(); }
   Isolate* isolate() { return zone()->isolate(); }
+  Factory* factory() { return isolate()->factory(); }
 
   void GetCachedNodes(NodeVector* nodes);
 
@@ -132,8 +141,6 @@ class JSGraph : public ZoneObject {
   Node* ImmovableHeapConstant(Handle<HeapObject> value);
   Node* NumberConstant(double value);
 
-  Factory* factory() { return isolate()->factory(); }
-
   DISALLOW_COPY_AND_ASSIGN(JSGraph);
 };
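
PointerConstant embeds a C++ pointer into the graph as a pointer-sized
integer constant. A sketch of the conversion it performs, with memcpy
standing in for V8's bit_cast:

#include <cassert>
#include <cstdint>
#include <cstring>

template <typename T>
intptr_t PointerConstant(T* value) {
  static_assert(sizeof(intptr_t) == sizeof(T*), "pointer-sized integer");
  intptr_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}

int main() {
  int x = 0;
  intptr_t c = PointerConstant(&x);
  assert(c == reinterpret_cast<intptr_t>(&x));
}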
 
index e3c4427..d143382 100644 (file)
@@ -7,7 +7,6 @@
 #include "src/compiler/access-builder.h"
 #include "src/compiler/ast-graph-builder.h"
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/js-inlining.h"
@@ -87,7 +86,7 @@ class Inlinee {
   }
 
   // Counts JSFunction, Receiver, arguments, context but not effect, control.
-  size_t total_parameters() { return start_->op()->OutputCount(); }
+  size_t total_parameters() { return start_->op()->ValueOutputCount(); }
 
   // Counts only formal parameters.
   size_t formal_parameters() {
@@ -127,19 +126,17 @@ void Inlinee::UnifyReturn(JSGraph* jsgraph) {
   NodeVector effects(jsgraph->zone());
   // Iterate over all control flow predecessors,
   // which must be return statements.
-  InputIter iter = final_merge->inputs().begin();
-  while (iter != final_merge->inputs().end()) {
-    Node* input = *iter;
+  for (Edge edge : final_merge->input_edges()) {
+    Node* input = edge.to();
     switch (input->opcode()) {
       case IrOpcode::kReturn:
         values.push_back(NodeProperties::GetValueInput(input, 0));
         effects.push_back(NodeProperties::GetEffectInput(input));
-        iter.UpdateToAndIncrement(NodeProperties::GetControlInput(input));
+        edge.UpdateTo(NodeProperties::GetControlInput(input));
         input->RemoveAllInputs();
         break;
       default:
         UNREACHABLE();
-        ++iter;
         break;
     }
   }
@@ -168,9 +165,8 @@ class CopyVisitor : public NullNodeVisitor {
 
   void Post(Node* original) {
     NodeVector inputs(temp_zone_);
-    for (InputIter it = original->inputs().begin();
-         it != original->inputs().end(); ++it) {
-      inputs.push_back(GetCopy(*it));
+    for (Node* const node : original->inputs()) {
+      inputs.push_back(GetCopy(node));
     }
 
     // Reuse the operator in the copy. This assumes that op lives in a zone
@@ -209,11 +205,10 @@ class CopyVisitor : public NullNodeVisitor {
   }
 
   Node* GetSentinel(Node* original) {
-    Node* sentinel = sentinels_[original->id()];
-    if (sentinel == NULL) {
-      sentinel = target_graph_->NewNode(&sentinel_op_);
+    if (sentinels_[original->id()] == NULL) {
+      sentinels_[original->id()] = target_graph_->NewNode(&sentinel_op_);
     }
-    return sentinel;
+    return sentinels_[original->id()];
   }
 
   NodeVector copies_;
@@ -244,35 +239,33 @@ void Inlinee::InlineAtCall(JSGraph* jsgraph, Node* call) {
   // context, effect, control.
   int inliner_inputs = call->op()->ValueInputCount();
   // Iterate over all uses of the start node.
-  UseIter iter = start_->uses().begin();
-  while (iter != start_->uses().end()) {
-    Node* use = *iter;
+  for (Edge edge : start_->use_edges()) {
+    Node* use = edge.from();
     switch (use->opcode()) {
       case IrOpcode::kParameter: {
         int index = 1 + OpParameter<int>(use->op());
         if (index < inliner_inputs && index < inlinee_context_index) {
           // There is an input from the call, and the index is a value
           // projection but not the context, so rewire the input.
-          NodeProperties::ReplaceWithValue(*iter, call->InputAt(index));
+          NodeProperties::ReplaceWithValue(use, call->InputAt(index));
         } else if (index == inlinee_context_index) {
           // This is the context projection, rewire it to the context from the
           // JSFunction object.
-          NodeProperties::ReplaceWithValue(*iter, context);
+          NodeProperties::ReplaceWithValue(use, context);
         } else if (index < inlinee_context_index) {
           // Call has fewer arguments than required, fill with undefined.
-          NodeProperties::ReplaceWithValue(*iter, jsgraph->UndefinedConstant());
+          NodeProperties::ReplaceWithValue(use, jsgraph->UndefinedConstant());
         } else {
           // We got too many arguments, discard for now.
           // TODO(sigurds): Fix to treat arguments array correctly.
         }
-        ++iter;
         break;
       }
       default:
-        if (NodeProperties::IsEffectEdge(iter.edge())) {
-          iter.UpdateToAndIncrement(context);
-        } else if (NodeProperties::IsControlEdge(iter.edge())) {
-          iter.UpdateToAndIncrement(control);
+        if (NodeProperties::IsEffectEdge(edge)) {
+          edge.UpdateTo(context);
+        } else if (NodeProperties::IsControlEdge(edge)) {
+          edge.UpdateTo(control);
         } else {
           UNREACHABLE();
         }
@@ -406,19 +399,21 @@ void JSInliner::TryInlineJSCall(Node* call_node) {
 
   Inlinee inlinee(visitor.GetCopy(graph.start()), visitor.GetCopy(graph.end()));
 
-  Node* outer_frame_state = call.frame_state();
-  // Insert argument adaptor frame if required.
-  if (call.formal_arguments() != inlinee.formal_parameters()) {
-    outer_frame_state =
-        CreateArgumentsAdaptorFrameState(&call, function, info.zone());
-  }
+  if (FLAG_turbo_deoptimization) {
+    Node* outer_frame_state = call.frame_state();
+    // Insert argument adaptor frame if required.
+    if (call.formal_arguments() != inlinee.formal_parameters()) {
+      outer_frame_state =
+          CreateArgumentsAdaptorFrameState(&call, function, info.zone());
+    }
 
-  for (NodeVectorConstIter it = visitor.copies().begin();
-       it != visitor.copies().end(); ++it) {
-    Node* node = *it;
-    if (node != NULL && node->opcode() == IrOpcode::kFrameState) {
-      AddClosureToFrameState(node, function);
-      NodeProperties::ReplaceFrameStateInput(node, outer_frame_state);
+    for (NodeVectorConstIter it = visitor.copies().begin();
+         it != visitor.copies().end(); ++it) {
+      Node* node = *it;
+      if (node != NULL && node->opcode() == IrOpcode::kFrameState) {
+        AddClosureToFrameState(node, function);
+        NodeProperties::ReplaceFrameStateInput(node, outer_frame_state);
+      }
     }
   }
 
@@ -455,9 +450,8 @@ class JSCallRuntimeAccessor {
 
   NodeVector inputs(Zone* zone) const {
     NodeVector inputs(zone);
-    for (InputIter it = call_->inputs().begin(); it != call_->inputs().end();
-         ++it) {
-      inputs.push_back(*it);
+    for (Node* const node : call_->inputs()) {
+      inputs.push_back(node);
     }
     return inputs;
   }
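
Several loops in this file swap manual InputIter/UseIter bookkeeping for
range-based iteration over edge objects that know how to rewire their own
input slot. A standalone sketch of that style, with hypothetical Node/Edge
types:

#include <iostream>
#include <vector>

struct Node;

struct Edge {
  Node** slot;  // Location of the input pointer inside the using node.
  Node* to() const { return *slot; }
  void UpdateTo(Node* replacement) { *slot = replacement; }
};

struct Node {
  int id;
  std::vector<Node*> inputs;
  std::vector<Edge> input_edges() {
    std::vector<Edge> edges;
    for (Node*& input : inputs) edges.push_back(Edge{&input});
    return edges;
  }
};

int main() {
  Node a{1, {}}, b{2, {}}, c{3, {&a, &b}};
  Node replacement{9, {}};
  for (Edge edge : c.input_edges()) {
    if (edge.to()->id == 2) edge.UpdateTo(&replacement);  // Rewire in place.
  }
  std::cout << c.inputs[1]->id << "\n";  // 9
}
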
index d4c0dcf..80b6968 100644 (file)
@@ -5,7 +5,6 @@
 #include "src/compiler/access-builder.h"
 #include "src/compiler/common-operator.h"
 #include "src/compiler/diamond.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/js-intrinsic-builder.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/simplified-operator.h"
index 12473c1..aa76a3b 100644 (file)
@@ -228,8 +228,8 @@ const StoreNamedParameters& StoreNamedParametersOf(const Operator* op) {
   V(Multiply, Operator::kNoProperties, 2, 1)              \
   V(Divide, Operator::kNoProperties, 2, 1)                \
   V(Modulus, Operator::kNoProperties, 2, 1)               \
-  V(UnaryNot, Operator::kNoProperties, 1, 1)              \
-  V(ToBoolean, Operator::kNoProperties, 1, 1)             \
+  V(UnaryNot, Operator::kPure, 1, 1)                      \
+  V(ToBoolean, Operator::kPure, 1, 1)                     \
   V(ToNumber, Operator::kNoProperties, 1, 1)              \
   V(ToString, Operator::kNoProperties, 1, 1)              \
   V(ToName, Operator::kNoProperties, 1, 1)                \
@@ -244,7 +244,7 @@ const StoreNamedParameters& StoreNamedParametersOf(const Operator* op) {
   V(CreateWithContext, Operator::kNoProperties, 2, 1)     \
   V(CreateBlockContext, Operator::kNoProperties, 2, 1)    \
   V(CreateModuleContext, Operator::kNoProperties, 2, 1)   \
-  V(CreateGlobalContext, Operator::kNoProperties, 2, 1)
+  V(CreateScriptContext, Operator::kNoProperties, 2, 1)
 
 
 struct JSOperatorGlobalCache FINAL {
@@ -259,6 +259,16 @@ struct JSOperatorGlobalCache FINAL {
   Name##Operator k##Name##Operator;
   CACHED_OP_LIST(CACHED)
 #undef CACHED
+
+  template <StrictMode kStrictMode>
+  struct StorePropertyOperator FINAL : public Operator1<StrictMode> {
+    StorePropertyOperator()
+        : Operator1<StrictMode>(IrOpcode::kJSStoreProperty,
+                                Operator::kNoProperties, "JSStoreProperty", 3,
+                                1, 1, 0, 1, 0, kStrictMode) {}
+  };
+  StorePropertyOperator<SLOPPY> kStorePropertySloppyOperator;
+  StorePropertyOperator<STRICT> kStorePropertyStrictOperator;
 };
 
 
@@ -335,11 +345,14 @@ const Operator* JSOperatorBuilder::LoadProperty(
 
 
 const Operator* JSOperatorBuilder::StoreProperty(StrictMode strict_mode) {
-  return new (zone()) Operator1<StrictMode>(                // --
-      IrOpcode::kJSStoreProperty, Operator::kNoProperties,  // opcode
-      "JSStoreProperty",                                    // name
-      3, 1, 1, 0, 1, 0,                                     // counts
-      strict_mode);                                         // parameter
+  switch (strict_mode) {
+    case SLOPPY:
+      return &cache_.kStorePropertySloppyOperator;
+    case STRICT:
+      return &cache_.kStorePropertyStrictOperator;
+  }
+  UNREACHABLE();
+  return nullptr;
 }
 
 
index ad3277f..e716a8e 100644 (file)
@@ -259,7 +259,7 @@ class JSOperatorBuilder FINAL : public ZoneObject {
   const Operator* CreateWithContext();
   const Operator* CreateBlockContext();
   const Operator* CreateModuleContext();
-  const Operator* CreateGlobalContext();
+  const Operator* CreateScriptContext();
 
  private:
   Zone* zone() const { return zone_; }
index b97fa02..2338866 100644 (file)
@@ -4,9 +4,10 @@
 
 #include "src/compiler/access-builder.h"
 #include "src/compiler/graph-inl.h"
-#include "src/compiler/js-builtin-reducer.h"
+#include "src/compiler/js-graph.h"
 #include "src/compiler/js-typed-lowering.h"
 #include "src/compiler/node-aux-data-inl.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/types.h"
 
@@ -17,7 +18,6 @@ namespace compiler {
 // TODO(turbofan): js-typed-lowering improvements possible
 // - immediately put in type bounds for all new nodes
 // - relax effects from generic but not-side-effecting operations
-// - relax effects for ToNumber(mixed)
 
 
 // Relax the effects of {node} by immediately replacing effect uses of {node}
@@ -29,19 +29,34 @@ static void RelaxEffects(Node* node) {
 }
 
 
-JSTypedLowering::JSTypedLowering(JSGraph* jsgraph)
-    : jsgraph_(jsgraph), simplified_(jsgraph->zone()) {
-  Factory* factory = zone()->isolate()->factory();
-  Handle<Object> zero = factory->NewNumber(0.0);
-  Handle<Object> one = factory->NewNumber(1.0);
-  zero_range_ = Type::Range(zero, zero, zone());
-  one_range_ = Type::Range(one, one, zone());
+JSTypedLowering::JSTypedLowering(JSGraph* jsgraph, Zone* zone)
+    : jsgraph_(jsgraph), simplified_(graph()->zone()), conversions_(zone) {
+  Handle<Object> zero = factory()->NewNumber(0.0);
+  Handle<Object> one = factory()->NewNumber(1.0);
+  zero_range_ = Type::Range(zero, zero, graph()->zone());
+  one_range_ = Type::Range(one, one, graph()->zone());
+  Handle<Object> thirtyone = factory()->NewNumber(31.0);
+  zero_thirtyone_range_ = Type::Range(zero, thirtyone, graph()->zone());
+  // TODO(jarin): Tighten up the type system so that these range
+  // work-arounds become unnecessary.
+  shifted_int32_ranges_[0] = Type::Signed32();
+  if (SmiValuesAre31Bits()) {
+    shifted_int32_ranges_[1] = Type::SignedSmall();
+    for (size_t k = 2; k < arraysize(shifted_int32_ranges_); ++k) {
+      Handle<Object> min = factory()->NewNumber(kMinInt / (1 << k));
+      Handle<Object> max = factory()->NewNumber(kMaxInt / (1 << k));
+      shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
+    }
+  } else {
+    for (size_t k = 1; k < arraysize(shifted_int32_ranges_); ++k) {
+      Handle<Object> min = factory()->NewNumber(kMinInt / (1 << k));
+      Handle<Object> max = factory()->NewNumber(kMaxInt / (1 << k));
+      shifted_int32_ranges_[k] = Type::Range(min, max, graph()->zone());
+    }
+  }
 }
 
 
-JSTypedLowering::~JSTypedLowering() {}
-
-
 Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
   NodeProperties::ReplaceWithValue(old, node, node);
   return Changed(node);
@@ -52,7 +67,7 @@ Reduction JSTypedLowering::ReplaceEagerly(Node* old, Node* node) {
 // JSOperator. This class manages the rewriting of context, control, and effect
 // dependencies during lowering of a binop and contains numerous helper
 // functions for matching the types of inputs to an operation.
-class JSBinopReduction {
+class JSBinopReduction FINAL {
  public:
   JSBinopReduction(JSTypedLowering* lowering, Node* node)
       : lowering_(lowering),
@@ -65,9 +80,10 @@ class JSBinopReduction {
     node_->ReplaceInput(1, ConvertToNumber(right()));
   }
 
-  void ConvertInputsToInt32(bool left_signed, bool right_signed) {
-    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
-    node_->ReplaceInput(1, ConvertToI32(right_signed, right()));
+  void ConvertInputsToUI32(Signedness left_signedness,
+                           Signedness right_signedness) {
+    node_->ReplaceInput(0, ConvertToUI32(left(), left_signedness));
+    node_->ReplaceInput(1, ConvertToUI32(right(), right_signedness));
   }
 
   void ConvertInputsToString() {
@@ -76,11 +92,15 @@ class JSBinopReduction {
   }
 
   // Convert inputs for bitwise shift operation (ES5 spec 11.7).
-  void ConvertInputsForShift(bool left_signed) {
-    node_->ReplaceInput(0, ConvertToI32(left_signed, left()));
-    Node* rnum = ConvertToI32(false, right());
-    node_->ReplaceInput(1, graph()->NewNode(machine()->Word32And(), rnum,
-                                            jsgraph()->Int32Constant(0x1F)));
+  void ConvertInputsForShift(Signedness left_signedness) {
+    node_->ReplaceInput(0, ConvertToUI32(left(), left_signedness));
+    Node* rnum = ConvertToUI32(right(), kUnsigned);
+    Type* rnum_type = NodeProperties::GetBounds(rnum).upper;
+    if (!rnum_type->Is(lowering_->zero_thirtyone_range_)) {
+      rnum = graph()->NewNode(machine()->Word32And(), rnum,
+                              jsgraph()->Int32Constant(0x1F));
+    }
+    node_->ReplaceInput(1, rnum);
   }
 
   void SwapInputs() {
@@ -93,7 +113,8 @@ class JSBinopReduction {
 
   // Remove all effect and control inputs and outputs to this node and change
   // to the pure operator {op}, possibly inserting a boolean inversion.
-  Reduction ChangeToPureOperator(const Operator* op, bool invert = false) {
+  Reduction ChangeToPureOperator(const Operator* op, bool invert = false,
+                                 Type* type = Type::Any()) {
     DCHECK_EQ(0, op->EffectInputCount());
     DCHECK_EQ(false, OperatorProperties::HasContextInput(op));
     DCHECK_EQ(0, op->ControlInputCount());
@@ -108,17 +129,26 @@ class JSBinopReduction {
     // Finally, update the operator to the new one.
     node_->set_op(op);
 
+    // TODO(jarin): Replace the explicit typing hack with a call to some method
+    // that encapsulates changing the operator and re-typing.
+    Bounds const bounds = NodeProperties::GetBounds(node_);
+    NodeProperties::SetBounds(node_, Bounds::NarrowUpper(bounds, type, zone()));
+
     if (invert) {
       // Insert an boolean not to invert the value.
       Node* value = graph()->NewNode(simplified()->BooleanNot(), node_);
       node_->ReplaceUses(value);
       // Note: ReplaceUses() smashes all uses, so smash it back here.
       value->ReplaceInput(0, node_);
-      return lowering_->ReplaceWith(value);
+      return lowering_->Replace(value);
     }
     return lowering_->Changed(node_);
   }
 
+  Reduction ChangeToPureOperator(const Operator* op, Type* type) {
+    return ChangeToPureOperator(op, false, type);
+  }
+
   bool OneInputIs(Type* t) { return left_type_->Is(t) || right_type_->Is(t); }
 
   bool BothInputsAre(Type* t) {
@@ -142,10 +172,11 @@ class JSBinopReduction {
   Type* right_type() { return right_type_; }
 
   SimplifiedOperatorBuilder* simplified() { return lowering_->simplified(); }
-  Graph* graph() { return lowering_->graph(); }
+  Graph* graph() const { return lowering_->graph(); }
   JSGraph* jsgraph() { return lowering_->jsgraph(); }
   JSOperatorBuilder* javascript() { return lowering_->javascript(); }
   MachineOperatorBuilder* machine() { return lowering_->machine(); }
+  Zone* zone() const { return graph()->zone(); }
 
  private:
   JSTypedLowering* lowering_;  // The containing lowering instance.
@@ -164,65 +195,30 @@ class JSBinopReduction {
   }
 
   Node* ConvertToNumber(Node* node) {
-    // Avoid introducing too many eager ToNumber() operations.
-    Reduction reduced = lowering_->ReduceJSToNumberInput(node);
-    if (reduced.Changed()) return reduced.replacement();
+    if (NodeProperties::GetBounds(node).upper->Is(Type::PlainPrimitive())) {
+      return lowering_->ConvertToNumber(node);
+    }
     Node* n = graph()->NewNode(javascript()->ToNumber(), node, context(),
                                effect(), control());
     update_effect(n);
     return n;
   }
 
-  // Try narrowing a double or number operation to an Int32 operation.
-  bool TryNarrowingToI32(Type* type, Node* node) {
-    switch (node->opcode()) {
-      case IrOpcode::kFloat64Add:
-      case IrOpcode::kNumberAdd: {
-        JSBinopReduction r(lowering_, node);
-        if (r.BothInputsAre(Type::Integral32())) {
-          node->set_op(lowering_->machine()->Int32Add());
-          // TODO(titzer): narrow bounds instead of overwriting.
-          NodeProperties::SetBounds(node, Bounds(type));
-          return true;
-        }
+  Node* ConvertToUI32(Node* node, Signedness signedness) {
+    // Avoid introducing too many eager NumberToXXnt32() operations.
+    node = ConvertToNumber(node);
+    Type* type = NodeProperties::GetBounds(node).upper;
+    if (signedness == kSigned) {
+      if (!type->Is(Type::Signed32())) {
+        node = graph()->NewNode(simplified()->NumberToInt32(), node);
       }
-      case IrOpcode::kFloat64Sub:
-      case IrOpcode::kNumberSubtract: {
-        JSBinopReduction r(lowering_, node);
-        if (r.BothInputsAre(Type::Integral32())) {
-          node->set_op(lowering_->machine()->Int32Sub());
-          // TODO(titzer): narrow bounds instead of overwriting.
-          NodeProperties::SetBounds(node, Bounds(type));
-          return true;
-        }
+    } else {
+      DCHECK_EQ(kUnsigned, signedness);
+      if (!type->Is(Type::Unsigned32())) {
+        node = graph()->NewNode(simplified()->NumberToUint32(), node);
       }
-      default:
-        return false;
     }
-  }
-
-  Node* ConvertToI32(bool is_signed, Node* node) {
-    Type* type = is_signed ? Type::Signed32() : Type::Unsigned32();
-    if (node->OwnedBy(node_)) {
-      // If this node {node_} has the only edge to {node}, then try narrowing
-      // its operation to an Int32 add or subtract.
-      if (TryNarrowingToI32(type, node)) return node;
-    } else {
-      // Otherwise, {node} has multiple uses. Leave it as is and let the
-      // further lowering passes deal with it, which use a full backwards
-      // fixpoint.
-    }
-
-    // Avoid introducing too many eager NumberToXXnt32() operations.
-    node = ConvertToNumber(node);
-    Type* input_type = NodeProperties::GetBounds(node).upper;
-
-    if (input_type->Is(type)) return node;  // already in the value range.
-
-    const Operator* op = is_signed ? simplified()->NumberToInt32()
-                                   : simplified()->NumberToUint32();
-    Node* n = graph()->NewNode(op, node);
-    return n;
+    return node;
   }
 
   void update_effect(Node* effect) {
@@ -235,13 +231,13 @@ Reduction JSTypedLowering::ReduceJSAdd(Node* node) {
   JSBinopReduction r(this, node);
   if (r.BothInputsAre(Type::Number())) {
     // JSAdd(x:number, y:number) => NumberAdd(x, y)
-    return r.ChangeToPureOperator(simplified()->NumberAdd());
+    return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
-  Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
-  if (r.BothInputsAre(Type::Primitive()) && r.NeitherInputCanBe(maybe_string)) {
+  if (r.BothInputsAre(Type::Primitive()) &&
+      r.NeitherInputCanBe(Type::StringOrReceiver())) {
     // JSAdd(x:-string, y:-string) => NumberAdd(ToNumber(x), ToNumber(y))
     r.ConvertInputsToNumber();
-    return r.ChangeToPureOperator(simplified()->NumberAdd());
+    return r.ChangeToPureOperator(simplified()->NumberAdd(), Type::Number());
   }
 #if 0
   // TODO(turbofan): General ToNumber disabled for now because:
@@ -279,8 +275,8 @@ Reduction JSTypedLowering::ReduceJSBitwiseOr(Node* node) {
     // operations
     // on some platforms.
     // TODO(turbofan): make this heuristic configurable for code size.
-    r.ConvertInputsToInt32(true, true);
-    return r.ChangeToPureOperator(machine()->Word32Or());
+    r.ConvertInputsToUI32(kSigned, kSigned);
+    return r.ChangeToPureOperator(machine()->Word32Or(), Type::Integral32());
   }
   return NoChange();
 }
@@ -292,7 +288,8 @@ Reduction JSTypedLowering::ReduceJSMultiply(Node* node) {
     // TODO(jarin): Propagate frame state input from non-primitive input node to
     // JSToNumber node.
     r.ConvertInputsToNumber();
-    return r.ChangeToPureOperator(simplified()->NumberMultiply());
+    return r.ChangeToPureOperator(simplified()->NumberMultiply(),
+                                  Type::Number());
   }
   // TODO(turbofan): relax/remove the effects of this operator in other cases.
   return NoChange();
@@ -304,7 +301,7 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
   JSBinopReduction r(this, node);
   if (r.BothInputsAre(Type::Primitive())) {
     r.ConvertInputsToNumber();
-    return r.ChangeToPureOperator(numberOp);
+    return r.ChangeToPureOperator(numberOp, Type::Number());
   }
 #if 0
   // TODO(turbofan): General ToNumber disabled for now because:
@@ -323,9 +320,7 @@ Reduction JSTypedLowering::ReduceNumberBinop(Node* node,
 }
 
 
-Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed,
-                                          bool right_signed,
-                                          const Operator* intOp) {
+Reduction JSTypedLowering::ReduceInt32Binop(Node* node, const Operator* intOp) {
   JSBinopReduction r(this, node);
   if (r.BothInputsAre(Type::Primitive())) {
     // TODO(titzer): some Smi bitwise operations don't really require going
@@ -333,19 +328,20 @@ Reduction JSTypedLowering::ReduceI32Binop(Node* node, bool left_signed,
     // operations
     // on some platforms.
     // TODO(turbofan): make this heuristic configurable for code size.
-    r.ConvertInputsToInt32(left_signed, right_signed);
-    return r.ChangeToPureOperator(intOp);
+    r.ConvertInputsToUI32(kSigned, kSigned);
+    return r.ChangeToPureOperator(intOp, Type::Integral32());
   }
   return NoChange();
 }
 
 
-Reduction JSTypedLowering::ReduceI32Shift(Node* node, bool left_signed,
-                                          const Operator* shift_op) {
+Reduction JSTypedLowering::ReduceUI32Shift(Node* node,
+                                           Signedness left_signedness,
+                                           const Operator* shift_op) {
   JSBinopReduction r(this, node);
   if (r.BothInputsAre(Type::Primitive())) {
-    r.ConvertInputsForShift(left_signed);
-    return r.ChangeToPureOperator(shift_op);
+    r.ConvertInputsForShift(left_signedness);
+    return r.ChangeToPureOperator(shift_op, Type::Integral32());
   }
   return NoChange();
 }
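
The left_signedness parameter mirrors the spec: only the left operand's conversion differs between signed and unsigned shifts, while the count is always reduced modulo 32 (the zero_thirtyone_range_ member added to the header later in this diff presumably tracks that 0-31 count range). A plain C++ model, assuming the usual arithmetic right shift for signed values:

    #include <cstdint>

    int32_t JSShiftRight(int32_t lhs, uint32_t count) {  // x >> y
      return lhs >> (count & 0x1F);  // count is masked to five bits
    }

    uint32_t JSShiftRightLogical(uint32_t lhs, uint32_t count) {  // x >>> y
      return lhs >> (count & 0x1F);
    }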
@@ -386,8 +382,8 @@ Reduction JSTypedLowering::ReduceJSComparison(Node* node) {
     ...
   }
 #endif
-  Type* maybe_string = Type::Union(Type::String(), Type::Receiver(), zone());
-  if (r.BothInputsAre(Type::Primitive()) && r.OneInputCannotBe(maybe_string)) {
+  if (r.BothInputsAre(Type::Primitive()) &&
+      r.OneInputCannotBe(Type::StringOrReceiver())) {
     const Operator* less_than;
     const Operator* less_than_or_equal;
     if (r.BothInputsAre(Type::Unsigned32())) {
@@ -453,8 +449,14 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
   if (r.left() == r.right()) {
     // x === x is always true if x is never NaN
     if (!r.left_type()->Maybe(Type::NaN())) {
-      return ReplaceEagerly(node, invert ? jsgraph()->FalseConstant()
-                                         : jsgraph()->TrueConstant());
+      return ReplaceEagerly(node, jsgraph()->BooleanConstant(!invert));
+    }
+  }
+  if (r.OneInputCannotBe(Type::NumberOrString())) {
+    // For values with canonical representation (i.e. neither string nor
+    // number) an empty type intersection means they cannot be strictly equal.
+    if (!r.left_type()->Maybe(r.right_type())) {
+      return ReplaceEagerly(node, jsgraph()->BooleanConstant(invert));
     }
   }
   if (r.OneInputIs(Type::Undefined())) {
@@ -488,16 +490,137 @@ Reduction JSTypedLowering::ReduceJSStrictEqual(Node* node, bool invert) {
 }
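
The new disjoint-type rule above leans on canonical representation: values that are neither numbers nor strings (undefined, null, booleans, objects) compare by identity under ===, so an empty intersection of the static types proves the comparison false. A toy model of that argument (hypothetical types-as-sets, not V8 code):

    #include <cassert>
    #include <set>
    #include <string>

    // A "type" as a set of possible canonical values.
    using Type = std::set<std::string>;

    bool Maybe(const Type& a, const Type& b) {
      for (const std::string& tag : a)
        if (b.count(tag)) return true;
      return false;
    }

    int main() {
      Type left = {"null"};
      Type right = {"undefined", "true", "false"};
      assert(!Maybe(left, right));  // so left === right folds to false
    }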
 
 
+Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) {
+  if (input->opcode() == IrOpcode::kJSToBoolean) {
+    // Recursively try to reduce the input first.
+    Reduction result = ReduceJSToBoolean(input);
+    if (result.Changed()) return result;
+    return Changed(input);  // JSToBoolean(JSToBoolean(x)) => JSToBoolean(x)
+  }
+  // Check if we have a cached conversion.
+  Node* conversion = FindConversion<IrOpcode::kJSToBoolean>(input);
+  if (conversion) return Replace(conversion);
+  Type* input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::Boolean())) {
+    return Changed(input);  // JSToBoolean(x:boolean) => x
+  }
+  if (input_type->Is(Type::Undefined())) {
+    // JSToBoolean(undefined) => #false
+    return Replace(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::Null())) {
+    // JSToBoolean(null) => #false
+    return Replace(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::DetectableReceiver())) {
+    // JSToBoolean(x:detectable) => #true
+    return Replace(jsgraph()->TrueConstant());
+  }
+  if (input_type->Is(Type::Undetectable())) {
+    // JSToBoolean(x:undetectable) => #false
+    return Replace(jsgraph()->FalseConstant());
+  }
+  if (input_type->Is(Type::OrderedNumber())) {
+    // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
+    Node* cmp = graph()->NewNode(simplified()->NumberEqual(), input,
+                                 jsgraph()->ZeroConstant());
+    Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
+    return Replace(inv);
+  }
+  if (input_type->Is(Type::String())) {
+    // JSToBoolean(x:string) => BooleanNot(NumberEqual(x.length, #0))
+    FieldAccess access = AccessBuilder::ForStringLength();
+    Node* length = graph()->NewNode(simplified()->LoadField(access), input,
+                                    graph()->start(), graph()->start());
+    Node* cmp = graph()->NewNode(simplified()->NumberEqual(), length,
+                                 jsgraph()->ZeroConstant());
+    Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
+    return Replace(inv);
+  }
+  return NoChange();
+}
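
The cases above are the ECMAScript ToBoolean table specialized by static type. A self-contained C++ sketch of the same table; note the OrderedNumber restriction in the reducer exists because BooleanNot(NumberEqual(NaN, 0)) would produce true while ToBoolean(NaN) must be false:

    #include <cmath>
    #include <string>

    enum class Tag { kUndefined, kNull, kReceiver, kNumber, kString };

    struct Value {
      Tag tag;
      double number = 0;   // used when tag == kNumber
      std::string string;  // used when tag == kString
    };

    bool ToBoolean(const Value& v) {
      switch (v.tag) {
        case Tag::kUndefined:
        case Tag::kNull:
          return false;
        case Tag::kReceiver:
          return true;  // detectable receivers are always truthy
        case Tag::kNumber:
          // NaN handled explicitly; the reducer sidesteps it by only
          // folding numbers typed as OrderedNumber.
          return v.number != 0 && !std::isnan(v.number);
        case Tag::kString:
          return !v.string.empty();  // NumberEqual(x.length, #0), inverted
      }
      return false;
    }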
+
+
+Reduction JSTypedLowering::ReduceJSToBoolean(Node* node) {
+  // Try to reduce the input first.
+  Node* const input = node->InputAt(0);
+  Reduction reduction = ReduceJSToBooleanInput(input);
+  if (reduction.Changed()) return reduction;
+  if (input->opcode() == IrOpcode::kPhi) {
+    // JSToBoolean(phi(x1,...,xn,control),context)
+    //   => phi(JSToBoolean(x1,no-context),...,JSToBoolean(xn,no-context),control)
+    int const input_count = input->InputCount() - 1;
+    Node* const control = input->InputAt(input_count);
+    DCHECK_LE(0, input_count);
+    DCHECK(NodeProperties::IsControl(control));
+    DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::Boolean()));
+    DCHECK(!NodeProperties::GetBounds(input).upper->Is(Type::Boolean()));
+    node->set_op(common()->Phi(kMachAnyTagged, input_count));
+    for (int i = 0; i < input_count; ++i) {
+      // We must be very careful not to introduce cycles when pushing
+      // operations into phis. It is safe for {value}, since it appears
+      // as input to the phi that we are replacing, but it's not safe
+      // to simply reuse the context of the {node}. However, ToBoolean()
+      // does not require a context anyway, so it's safe to discard it
+      // here and pass the dummy context.
+      Node* const value = ConvertToBoolean(input->InputAt(i));
+      if (i < node->InputCount()) {
+        node->ReplaceInput(i, value);
+      } else {
+        node->AppendInput(graph()->zone(), value);
+      }
+    }
+    if (input_count < node->InputCount()) {
+      node->ReplaceInput(input_count, control);
+    } else {
+      node->AppendInput(graph()->zone(), control);
+    }
+    node->TrimInputCount(input_count + 1);
+    return Changed(node);
+  }
+  if (input->opcode() == IrOpcode::kSelect) {
+    // JSToBoolean(select(c,x1,x2),context)
+    //   => select(c,JSToBoolean(x1,no-context),JSToBoolean(x2,no-context))
+    int const input_count = input->InputCount();
+    BranchHint const input_hint = SelectParametersOf(input->op()).hint();
+    DCHECK_EQ(3, input_count);
+    DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::Boolean()));
+    DCHECK(!NodeProperties::GetBounds(input).upper->Is(Type::Boolean()));
+    node->set_op(common()->Select(kMachAnyTagged, input_hint));
+    node->InsertInput(graph()->zone(), 0, input->InputAt(0));
+    for (int i = 1; i < input_count; ++i) {
+      // We must be very careful not to introduce cycles when pushing
+      // operations into selects. It is safe for {value}, since it appears
+      // as input to the select that we are replacing, but it's not safe
+      // to simply reuse the context of the {node}. However, ToBoolean()
+      // does not require a context anyway, so it's safe to discard it
+      // here and pass the dummy context.
+      Node* const value = ConvertToBoolean(input->InputAt(i));
+      node->ReplaceInput(i, value);
+    }
+    DCHECK_EQ(3, node->InputCount());
+    return Changed(node);
+  }
+  InsertConversion(node);
+  if (node->InputAt(1) != jsgraph()->NoContextConstant()) {
+    // JSToBoolean(x,context) => JSToBoolean(x,no-context)
+    node->ReplaceInput(1, jsgraph()->NoContextConstant());
+    return Changed(node);
+  }
+  return NoChange();
+}
+
+
 Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
   if (input->opcode() == IrOpcode::kJSToNumber) {
     // Recursively try to reduce the input first.
-    Reduction result = ReduceJSToNumberInput(input->InputAt(0));
-    if (result.Changed()) {
-      RelaxEffects(input);
-      return result;
-    }
+    Reduction result = ReduceJSToNumber(input);
+    if (result.Changed()) return result;
     return Changed(input);  // JSToNumber(JSToNumber(x)) => JSToNumber(x)
   }
+  // Check if we have a cached conversion.
+  Node* conversion = FindConversion<IrOpcode::kJSToNumber>(input);
+  if (conversion) return Replace(conversion);
   Type* input_type = NodeProperties::GetBounds(input).upper;
   if (input_type->Is(Type::Number())) {
     // JSToNumber(x:number) => x
@@ -505,30 +628,113 @@ Reduction JSTypedLowering::ReduceJSToNumberInput(Node* input) {
   }
   if (input_type->Is(Type::Undefined())) {
     // JSToNumber(undefined) => #NaN
-    return ReplaceWith(jsgraph()->NaNConstant());
+    return Replace(jsgraph()->NaNConstant());
   }
   if (input_type->Is(Type::Null())) {
     // JSToNumber(null) => #0
-    return ReplaceWith(jsgraph()->ZeroConstant());
+    return Replace(jsgraph()->ZeroConstant());
   }
   if (input_type->Is(Type::Boolean())) {
     // JSToNumber(x:boolean) => BooleanToNumber(x)
-    return ReplaceWith(
-        graph()->NewNode(simplified()->BooleanToNumber(), input));
+    return Replace(graph()->NewNode(simplified()->BooleanToNumber(), input));
   }
   // TODO(turbofan): js-typed-lowering of ToNumber(x:string)
   return NoChange();
 }
 
 
+Reduction JSTypedLowering::ReduceJSToNumber(Node* node) {
+  // Try to reduce the input first.
+  Node* const input = node->InputAt(0);
+  Reduction reduction = ReduceJSToNumberInput(input);
+  if (reduction.Changed()) {
+    NodeProperties::ReplaceWithValue(node, reduction.replacement());
+    return reduction;
+  }
+  Type* const input_type = NodeProperties::GetBounds(input).upper;
+  if (input_type->Is(Type::PlainPrimitive())) {
+    if (input->opcode() == IrOpcode::kPhi) {
+      // JSToNumber(phi(x1,...,xn,control):plain-primitive,context)
+      //   => phi(JSToNumber(x1,no-context),
+      //          ...,
+      //          JSToNumber(xn,no-context),control)
+      int const input_count = input->InputCount() - 1;
+      Node* const control = input->InputAt(input_count);
+      DCHECK_LE(0, input_count);
+      DCHECK(NodeProperties::IsControl(control));
+      DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::Number()));
+      DCHECK(!NodeProperties::GetBounds(input).upper->Is(Type::Number()));
+      RelaxEffects(node);
+      node->set_op(common()->Phi(kMachAnyTagged, input_count));
+      for (int i = 0; i < input_count; ++i) {
+        // We must be very careful not to introduce cycles when pushing
+        // operations into phis. It is safe for {value}, since it appears
+        // as input to the phi that we are replacing, but it's not safe
+        // to simply reuse the context of the {node}. However, ToNumber()
+        // does not require a context anyway, so it's safe to discard it
+        // here and pass the dummy context.
+        Node* const value = ConvertToNumber(input->InputAt(i));
+        if (i < node->InputCount()) {
+          node->ReplaceInput(i, value);
+        } else {
+          node->AppendInput(graph()->zone(), value);
+        }
+      }
+      if (input_count < node->InputCount()) {
+        node->ReplaceInput(input_count, control);
+      } else {
+        node->AppendInput(graph()->zone(), control);
+      }
+      node->TrimInputCount(input_count + 1);
+      return Changed(node);
+    }
+    if (input->opcode() == IrOpcode::kSelect) {
+      // JSToNumber(select(c,x1,x2):plain-primitive,context)
+      //   => select(c,JSToNumber(x1,no-context),JSToNumber(x2,no-context))
+      int const input_count = input->InputCount();
+      BranchHint const input_hint = SelectParametersOf(input->op()).hint();
+      DCHECK_EQ(3, input_count);
+      DCHECK(NodeProperties::GetBounds(node).upper->Is(Type::Number()));
+      DCHECK(!NodeProperties::GetBounds(input).upper->Is(Type::Number()));
+      RelaxEffects(node);
+      node->set_op(common()->Select(kMachAnyTagged, input_hint));
+      node->ReplaceInput(0, input->InputAt(0));
+      for (int i = 1; i < input_count; ++i) {
+        // We must be very careful not to introduce cycles when pushing
+        // operations into selects. It is safe for {value}, since it appears
+        // as input to the select that we are replacing, but it's not safe
+        // to simply reuse the context of the {node}. However, ToNumber()
+        // does not require a context anyway, so it's safe to discard it
+        // here and pass the dummy context.
+        Node* const value = ConvertToNumber(input->InputAt(i));
+        node->ReplaceInput(i, value);
+      }
+      node->TrimInputCount(input_count);
+      return Changed(node);
+    }
+    // Remember this conversion.
+    InsertConversion(node);
+    if (node->InputAt(1) != jsgraph()->NoContextConstant() ||
+        node->InputAt(2) != graph()->start() ||
+        node->InputAt(3) != graph()->start()) {
+      // JSToNumber(x:plain-primitive,context,effect,control)
+      //   => JSToNumber(x,no-context,start,start)
+      RelaxEffects(node);
+      node->ReplaceInput(1, jsgraph()->NoContextConstant());
+      node->ReplaceInput(2, graph()->start());
+      node->ReplaceInput(3, graph()->start());
+      return Changed(node);
+    }
+  }
+  return NoChange();
+}
+
+
 Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
   if (input->opcode() == IrOpcode::kJSToString) {
     // Recursively try to reduce the input first.
-    Reduction result = ReduceJSToStringInput(input->InputAt(0));
-    if (result.Changed()) {
-      RelaxEffects(input);
-      return result;
-    }
+    Reduction result = ReduceJSToString(input);
+    if (result.Changed()) return result;
     return Changed(input);  // JSToString(JSToString(x)) => JSToString(x)
   }
   Type* input_type = NodeProperties::GetBounds(input).upper;
@@ -536,12 +742,10 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
     return Changed(input);  // JSToString(x:string) => x
   }
   if (input_type->Is(Type::Undefined())) {
-    return ReplaceWith(jsgraph()->HeapConstant(
-        graph()->zone()->isolate()->factory()->undefined_string()));
+    return Replace(jsgraph()->HeapConstant(factory()->undefined_string()));
   }
   if (input_type->Is(Type::Null())) {
-    return ReplaceWith(jsgraph()->HeapConstant(
-        graph()->zone()->isolate()->factory()->null_string()));
+    return Replace(jsgraph()->HeapConstant(factory()->null_string()));
   }
   // TODO(turbofan): js-typed-lowering of ToString(x:boolean)
   // TODO(turbofan): js-typed-lowering of ToString(x:number)
@@ -549,84 +753,13 @@ Reduction JSTypedLowering::ReduceJSToStringInput(Node* input) {
 }
 
 
-Reduction JSTypedLowering::ReduceJSToBooleanInput(Node* input) {
-  if (input->opcode() == IrOpcode::kJSToBoolean) {
-    // Recursively try to reduce the input first.
-    Reduction result = ReduceJSToBooleanInput(input->InputAt(0));
-    if (result.Changed()) {
-      RelaxEffects(input);
-      return result;
-    }
-    return Changed(input);  // JSToBoolean(JSToBoolean(x)) => JSToBoolean(x)
-  }
-  Type* input_type = NodeProperties::GetBounds(input).upper;
-  if (input_type->Is(Type::Boolean())) {
-    return Changed(input);  // JSToBoolean(x:boolean) => x
-  }
-  if (input_type->Is(Type::Undefined())) {
-    // JSToBoolean(undefined) => #false
-    return ReplaceWith(jsgraph()->FalseConstant());
-  }
-  if (input_type->Is(Type::Null())) {
-    // JSToBoolean(null) => #false
-    return ReplaceWith(jsgraph()->FalseConstant());
-  }
-  if (input_type->Is(Type::DetectableReceiver())) {
-    // JSToBoolean(x:detectable) => #true
-    return ReplaceWith(jsgraph()->TrueConstant());
-  }
-  if (input_type->Is(Type::Undetectable())) {
-    // JSToBoolean(x:undetectable) => #false
-    return ReplaceWith(jsgraph()->FalseConstant());
-  }
-  if (input_type->Is(Type::OrderedNumber())) {
-    // JSToBoolean(x:ordered-number) => BooleanNot(NumberEqual(x, #0))
-    Node* cmp = graph()->NewNode(simplified()->NumberEqual(), input,
-                                 jsgraph()->ZeroConstant());
-    Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
-    return ReplaceWith(inv);
-  }
-  if (input_type->Is(Type::String())) {
-    // JSToBoolean(x:string) => BooleanNot(NumberEqual(x.length, #0))
-    FieldAccess access = AccessBuilder::ForStringLength();
-    Node* length = graph()->NewNode(simplified()->LoadField(access), input,
-                                    graph()->start(), graph()->start());
-    Node* cmp = graph()->NewNode(simplified()->NumberEqual(), length,
-                                 jsgraph()->ZeroConstant());
-    Node* inv = graph()->NewNode(simplified()->BooleanNot(), cmp);
-    return ReplaceWith(inv);
-  }
-  // TODO(turbofan): We need some kind of PrimitiveToBoolean simplified
-  // operator, then we can do the pushing in the SimplifiedOperatorReducer
-  // and do not need to protect against stack overflow (because of backedges
-  // in phis) below.
-  if (input->opcode() == IrOpcode::kPhi &&
-      input_type->Is(
-          Type::Union(Type::Boolean(), Type::OrderedNumber(), zone()))) {
-    // JSToBoolean(phi(x1,...,xn):ordered-number|boolean)
-    //   => phi(JSToBoolean(x1),...,JSToBoolean(xn))
-    int input_count = input->InputCount() - 1;
-    Node** inputs = zone()->NewArray<Node*>(input_count + 1);
-    for (int i = 0; i < input_count; ++i) {
-      Node* value = input->InputAt(i);
-      Type* value_type = NodeProperties::GetBounds(value).upper;
-      // Recursively try to reduce the value first.
-      Reduction result = (value_type->Is(Type::Boolean()) ||
-                          value_type->Is(Type::OrderedNumber()))
-                             ? ReduceJSToBooleanInput(value)
-                             : NoChange();
-      if (result.Changed()) {
-        inputs[i] = result.replacement();
-      } else {
-        inputs[i] = graph()->NewNode(javascript()->ToBoolean(), value,
-                                     jsgraph()->ZeroConstant(),
-                                     graph()->start(), graph()->start());
-      }
-    }
-    inputs[input_count] = input->InputAt(input_count);
-    Node* phi = graph()->NewNode(common()->Phi(kMachAnyTagged, input_count),
-                                 input_count + 1, inputs);
-    return ReplaceWith(phi);
+Reduction JSTypedLowering::ReduceJSToString(Node* node) {
+  // Try to reduce the input first.
+  Node* const input = node->InputAt(0);
+  Reduction reduction = ReduceJSToStringInput(input);
+  if (reduction.Changed()) {
+    NodeProperties::ReplaceWithValue(node, reduction.replacement());
+    return reduction;
   }
   return NoChange();
 }
@@ -636,30 +769,39 @@ Reduction JSTypedLowering::ReduceJSLoadProperty(Node* node) {
   Node* key = NodeProperties::GetValueInput(node, 1);
   Node* base = NodeProperties::GetValueInput(node, 0);
   Type* key_type = NodeProperties::GetBounds(key).upper;
-  Type* base_type = NodeProperties::GetBounds(base).upper;
   // TODO(mstarzinger): This lowering is not correct if:
   //   a) The typed array or its buffer is neutered.
-  if (base_type->IsConstant() && key_type->Is(Type::Integral32()) &&
-      base_type->AsConstant()->Value()->IsJSTypedArray()) {
-    // JSLoadProperty(typed-array, int32)
-    Handle<JSTypedArray> array =
-        Handle<JSTypedArray>::cast(base_type->AsConstant()->Value());
-    if (IsExternalArrayElementsKind(array->map()->elements_kind())) {
-      ExternalArrayType type = array->type();
-      double byte_length = array->byte_length()->Number();
-      if (byte_length <= kMaxInt) {
-        Handle<ExternalArray> elements =
-            Handle<ExternalArray>::cast(handle(array->elements()));
-        Node* pointer = jsgraph()->IntPtrConstant(
-            bit_cast<intptr_t>(elements->external_pointer()));
-        Node* length = jsgraph()->Constant(array->length()->Number());
-        Node* effect = NodeProperties::GetEffectInput(node);
+  HeapObjectMatcher<Object> mbase(base);
+  if (mbase.HasValue() && mbase.Value().handle()->IsJSTypedArray()) {
+    Handle<JSTypedArray> const array =
+        Handle<JSTypedArray>::cast(mbase.Value().handle());
+    array->GetBuffer()->set_is_neuterable(false);
+    BufferAccess const access(array->type());
+    size_t const k = ElementSizeLog2Of(access.machine_type());
+    double const byte_length = array->byte_length()->Number();
+    CHECK_LT(k, arraysize(shifted_int32_ranges_));
+    if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
+        key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
+      // JSLoadProperty(typed-array, int32)
+      Handle<ExternalArray> elements =
+          Handle<ExternalArray>::cast(handle(array->elements()));
+      Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
+      Node* length = jsgraph()->Constant(byte_length);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      Node* control = NodeProperties::GetControlInput(node);
+      // Check if we can avoid the bounds check.
+      if (key_type->Min() >= 0 && key_type->Max() < array->length()->Number()) {
         Node* load = graph()->NewNode(
             simplified()->LoadElement(
-                AccessBuilder::ForTypedArrayElement(type, true)),
-            pointer, key, length, effect);
+                AccessBuilder::ForTypedArrayElement(array->type(), true)),
+            buffer, key, effect, control);
         return ReplaceEagerly(node, load);
       }
+      // Compute byte offset.
+      Node* offset = Word32Shl(key, static_cast<int>(k));
+      Node* load = graph()->NewNode(simplified()->LoadBuffer(access), buffer,
+                                    offset, length, effect, control);
+      return ReplaceEagerly(node, load);
     }
   }
   return NoChange();
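
The two emitted forms differ only in whether the bounds check survives: when the key's static range is provably inside [0, length), a raw LoadElement suffices; otherwise a LoadBuffer takes the byte offset key << k and checks it at runtime. A plain C++ sketch of the checked path for a Float64 array, with NaN standing in for the undefined that an out-of-bounds typed-array read yields:

    #include <cmath>
    #include <cstdint>
    #include <cstring>

    double LoadFloat64Buffer(const uint8_t* buffer, int32_t key,
                             int32_t byte_length) {
      const int k = 3;  // log2(element size) for Float64
      if (key < 0) return std::nan("");
      int64_t offset = static_cast<int64_t>(key) << k;  // Word32Shl(key, k)
      if (offset + 8 > byte_length) return std::nan("");
      double result;
      std::memcpy(&result, buffer + offset, sizeof(result));
      return result;
    }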
@@ -671,43 +813,115 @@ Reduction JSTypedLowering::ReduceJSStoreProperty(Node* node) {
   Node* base = NodeProperties::GetValueInput(node, 0);
   Node* value = NodeProperties::GetValueInput(node, 2);
   Type* key_type = NodeProperties::GetBounds(key).upper;
-  Type* base_type = NodeProperties::GetBounds(base).upper;
+  Type* value_type = NodeProperties::GetBounds(value).upper;
   // TODO(mstarzinger): This lowering is not correct if:
   //   a) The typed array or its buffer is neutered.
-  if (key_type->Is(Type::Integral32()) && base_type->IsConstant() &&
-      base_type->AsConstant()->Value()->IsJSTypedArray()) {
-    // JSStoreProperty(typed-array, int32, value)
-    Handle<JSTypedArray> array =
-        Handle<JSTypedArray>::cast(base_type->AsConstant()->Value());
-    if (IsExternalArrayElementsKind(array->map()->elements_kind())) {
-      ExternalArrayType type = array->type();
-      double byte_length = array->byte_length()->Number();
-      if (byte_length <= kMaxInt) {
-        Handle<ExternalArray> elements =
-            Handle<ExternalArray>::cast(handle(array->elements()));
-        Node* pointer = jsgraph()->IntPtrConstant(
-            bit_cast<intptr_t>(elements->external_pointer()));
-        Node* length = jsgraph()->Constant(array->length()->Number());
-        Node* effect = NodeProperties::GetEffectInput(node);
-        Node* control = NodeProperties::GetControlInput(node);
-        Node* store = graph()->NewNode(
-            simplified()->StoreElement(
-                AccessBuilder::ForTypedArrayElement(type, true)),
-            pointer, key, length, value, effect, control);
-        return ReplaceEagerly(node, store);
+  HeapObjectMatcher<Object> mbase(base);
+  if (mbase.HasValue() && mbase.Value().handle()->IsJSTypedArray()) {
+    Handle<JSTypedArray> const array =
+        Handle<JSTypedArray>::cast(mbase.Value().handle());
+    array->GetBuffer()->set_is_neuterable(false);
+    BufferAccess const access(array->type());
+    size_t const k = ElementSizeLog2Of(access.machine_type());
+    double const byte_length = array->byte_length()->Number();
+    CHECK_LT(k, arraysize(shifted_int32_ranges_));
+    if (IsExternalArrayElementsKind(array->map()->elements_kind()) &&
+        access.external_array_type() != kExternalUint8ClampedArray &&
+        key_type->Is(shifted_int32_ranges_[k]) && byte_length <= kMaxInt) {
+      // JSStoreProperty(typed-array, int32, value)
+      Handle<ExternalArray> elements =
+          Handle<ExternalArray>::cast(handle(array->elements()));
+      Node* buffer = jsgraph()->PointerConstant(elements->external_pointer());
+      Node* length = jsgraph()->Constant(byte_length);
+      Node* context = NodeProperties::GetContextInput(node);
+      Node* effect = NodeProperties::GetEffectInput(node);
+      Node* control = NodeProperties::GetControlInput(node);
+      // Convert to a number first.
+      if (!value_type->Is(Type::Number())) {
+        Reduction number_reduction = ReduceJSToNumberInput(value);
+        if (number_reduction.Changed()) {
+          value = number_reduction.replacement();
+        } else {
+          value = effect = graph()->NewNode(javascript()->ToNumber(), value,
+                                            context, effect, control);
+        }
+      }
+      // For integer-typed arrays, convert to the integer type.
+      if (TypeOf(access.machine_type()) == kTypeInt32 &&
+          !value_type->Is(Type::Signed32())) {
+        value = graph()->NewNode(simplified()->NumberToInt32(), value);
+      } else if (TypeOf(access.machine_type()) == kTypeUint32 &&
+                 !value_type->Is(Type::Unsigned32())) {
+        value = graph()->NewNode(simplified()->NumberToUint32(), value);
+      }
+      // Check if we can avoid the bounds check.
+      if (key_type->Min() >= 0 && key_type->Max() < array->length()->Number()) {
+        node->set_op(simplified()->StoreElement(
+            AccessBuilder::ForTypedArrayElement(array->type(), true)));
+        node->ReplaceInput(0, buffer);
+        DCHECK_EQ(key, node->InputAt(1));
+        node->ReplaceInput(2, value);
+        node->ReplaceInput(3, effect);
+        node->ReplaceInput(4, control);
+        node->TrimInputCount(5);
+        return Changed(node);
       }
+      // Compute byte offset.
+      Node* offset = Word32Shl(key, static_cast<int>(k));
+      // Turn into a StoreBuffer operation.
+      node->set_op(simplified()->StoreBuffer(access));
+      node->ReplaceInput(0, buffer);
+      node->ReplaceInput(1, offset);
+      node->ReplaceInput(2, length);
+      node->ReplaceInput(3, value);
+      node->ReplaceInput(4, effect);
+      DCHECK_EQ(control, node->InputAt(5));
+      DCHECK_EQ(6, node->InputCount());
+      return Changed(node);
     }
   }
   return NoChange();
 }
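
One detail worth noting: kExternalUint8ClampedArray is excluded from the fast path above, plausibly because clamped stores saturate and round to nearest (ties to even) rather than wrapping like NumberToUint32. A sketch of that conversion, assuming the default rounding mode:

    #include <cmath>
    #include <cstdint>

    uint8_t ClampUint8(double d) {
      if (std::isnan(d) || d <= 0) return 0;
      if (d >= 255) return 255;
      return static_cast<uint8_t>(std::nearbyint(d));  // ties go to even
    }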
 
 
-static Reduction ReplaceWithReduction(Node* node, Reduction reduction) {
-  if (reduction.Changed()) {
-    NodeProperties::ReplaceWithValue(node, reduction.replacement());
-    return reduction;
+Reduction JSTypedLowering::ReduceJSLoadContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSLoadContext, node->opcode());
+  ContextAccess const& access = ContextAccessOf(node->op());
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = graph()->start();
+  for (size_t i = 0; i < access.depth(); ++i) {
+    node->ReplaceInput(
+        0, graph()->NewNode(
+               simplified()->LoadField(
+                   AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+               NodeProperties::GetValueInput(node, 0), effect, control));
+  }
+  node->set_op(
+      simplified()->LoadField(AccessBuilder::ForContextSlot(access.index())));
+  node->ReplaceInput(1, effect);
+  node->ReplaceInput(2, control);
+  DCHECK_EQ(3, node->InputCount());
+  return Changed(node);
+}
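
Operationally, the loop above rewrites a depth-d context access into d loads of the previous-context slot followed by one field load, all anchored at start control. The same walk in ordinary C++ (a hypothetical plain-data stand-in, not V8's real Context layout):

    struct Context {
      Context* previous;  // the Context::PREVIOUS_INDEX slot
      double slots[16];   // the remaining context slots
    };

    double LoadContextSlot(const Context* ctx, int depth, int index) {
      for (int i = 0; i < depth; ++i) ctx = ctx->previous;  // one hop per level
      return ctx->slots[index];                             // final LoadField
    }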
+
+
+Reduction JSTypedLowering::ReduceJSStoreContext(Node* node) {
+  DCHECK_EQ(IrOpcode::kJSStoreContext, node->opcode());
+  ContextAccess const& access = ContextAccessOf(node->op());
+  Node* const effect = NodeProperties::GetEffectInput(node);
+  Node* const control = graph()->start();
+  for (size_t i = 0; i < access.depth(); ++i) {
+    node->ReplaceInput(
+        0, graph()->NewNode(
+               simplified()->LoadField(
+                   AccessBuilder::ForContextSlot(Context::PREVIOUS_INDEX)),
+               NodeProperties::GetValueInput(node, 0), effect, control));
   }
-  return Reducer::NoChange();
+  node->set_op(
+      simplified()->StoreField(AccessBuilder::ForContextSlot(access.index())));
+  node->RemoveInput(2);
+  DCHECK_EQ(4, node->InputCount());
+  return Changed(node);
 }
 
 
@@ -739,15 +953,15 @@ Reduction JSTypedLowering::Reduce(Node* node) {
     case IrOpcode::kJSBitwiseOr:
       return ReduceJSBitwiseOr(node);
     case IrOpcode::kJSBitwiseXor:
-      return ReduceI32Binop(node, true, true, machine()->Word32Xor());
+      return ReduceInt32Binop(node, machine()->Word32Xor());
     case IrOpcode::kJSBitwiseAnd:
-      return ReduceI32Binop(node, true, true, machine()->Word32And());
+      return ReduceInt32Binop(node, machine()->Word32And());
     case IrOpcode::kJSShiftLeft:
-      return ReduceI32Shift(node, true, machine()->Word32Shl());
+      return ReduceUI32Shift(node, kSigned, machine()->Word32Shl());
     case IrOpcode::kJSShiftRight:
-      return ReduceI32Shift(node, true, machine()->Word32Sar());
+      return ReduceUI32Shift(node, kSigned, machine()->Word32Sar());
     case IrOpcode::kJSShiftRightLogical:
-      return ReduceI32Shift(node, false, machine()->Word32Shr());
+      return ReduceUI32Shift(node, kUnsigned, machine()->Word32Shr());
     case IrOpcode::kJSAdd:
       return ReduceJSAdd(node);
     case IrOpcode::kJSSubtract:
@@ -760,44 +974,112 @@ Reduction JSTypedLowering::Reduce(Node* node) {
       return ReduceNumberBinop(node, simplified()->NumberModulus());
     case IrOpcode::kJSUnaryNot: {
       Reduction result = ReduceJSToBooleanInput(node->InputAt(0));
-      Node* value;
       if (result.Changed()) {
         // JSUnaryNot(x:boolean) => BooleanNot(x)
-        value =
-            graph()->NewNode(simplified()->BooleanNot(), result.replacement());
-        NodeProperties::ReplaceWithValue(node, value);
-        return Changed(value);
+        node = result.replacement();
       } else {
         // JSUnaryNot(x) => BooleanNot(JSToBoolean(x))
-        value = graph()->NewNode(simplified()->BooleanNot(), node);
         node->set_op(javascript()->ToBoolean());
-        NodeProperties::ReplaceWithValue(node, value, node);
-        // Note: ReplaceUses() smashes all uses, so smash it back here.
-        value->ReplaceInput(0, node);
-        return Changed(node);
       }
+      Node* value = graph()->NewNode(simplified()->BooleanNot(), node);
+      return Replace(value);
     }
     case IrOpcode::kJSToBoolean:
-      return ReplaceWithReduction(node,
-                                  ReduceJSToBooleanInput(node->InputAt(0)));
+      return ReduceJSToBoolean(node);
     case IrOpcode::kJSToNumber:
-      return ReplaceWithReduction(node,
-                                  ReduceJSToNumberInput(node->InputAt(0)));
+      return ReduceJSToNumber(node);
     case IrOpcode::kJSToString:
-      return ReplaceWithReduction(node,
-                                  ReduceJSToStringInput(node->InputAt(0)));
+      return ReduceJSToString(node);
     case IrOpcode::kJSLoadProperty:
       return ReduceJSLoadProperty(node);
     case IrOpcode::kJSStoreProperty:
       return ReduceJSStoreProperty(node);
-    case IrOpcode::kJSCallFunction:
-      return JSBuiltinReducer(jsgraph()).Reduce(node);
+    case IrOpcode::kJSLoadContext:
+      return ReduceJSLoadContext(node);
+    case IrOpcode::kJSStoreContext:
+      return ReduceJSStoreContext(node);
     default:
       break;
   }
   return NoChange();
 }
 
+
+Node* JSTypedLowering::ConvertToBoolean(Node* input) {
+  // Avoid inserting too many eager ToBoolean() operations.
+  Reduction const reduction = ReduceJSToBooleanInput(input);
+  if (reduction.Changed()) return reduction.replacement();
+  Node* const conversion = graph()->NewNode(javascript()->ToBoolean(), input,
+                                            jsgraph()->NoContextConstant());
+  InsertConversion(conversion);
+  return conversion;
+}
+
+
+Node* JSTypedLowering::ConvertToNumber(Node* input) {
+  DCHECK(NodeProperties::GetBounds(input).upper->Is(Type::PlainPrimitive()));
+  // Avoid inserting too many eager ToNumber() operations.
+  Reduction const reduction = ReduceJSToNumberInput(input);
+  if (reduction.Changed()) return reduction.replacement();
+  Node* const conversion = graph()->NewNode(javascript()->ToNumber(), input,
+                                            jsgraph()->NoContextConstant(),
+                                            graph()->start(), graph()->start());
+  InsertConversion(conversion);
+  return conversion;
+}
+
+
+template <IrOpcode::Value kOpcode>
+Node* JSTypedLowering::FindConversion(Node* input) {
+  size_t const input_id = input->id();
+  if (input_id < conversions_.size()) {
+    Node* const conversion = conversions_[input_id];
+    if (conversion && conversion->opcode() == kOpcode) {
+      return conversion;
+    }
+  }
+  return nullptr;
+}
+
+
+void JSTypedLowering::InsertConversion(Node* conversion) {
+  DCHECK(conversion->opcode() == IrOpcode::kJSToBoolean ||
+         conversion->opcode() == IrOpcode::kJSToNumber);
+  size_t const input_id = conversion->InputAt(0)->id();
+  if (input_id >= conversions_.size()) {
+    conversions_.resize(2 * input_id + 1);
+  }
+  conversions_[input_id] = conversion;
+}
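
The cache is a flat vector indexed by the input node's id, grown geometrically on insert; a lookup only hits when the cached node carries the requested conversion opcode. Isolated as a sketch (Node here is a hypothetical stand-in):

    #include <cstddef>
    #include <vector>

    struct Node {
      size_t id;
      int opcode;
      Node* input;  // InputAt(0)
    };

    class ConversionCache {
     public:
      Node* Find(const Node* input, int opcode) const {
        size_t id = input->id;
        if (id < slots_.size() && slots_[id] != nullptr &&
            slots_[id]->opcode == opcode) {
          return slots_[id];
        }
        return nullptr;
      }
      void Insert(Node* conversion) {
        size_t id = conversion->input->id;
        if (id >= slots_.size()) slots_.resize(2 * id + 1);  // geometric growth
        slots_[id] = conversion;
      }

     private:
      std::vector<Node*> slots_;
    };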
+
+
+Node* JSTypedLowering::Word32Shl(Node* const lhs, int32_t const rhs) {
+  if (rhs == 0) return lhs;
+  return graph()->NewNode(machine()->Word32Shl(), lhs,
+                          jsgraph()->Int32Constant(rhs));
+}
+
+
+Factory* JSTypedLowering::factory() const { return jsgraph()->factory(); }
+
+
+Graph* JSTypedLowering::graph() const { return jsgraph()->graph(); }
+
+
+JSOperatorBuilder* JSTypedLowering::javascript() const {
+  return jsgraph()->javascript();
+}
+
+
+CommonOperatorBuilder* JSTypedLowering::common() const {
+  return jsgraph()->common();
+}
+
+
+MachineOperatorBuilder* JSTypedLowering::machine() const {
+  return jsgraph()->machine();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index 1b7529f..aa7510b 100644 (file)
@@ -6,58 +6,75 @@
 #define V8_COMPILER_JS_TYPED_LOWERING_H_
 
 #include "src/compiler/graph-reducer.h"
-#include "src/compiler/js-graph.h"
-#include "src/compiler/machine-operator.h"
-#include "src/compiler/node.h"
 #include "src/compiler/simplified-operator.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class CommonOperatorBuilder;
+class JSGraph;
+class JSOperatorBuilder;
+class MachineOperatorBuilder;
+
+
 // Lowers JS-level operators to simplified operators based on types.
 class JSTypedLowering FINAL : public Reducer {
  public:
-  explicit JSTypedLowering(JSGraph* jsgraph);
-  virtual ~JSTypedLowering();
+  JSTypedLowering(JSGraph* jsgraph, Zone* zone);
+  ~JSTypedLowering() FINAL {}
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
-
-  JSGraph* jsgraph() { return jsgraph_; }
-  Graph* graph() { return jsgraph_->graph(); }
-  Zone* zone() { return jsgraph_->zone(); }
+  Reduction Reduce(Node* node) FINAL;
 
  private:
   friend class JSBinopReduction;
 
   Reduction ReplaceEagerly(Node* old, Node* node);
-  Reduction ReplaceWith(Node* node) { return Reducer::Replace(node); }
   Reduction ReduceJSAdd(Node* node);
   Reduction ReduceJSBitwiseOr(Node* node);
   Reduction ReduceJSMultiply(Node* node);
   Reduction ReduceJSComparison(Node* node);
   Reduction ReduceJSLoadProperty(Node* node);
   Reduction ReduceJSStoreProperty(Node* node);
+  Reduction ReduceJSLoadContext(Node* node);
+  Reduction ReduceJSStoreContext(Node* node);
   Reduction ReduceJSEqual(Node* node, bool invert);
   Reduction ReduceJSStrictEqual(Node* node, bool invert);
+  Reduction ReduceJSToBooleanInput(Node* input);
+  Reduction ReduceJSToBoolean(Node* node);
   Reduction ReduceJSToNumberInput(Node* input);
+  Reduction ReduceJSToNumber(Node* node);
   Reduction ReduceJSToStringInput(Node* input);
-  Reduction ReduceJSToBooleanInput(Node* input);
+  Reduction ReduceJSToString(Node* node);
   Reduction ReduceNumberBinop(Node* node, const Operator* numberOp);
-  Reduction ReduceI32Binop(Node* node, bool left_signed, bool right_signed,
-                           const Operator* intOp);
-  Reduction ReduceI32Shift(Node* node, bool left_signed,
-                           const Operator* shift_op);
+  Reduction ReduceInt32Binop(Node* node, const Operator* intOp);
+  Reduction ReduceUI32Shift(Node* node, Signedness left_signedness,
+                            const Operator* shift_op);
+
+  Node* ConvertToBoolean(Node* input);
+  Node* ConvertToNumber(Node* input);
+  template <IrOpcode::Value>
+  Node* FindConversion(Node* input);
+  void InsertConversion(Node* conversion);
+
+  Node* Word32Shl(Node* const lhs, int32_t const rhs);
 
-  JSOperatorBuilder* javascript() { return jsgraph_->javascript(); }
-  CommonOperatorBuilder* common() { return jsgraph_->common(); }
+  Factory* factory() const;
+  Graph* graph() const;
+  JSGraph* jsgraph() const { return jsgraph_; }
+  JSOperatorBuilder* javascript() const;
+  CommonOperatorBuilder* common() const;
   SimplifiedOperatorBuilder* simplified() { return &simplified_; }
-  MachineOperatorBuilder* machine() { return jsgraph_->machine(); }
+  MachineOperatorBuilder* machine() const;
 
   JSGraph* jsgraph_;
   SimplifiedOperatorBuilder simplified_;
+  ZoneVector<Node*> conversions_;  // Cache inserted JSToXXX() conversions.
   Type* zero_range_;
   Type* one_range_;
+  Type* zero_thirtyone_range_;
+  Type* shifted_int32_ranges_[4];
 };
 
 }  // namespace compiler
diff --git a/deps/v8/src/compiler/jump-threading.cc b/deps/v8/src/compiler/jump-threading.cc
new file mode 100644 (file)
index 0000000..f0bb731
--- /dev/null
@@ -0,0 +1,198 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/jump-threading.h"
+#include "src/compiler/code-generator-impl.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef BasicBlock::RpoNumber RpoNumber;
+
+#define TRACE(x) \
+  if (FLAG_trace_turbo_jt) PrintF x
+
+struct JumpThreadingState {
+  bool forwarded;
+  ZoneVector<RpoNumber>& result;
+  ZoneStack<RpoNumber>& stack;
+
+  void Clear(size_t count) { result.assign(count, unvisited()); }
+  void PushIfUnvisited(RpoNumber num) {
+    if (result[num.ToInt()] == unvisited()) {
+      stack.push(num);
+      result[num.ToInt()] = onstack();
+    }
+  }
+  void Forward(RpoNumber to) {
+    RpoNumber from = stack.top();
+    RpoNumber to_to = result[to.ToInt()];
+    bool pop = true;
+    if (to == from) {
+      TRACE(("  xx %d\n", from.ToInt()));
+      result[from.ToInt()] = from;
+    } else if (to_to == unvisited()) {
+      TRACE(("  fw %d -> %d (recurse)\n", from.ToInt(), to.ToInt()));
+      stack.push(to);
+      result[to.ToInt()] = onstack();
+      pop = false;  // recurse.
+    } else if (to_to == onstack()) {
+      TRACE(("  fw %d -> %d (cycle)\n", from.ToInt(), to.ToInt()));
+      result[from.ToInt()] = to;  // break the cycle.
+      forwarded = true;
+    } else {
+      TRACE(("  fw %d -> %d (forward)\n", from.ToInt(), to.ToInt()));
+      result[from.ToInt()] = to_to;  // forward the block.
+      forwarded = true;
+    }
+    if (pop) stack.pop();
+  }
+  RpoNumber unvisited() { return RpoNumber::FromInt(-1); }
+  RpoNumber onstack() { return RpoNumber::FromInt(-2); }
+};
+
+
+bool JumpThreading::ComputeForwarding(Zone* local_zone,
+                                      ZoneVector<RpoNumber>& result,
+                                      InstructionSequence* code) {
+  ZoneStack<RpoNumber> stack(local_zone);
+  JumpThreadingState state = {false, result, stack};
+  state.Clear(code->InstructionBlockCount());
+
+  // Iterate over the blocks forward, pushing the blocks onto the stack.
+  for (auto const block : code->instruction_blocks()) {
+    RpoNumber current = block->rpo_number();
+    state.PushIfUnvisited(current);
+
+    // Process the stack, which implements DFS through empty blocks.
+    while (!state.stack.empty()) {
+      InstructionBlock* block = code->InstructionBlockAt(state.stack.top());
+      // Process the instructions in a block up to a non-empty instruction.
+      TRACE(("jt [%d] B%d RPO%d\n", static_cast<int>(stack.size()),
+             block->id().ToInt(), block->rpo_number().ToInt()));
+      bool fallthru = true;
+      RpoNumber fw = block->rpo_number();
+      for (int i = block->code_start(); i < block->code_end(); ++i) {
+        Instruction* instr = code->InstructionAt(i);
+        if (instr->IsGapMoves() && GapInstruction::cast(instr)->IsRedundant()) {
+          // skip redundant gap moves.
+          TRACE(("  nop gap\n"));
+          continue;
+        } else if (instr->IsSourcePosition()) {
+          // skip source positions.
+          TRACE(("  src pos\n"));
+          continue;
+        } else if (FlagsModeField::decode(instr->opcode()) != kFlags_none) {
+          // can't skip instructions with flags continuations.
+          TRACE(("  flags\n"));
+          fallthru = false;
+        } else if (instr->IsNop()) {
+          // skip nops.
+          TRACE(("  nop\n"));
+          continue;
+        } else if (instr->arch_opcode() == kArchJmp) {
+          // try to forward the jump instruction.
+          TRACE(("  jmp\n"));
+          fw = code->InputRpo(instr, 0);
+          fallthru = false;
+        } else {
+          // can't skip other instructions.
+          TRACE(("  other\n"));
+          fallthru = false;
+        }
+        break;
+      }
+      if (fallthru) {
+        int next = 1 + block->rpo_number().ToInt();
+        if (next < code->InstructionBlockCount()) fw = RpoNumber::FromInt(next);
+      }
+      state.Forward(fw);
+    }
+  }
+
+#ifdef DEBUG
+  for (RpoNumber num : result) {
+    CHECK(num.IsValid());
+  }
+#endif
+
+  if (FLAG_trace_turbo_jt) {
+    for (int i = 0; i < static_cast<int>(result.size()); i++) {
+      TRACE(("RPO%d B%d ", i,
+             code->InstructionBlockAt(RpoNumber::FromInt(i))->id().ToInt()));
+      int to = result[i].ToInt();
+      if (i != to) {
+        TRACE(("-> B%d\n",
+               code->InstructionBlockAt(RpoNumber::FromInt(to))->id().ToInt()));
+      } else {
+        TRACE(("\n"));
+      }
+    }
+  }
+
+  return state.forwarded;
+}
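
Stripped of the instruction scanning, the computation is a transitive "follow the chain of jump-only blocks" map; the on-stack marking above exists to break cycles of empty blocks. A simplified version for intuition, assuming an acyclic chain and a plain vector instead of an InstructionSequence:

    #include <vector>

    // next[i] is the jump target of block i when the block is empty apart
    // from a jump, or -1 when the block contains a real instruction.
    std::vector<int> ComputeForwarding(const std::vector<int>& next) {
      std::vector<int> result(next.size());
      for (int i = 0; i < static_cast<int>(next.size()); ++i) {
        int to = i;
        while (next[to] >= 0) to = next[to];  // skip empty jump-only blocks
        result[i] = to;
      }
      return result;
    }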
+
+
+void JumpThreading::ApplyForwarding(ZoneVector<RpoNumber>& result,
+                                    InstructionSequence* code) {
+  if (!FLAG_turbo_jt) return;
+
+  Zone local_zone(code->zone()->isolate());
+  ZoneVector<bool> skip(static_cast<int>(result.size()), false, &local_zone);
+
+  // Skip empty blocks when the previous block doesn't fall through.
+  bool prev_fallthru = true;
+  for (auto const block : code->instruction_blocks()) {
+    int block_num = block->rpo_number().ToInt();
+    skip[block_num] = !prev_fallthru && result[block_num].ToInt() != block_num;
+
+    bool fallthru = true;
+    for (int i = block->code_start(); i < block->code_end(); ++i) {
+      Instruction* instr = code->InstructionAt(i);
+      if (FlagsModeField::decode(instr->opcode()) == kFlags_branch) {
+        fallthru = false;  // branches don't fall through to the next block.
+      } else if (instr->arch_opcode() == kArchJmp) {
+        if (skip[block_num]) {
+          // Overwrite a redundant jump with a nop.
+          TRACE(("jt-fw nop @%d\n", i));
+          instr->OverwriteWithNop();
+        }
+        fallthru = false;  // jumps don't fall through to the next block.
+      }
+    }
+    prev_fallthru = fallthru;
+  }
+
+  // Patch RPO immediates.
+  InstructionSequence::Immediates& immediates = code->immediates();
+  for (size_t i = 0; i < immediates.size(); i++) {
+    Constant constant = immediates[i];
+    if (constant.type() == Constant::kRpoNumber) {
+      RpoNumber rpo = constant.ToRpoNumber();
+      RpoNumber fw = result[rpo.ToInt()];
+      if (!(fw == rpo)) immediates[i] = Constant(fw);
+    }
+  }
+
+  // Recompute assembly order numbers.
+  int ao = 0;
+  for (auto const block : code->instruction_blocks()) {
+    if (!block->IsDeferred()) {
+      block->set_ao_number(RpoNumber::FromInt(ao));
+      if (!skip[block->rpo_number().ToInt()]) ao++;
+    }
+  }
+  for (auto const block : code->instruction_blocks()) {
+    if (block->IsDeferred()) {
+      block->set_ao_number(RpoNumber::FromInt(ao));
+      if (!skip[block->rpo_number().ToInt()]) ao++;
+    }
+  }
+}
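
The renumbering at the end keeps all non-deferred blocks ahead of deferred ones and lets a skipped block share its number with the next emitted block. The same two-pass scheme in isolation (a sketch over plain vectors, not the real InstructionBlock API):

    #include <vector>

    std::vector<int> AssignAoNumbers(const std::vector<bool>& deferred,
                                     const std::vector<bool>& skip) {
      std::vector<int> ao(deferred.size());
      int next = 0;
      for (int pass = 0; pass < 2; ++pass) {         // pass 0: non-deferred
        for (size_t i = 0; i < deferred.size(); ++i) {
          if (deferred[i] != (pass == 1)) continue;  // pass 1: deferred
          ao[i] = next;
          if (!skip[i]) ++next;  // skipped blocks reuse the next number
        }
      }
      return ao;
    }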
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/compiler/jump-threading.h b/deps/v8/src/compiler/jump-threading.h
new file mode 100644 (file)
index 0000000..b801fec
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_JUMP_THREADING_H_
+#define V8_COMPILER_JUMP_THREADING_H_
+
+#include "src/compiler/instruction.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// Forwards jumps that target empty basic blocks ending in another jump,
+// redirecting them to that jump's destination, transitively.
+class JumpThreading {
+ public:
+  // Compute the forwarding map of basic blocks to their ultimate destination.
+  // Returns {true} if there is at least one block that is forwarded.
+  static bool ComputeForwarding(Zone* local_zone,
+                                ZoneVector<BasicBlock::RpoNumber>& result,
+                                InstructionSequence* code);
+
+  // Rewrite the instructions to forward jumps and branches.
+  // May also negate some branches.
+  static void ApplyForwarding(ZoneVector<BasicBlock::RpoNumber>& forwarding,
+                              InstructionSequence* code);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_JUMP_THREADING_H_
index c964eee..c13bd74 100644 (file)
@@ -134,7 +134,8 @@ class LinkageHelper {
   // TODO(turbofan): cache call descriptors for code stub calls.
   static CallDescriptor* GetStubCallDescriptor(
       Zone* zone, const CallInterfaceDescriptor& descriptor,
-      int stack_parameter_count, CallDescriptor::Flags flags) {
+      int stack_parameter_count, CallDescriptor::Flags flags,
+      Operator::Properties properties) {
     const int register_parameter_count =
         descriptor.GetEnvironmentParameterCount();
     const int js_parameter_count =
@@ -178,7 +179,7 @@ class LinkageHelper {
         types.Build(),                    // machine_sig
         locations.Build(),                // location_sig
         js_parameter_count,               // js_parameter_count
-        Operator::kNoProperties,          // properties
+        properties,                       // properties
         kNoCalleeSaved,                   // callee-saved registers
         flags,                            // flags
         descriptor.DebugName(zone->isolate()));
index a97e484..fc6b19e 100644 (file)
@@ -56,7 +56,8 @@ CallDescriptor* Linkage::ComputeIncoming(Zone* zone, CompilationInfo* info) {
     // Use the code stub interface descriptor.
     CallInterfaceDescriptor descriptor =
         info->code_stub()->GetCallInterfaceDescriptor();
-    return GetStubCallDescriptor(descriptor, 0, CallDescriptor::kNoFlags, zone);
+    return GetStubCallDescriptor(descriptor, 0, CallDescriptor::kNoFlags,
+                                 Operator::kNoProperties, zone);
   }
   return NULL;  // TODO(titzer): ?
 }
@@ -105,8 +106,9 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
     const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags) const {
-  return GetStubCallDescriptor(descriptor, stack_parameter_count, flags, zone_);
+    CallDescriptor::Flags flags, Operator::Properties properties) const {
+  return GetStubCallDescriptor(descriptor, stack_parameter_count, flags,
+                               properties, zone_);
 }
 
 
@@ -183,9 +185,10 @@ bool Linkage::NeedsFrameState(Runtime::FunctionId function) {
     case Runtime::kPreventExtensions:
     case Runtime::kPromiseRejectEvent:
     case Runtime::kPromiseRevokeReject:
-    case Runtime::kRegExpCompile:
+    case Runtime::kRegExpInitializeAndCompile:
     case Runtime::kRegExpExecMultiple:
     case Runtime::kResolvePossiblyDirectEval:
+    case Runtime::kRunMicrotasks:
     case Runtime::kSetPrototype:
     case Runtime::kSetScriptBreakPoint:
     case Runtime::kSparseJoinWithSeparator:
@@ -237,7 +240,8 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
     const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    CallDescriptor::Flags flags, Operator::Properties properties,
+    Zone* zone) {
   UNIMPLEMENTED();
   return NULL;
 }
index 60a2142..0ad0761 100644 (file)
@@ -187,10 +187,11 @@ class Linkage : public ZoneObject {
 
   CallDescriptor* GetStubCallDescriptor(
       const CallInterfaceDescriptor& descriptor, int stack_parameter_count = 0,
-      CallDescriptor::Flags flags = CallDescriptor::kNoFlags) const;
+      CallDescriptor::Flags flags = CallDescriptor::kNoFlags,
+      Operator::Properties properties = Operator::kNoProperties) const;
   static CallDescriptor* GetStubCallDescriptor(
       const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
-      CallDescriptor::Flags flags, Zone* zone);
+      CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone);
 
   // Creates a call descriptor for simplified C calls that is appropriate
   // for the host platform. This simplified calling convention only supports
diff --git a/deps/v8/src/compiler/load-elimination.cc b/deps/v8/src/compiler/load-elimination.cc
new file mode 100644 (file)
index 0000000..fe0714e
--- /dev/null
@@ -0,0 +1,76 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/load-elimination.h"
+
+#include "src/compiler/node-properties-inl.h"
+#include "src/compiler/simplified-operator.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+LoadElimination::~LoadElimination() {}
+
+
+Reduction LoadElimination::Reduce(Node* node) {
+  switch (node->opcode()) {
+    case IrOpcode::kLoadField:
+      return ReduceLoadField(node);
+    default:
+      break;
+  }
+  return NoChange();
+}
+
+
+Reduction LoadElimination::ReduceLoadField(Node* node) {
+  DCHECK_EQ(IrOpcode::kLoadField, node->opcode());
+  FieldAccess const access = FieldAccessOf(node->op());
+  Node* const object = NodeProperties::GetValueInput(node, 0);
+  for (Node* effect = NodeProperties::GetEffectInput(node);;
+       effect = NodeProperties::GetEffectInput(effect)) {
+    switch (effect->opcode()) {
+      case IrOpcode::kLoadField: {
+        if (object == NodeProperties::GetValueInput(effect, 0) &&
+            access == FieldAccessOf(effect->op())) {
+          Node* const value = effect;
+          NodeProperties::ReplaceWithValue(node, value);
+          return Replace(value);
+        }
+        break;
+      }
+      case IrOpcode::kStoreField: {
+        if (access == FieldAccessOf(effect->op())) {
+          if (object == NodeProperties::GetValueInput(effect, 0)) {
+            Node* const value = NodeProperties::GetValueInput(effect, 1);
+            NodeProperties::ReplaceWithValue(node, value);
+            return Replace(value);
+          }
+          // TODO(turbofan): Alias analysis to the rescue?
+          return NoChange();
+        }
+        break;
+      }
+      case IrOpcode::kStoreBuffer:
+      case IrOpcode::kStoreElement: {
+        // These can never interfere with field loads.
+        break;
+      }
+      default: {
+        if (!effect->op()->HasProperty(Operator::kNoWrite) ||
+            effect->op()->EffectInputCount() != 1) {
+          return NoChange();
+        }
+        break;
+      }
+    }
+  }
+  UNREACHABLE();
+  return NoChange();
+}
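
The walk above moves backwards along the effect chain: an earlier load or store of the same (object, field) pair supplies the value, a store to the same field on a different object gives up because of possible aliasing, and any effect that may write ends the search. A toy model of the same policy (plain structs, not V8 nodes):

    struct Effect {
      enum Kind { kLoadField, kStoreField, kOtherNoWrite, kOtherWrite } kind;
      int object;      // id of the object operand
      int field;       // identity of the field access
      int value;       // value loaded or stored
      Effect* before;  // previous node on the effect chain
    };

    // Returns the value a redundant load can reuse, or nullptr to give up.
    const int* FindReplacement(const Effect* load) {
      for (const Effect* e = load->before; e != nullptr; e = e->before) {
        switch (e->kind) {
          case Effect::kLoadField:
            if (e->object == load->object && e->field == load->field)
              return &e->value;  // the same load is already on the chain
            break;
          case Effect::kStoreField:
            if (e->field == load->field) {
              if (e->object == load->object) return &e->value;
              return nullptr;  // same field, other object: may alias
            }
            break;
          case Effect::kOtherNoWrite:
            break;  // pure reads cannot clobber the field
          case Effect::kOtherWrite:
            return nullptr;  // unknown write: stop the walk
        }
      }
      return nullptr;
    }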
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/compiler/load-elimination.h b/deps/v8/src/compiler/load-elimination.h
new file mode 100644 (file)
index 0000000..6917ce3
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOAD_ELIMINATION_H_
+#define V8_COMPILER_LOAD_ELIMINATION_H_
+
+#include "src/compiler/graph-reducer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LoadElimination FINAL : public Reducer {
+ public:
+  LoadElimination() {}
+  ~LoadElimination() FINAL;
+
+  Reduction Reduce(Node* node) FINAL;
+
+ private:
+  Reduction ReduceLoadField(Node* node);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_LOAD_ELIMINATION_H_
diff --git a/deps/v8/src/compiler/loop-analysis.cc b/deps/v8/src/compiler/loop-analysis.cc
new file mode 100644 (file)
index 0000000..e1b703e
--- /dev/null
@@ -0,0 +1,411 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/graph.h"
+#include "src/compiler/loop-analysis.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef uint32_t LoopMarks;
+
+
+// TODO(titzer): don't assume entry edges have a particular index.
+// TODO(titzer): use a BitMatrix to generalize this algorithm.
+static const size_t kMaxLoops = 31;
+static const int kAssumedLoopEntryIndex = 0;  // assume loops are entered here.
+static const LoopMarks kVisited = 1;          // loop #0 is reserved.
+
+
+// Temporary information for each node during marking.
+struct NodeInfo {
+  Node* node;
+  NodeInfo* next;       // link in chaining loop members
+  LoopMarks forward;    // accumulated marks in the forward direction
+  LoopMarks backward;   // accumulated marks in the backward direction
+  LoopMarks loop_mark;  // loop mark for header nodes; encodes loop_num
+
+  bool MarkBackward(LoopMarks bw) {
+    LoopMarks prev = backward;
+    LoopMarks next = backward | bw;
+    backward = next;
+    return prev != next;
+  }
+
+  bool MarkForward(LoopMarks fw) {
+    LoopMarks prev = forward;
+    LoopMarks next = forward | fw;
+    forward = next;
+    return prev != next;
+  }
+
+  bool IsInLoop(size_t loop_num) {
+    DCHECK(loop_num > 0 && loop_num <= 31);
+    return forward & backward & (1 << loop_num);
+  }
+
+  bool IsLoopHeader() { return loop_mark != 0; }
+  bool IsInAnyLoop() { return (forward & backward) > kVisited; }
+
+  bool IsInHeaderForLoop(size_t loop_num) {
+    DCHECK(loop_num > 0);
+    return loop_mark == (kVisited | (1 << loop_num));
+  }
+};
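
With the two mark words in place, membership is a pure bit test: a node is inside loop n exactly when it is both reachable from the loop header going forward and able to reach a backedge going backward. The test in isolation:

    #include <cstdint>

    bool IsInLoop(uint32_t forward, uint32_t backward, unsigned loop_num) {
      // Bit 0 is the kVisited bit, so loop numbers start at 1.
      return (forward & backward & (1u << loop_num)) != 0;
    }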
+
+
+// Temporary loop info needed during traversal and building the loop tree.
+struct LoopInfo {
+  Node* header;
+  NodeInfo* header_list;
+  NodeInfo* body_list;
+  LoopTree::Loop* loop;
+};
+
+
+static const NodeInfo kEmptyNodeInfo = {nullptr, nullptr, 0, 0, 0};
+
+
+// Encapsulation of the loop finding algorithm.
+// -----------------------------------------------------------------------------
+// Conceptually, the contents of a loop are those nodes that are "between" the
+// loop header and the backedges of the loop. Graphs in the soup of nodes can
+// form improper cycles, so standard loop finding algorithms that work on CFGs
+// aren't sufficient. However, in valid TurboFan graphs, all cycles involve
+// either a {Loop} node or a phi. The {Loop} node itself and its accompanying
+// phis are treated together as a set referred to here as the loop header.
+// This loop finding algorithm works by traversing the graph in two directions,
+// first from nodes to their inputs, starting at {end}, then in the reverse
+// direction, from nodes to their uses, starting at loop headers.
+// One bit per loop per node per direction is required during the marking phase.
+// To handle nested loops correctly, the algorithm must filter some reachability
+// marks on edges into/out-of the loop header nodes.
+class LoopFinderImpl {
+ public:
+  LoopFinderImpl(Graph* graph, LoopTree* loop_tree, Zone* zone)
+      : end_(graph->end()),
+        queue_(zone),
+        queued_(graph, 2),
+        info_(graph->NodeCount(), kEmptyNodeInfo, zone),
+        loops_(zone),
+        loop_tree_(loop_tree),
+        loops_found_(0) {}
+
+  void Run() {
+    PropagateBackward();
+    PropagateForward();
+    FinishLoopTree();
+  }
+
+  void Print() {
+    // Print out the results.
+    for (NodeInfo& ni : info_) {
+      if (ni.node == nullptr) continue;
+      for (size_t i = 1; i <= loops_.size(); i++) {
+        if (ni.IsInLoop(i)) {
+          PrintF("X");
+        } else if (ni.forward & (1 << i)) {
+          PrintF("/");
+        } else if (ni.backward & (1 << i)) {
+          PrintF("\\");
+        } else {
+          PrintF(" ");
+        }
+      }
+      PrintF(" #%d:%s\n", ni.node->id(), ni.node->op()->mnemonic());
+    }
+
+    int i = 0;
+    for (LoopInfo& li : loops_) {
+      PrintF("Loop %d headed at #%d\n", i, li.header->id());
+      i++;
+    }
+
+    for (LoopTree::Loop* loop : loop_tree_->outer_loops_) {
+      PrintLoop(loop);
+    }
+  }
+
+ private:
+  Node* end_;
+  NodeDeque queue_;
+  NodeMarker<bool> queued_;
+  ZoneVector<NodeInfo> info_;
+  ZoneVector<LoopInfo> loops_;
+  LoopTree* loop_tree_;
+  size_t loops_found_;
+
+  // Propagate marks backward from loop headers.
+  void PropagateBackward() {
+    PropagateBackward(end_, kVisited);
+
+    while (!queue_.empty()) {
+      Node* node = queue_.front();
+      queue_.pop_front();
+      queued_.Set(node, false);
+
+      // Setup loop headers first.
+      if (node->opcode() == IrOpcode::kLoop) {
+        // found the loop node first.
+        CreateLoopInfo(node);
+      } else if (node->opcode() == IrOpcode::kPhi ||
+                 node->opcode() == IrOpcode::kEffectPhi) {
+        // found a phi first.
+        Node* merge = node->InputAt(node->InputCount() - 1);
+        if (merge->opcode() == IrOpcode::kLoop) CreateLoopInfo(merge);
+      }
+
+      // Propagate reachability marks backwards from this node.
+      NodeInfo& ni = info(node);
+      if (ni.IsLoopHeader()) {
+        // Handle edges from loop header nodes specially.
+        for (int i = 0; i < node->InputCount(); i++) {
+          if (i == kAssumedLoopEntryIndex) {
+            // Don't propagate the loop mark backwards on the entry edge.
+            PropagateBackward(node->InputAt(0),
+                              kVisited | (ni.backward & ~ni.loop_mark));
+          } else {
+            // Only propagate the loop mark on backedges.
+            PropagateBackward(node->InputAt(i), ni.loop_mark);
+          }
+        }
+      } else {
+        // Propagate all loop marks backwards for a normal node.
+        for (Node* const input : node->inputs()) {
+          PropagateBackward(input, ni.backward);
+        }
+      }
+    }
+  }
+
+  // Make a new loop header for the given node.
+  void CreateLoopInfo(Node* node) {
+    NodeInfo& ni = info(node);
+    if (ni.IsLoopHeader()) return;  // loop already set up.
+
+    loops_found_++;
+    size_t loop_num = loops_.size() + 1;
+    CHECK(loops_found_ <= kMaxLoops);  // TODO(titzer): don't crash.
+    // Create a new loop.
+    loops_.push_back({node, nullptr, nullptr, nullptr});
+    loop_tree_->NewLoop();
+    LoopMarks loop_mark = kVisited | (1 << loop_num);
+    ni.node = node;
+    ni.loop_mark = loop_mark;
+
+    // Setup loop mark for phis attached to loop header.
+    for (Node* use : node->uses()) {
+      if (use->opcode() == IrOpcode::kPhi ||
+          use->opcode() == IrOpcode::kEffectPhi) {
+        info(use).loop_mark = loop_mark;
+      }
+    }
+  }
+
+  // Propagate marks forward from loops.
+  void PropagateForward() {
+    for (LoopInfo& li : loops_) {
+      queued_.Set(li.header, true);
+      queue_.push_back(li.header);
+      NodeInfo& ni = info(li.header);
+      ni.forward = ni.loop_mark;
+    }
+    // Propagate forward on paths that were backward reachable from backedges.
+    while (!queue_.empty()) {
+      Node* node = queue_.front();
+      queue_.pop_front();
+      queued_.Set(node, false);
+      NodeInfo& ni = info(node);
+      for (Edge edge : node->use_edges()) {
+        Node* use = edge.from();
+        NodeInfo& ui = info(use);
+        if (IsBackedge(use, ui, edge)) continue;  // skip backedges.
+        LoopMarks both = ni.forward & ui.backward;
+        if (ui.MarkForward(both) && !queued_.Get(use)) {
+          queued_.Set(use, true);
+          queue_.push_back(use);
+        }
+      }
+    }
+  }
+
+  bool IsBackedge(Node* use, NodeInfo& ui, Edge& edge) {
+    // TODO(titzer): checking for backedges here is ugly.
+    if (!ui.IsLoopHeader()) return false;
+    if (edge.index() == kAssumedLoopEntryIndex) return false;
+    if (use->opcode() == IrOpcode::kPhi ||
+        use->opcode() == IrOpcode::kEffectPhi) {
+      return !NodeProperties::IsControlEdge(edge);
+    }
+    return true;
+  }
+
+  NodeInfo& info(Node* node) {
+    NodeInfo& i = info_[node->id()];
+    if (i.node == nullptr) i.node = node;
+    return i;
+  }
+
+  void PropagateBackward(Node* node, LoopMarks marks) {
+    if (info(node).MarkBackward(marks) && !queued_.Get(node)) {
+      queue_.push_back(node);
+      queued_.Set(node, true);
+    }
+  }
+
+  void FinishLoopTree() {
+    // Degenerate cases.
+    if (loops_.size() == 0) return;
+    if (loops_.size() == 1) return FinishSingleLoop();
+
+    for (size_t i = 1; i <= loops_.size(); i++) ConnectLoopTree(i);
+
+    size_t count = 0;
+    // Place each node into the innermost nested loop of which it is a member.
+    for (NodeInfo& ni : info_) {
+      if (ni.node == nullptr || !ni.IsInAnyLoop()) continue;
+
+      LoopInfo* innermost = nullptr;
+      size_t index = 0;
+      for (size_t i = 1; i <= loops_.size(); i++) {
+        if (ni.IsInLoop(i)) {
+          LoopInfo* loop = &loops_[i - 1];
+          if (innermost == nullptr ||
+              loop->loop->depth_ > innermost->loop->depth_) {
+            innermost = loop;
+            index = i;
+          }
+        }
+      }
+      if (ni.IsInHeaderForLoop(index)) {
+        ni.next = innermost->header_list;
+        innermost->header_list = &ni;
+      } else {
+        ni.next = innermost->body_list;
+        innermost->body_list = &ni;
+      }
+      count++;
+    }
+
+    // Serialize the node lists for loops into the loop tree.
+    loop_tree_->loop_nodes_.reserve(count);
+    for (LoopTree::Loop* loop : loop_tree_->outer_loops_) {
+      SerializeLoop(loop);
+    }
+  }
+
+  // Handle the simpler case of a single loop (no checks for nesting necessary).
+  void FinishSingleLoop() {
+    DCHECK(loops_.size() == 1);
+    DCHECK(loop_tree_->all_loops_.size() == 1);
+
+    // Place nodes into the loop header and body.
+    LoopInfo* li = &loops_[0];
+    li->loop = &loop_tree_->all_loops_[0];
+    loop_tree_->SetParent(nullptr, li->loop);
+    size_t count = 0;
+    for (NodeInfo& ni : info_) {
+      if (ni.node == nullptr || !ni.IsInAnyLoop()) continue;
+      DCHECK(ni.IsInLoop(1));
+      if (ni.IsInHeaderForLoop(1)) {
+        ni.next = li->header_list;
+        li->header_list = &ni;
+      } else {
+        ni.next = li->body_list;
+        li->body_list = &ni;
+      }
+      count++;
+    }
+
+    // Serialize the node lists for the loop into the loop tree.
+    loop_tree_->loop_nodes_.reserve(count);
+    SerializeLoop(li->loop);
+  }
+
+  // Recursively serialize the list of header nodes and body nodes
+  // so that nested loops occupy nested intervals.
+  void SerializeLoop(LoopTree::Loop* loop) {
+    size_t loop_num = loop_tree_->LoopNum(loop);
+    LoopInfo& li = loops_[loop_num - 1];
+
+    // Serialize the header.
+    loop->header_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
+    for (NodeInfo* ni = li.header_list; ni != nullptr; ni = ni->next) {
+      loop_tree_->loop_nodes_.push_back(ni->node);
+      // TODO(titzer): lift loop count restriction.
+      loop_tree_->node_to_loop_num_[ni->node->id()] =
+          static_cast<uint8_t>(loop_num);
+    }
+
+    // Serialize the body.
+    loop->body_start_ = static_cast<int>(loop_tree_->loop_nodes_.size());
+    for (NodeInfo* ni = li.body_list; ni != nullptr; ni = ni->next) {
+      loop_tree_->loop_nodes_.push_back(ni->node);
+      // TODO(titzer): lift loop count restriction.
+      loop_tree_->node_to_loop_num_[ni->node->id()] =
+          static_cast<uint8_t>(loop_num);
+    }
+
+    // Serialize nested loops.
+    for (LoopTree::Loop* child : loop->children_) SerializeLoop(child);
+
+    loop->body_end_ = static_cast<int>(loop_tree_->loop_nodes_.size());
+  }
+
+  // Connect the LoopTree loops to their parents recursively.
+  LoopTree::Loop* ConnectLoopTree(size_t loop_num) {
+    LoopInfo& li = loops_[loop_num - 1];
+    if (li.loop != nullptr) return li.loop;
+
+    NodeInfo& ni = info(li.header);
+    LoopTree::Loop* parent = nullptr;
+    for (size_t i = 1; i <= loops_.size(); i++) {
+      if (i == loop_num) continue;
+      if (ni.IsInLoop(i)) {
+        // recursively create potential parent loops first.
+        LoopTree::Loop* upper = ConnectLoopTree(i);
+        if (parent == nullptr || upper->depth_ > parent->depth_) {
+          parent = upper;
+        }
+      }
+    }
+    li.loop = &loop_tree_->all_loops_[loop_num - 1];
+    loop_tree_->SetParent(parent, li.loop);
+    return li.loop;
+  }
+
+  void PrintLoop(LoopTree::Loop* loop) {
+    for (int i = 0; i < loop->depth_; i++) PrintF("  ");
+    PrintF("Loop depth = %d ", loop->depth_);
+    int i = loop->header_start_;
+    while (i < loop->body_start_) {
+      PrintF(" H#%d", loop_tree_->loop_nodes_[i++]->id());
+    }
+    while (i < loop->body_end_) {
+      PrintF(" B#%d", loop_tree_->loop_nodes_[i++]->id());
+    }
+    PrintF("\n");
+    for (LoopTree::Loop* child : loop->children_) PrintLoop(child);
+  }
+};
+
+
+LoopTree* LoopFinder::BuildLoopTree(Graph* graph, Zone* zone) {
+  LoopTree* loop_tree =
+      new (graph->zone()) LoopTree(graph->NodeCount(), graph->zone());
+  LoopFinderImpl finder(graph, loop_tree, zone);
+  finder.Run();
+  if (FLAG_trace_turbo_graph) {
+    finder.Print();
+  }
+  return loop_tree;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
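
The mark-intersection trick at the heart of LoopFinderImpl is easier to see in isolation. The sketch below is not part of the patch: plain structs stand in for the zone-allocated NodeInfo, and the three sample nodes are hypothetical. A node belongs to loop {n} exactly when both the backward walk (from {end} toward inputs) and the forward walk (from the header toward uses) set bit {n} on it.

    // Standalone illustration of the forward/backward mark intersection.
    #include <cstdint>
    #include <cstdio>

    using LoopMarks = uint32_t;
    constexpr LoopMarks kVisited = 1;  // bit 0 is reserved; loops use bits 1+

    struct Marks {
      LoopMarks forward = 0;   // set walking uses, starting at loop headers
      LoopMarks backward = 0;  // set walking inputs, starting at {end}
      bool IsInLoop(unsigned n) const {
        // In the loop only if bit {n} survived both traversals.
        return (forward & backward & (1u << n)) != 0;
      }
    };

    int main() {
      Marks body, entry, after;
      body.backward = kVisited | (1u << 1);   // lies on a path to the backedge
      body.forward = kVisited | (1u << 1);    // and is reachable from the header
      entry.backward = kVisited | (1u << 1);  // feeds the loop entry edge...
      entry.forward = kVisited;               // ...but the header never marks it
      after.forward = kVisited | (1u << 1);   // consumes a loop value downstream
      std::printf("%d %d %d\n", body.IsInLoop(1), entry.IsInLoop(1),
                  after.IsInLoop(1));  // prints "1 0 0"
    }
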
diff --git a/deps/v8/src/compiler/loop-analysis.h b/deps/v8/src/compiler/loop-analysis.h
new file mode 100644
index 0000000..8c8d19a
--- /dev/null
+++ b/deps/v8/src/compiler/loop-analysis.h
@@ -0,0 +1,135 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_LOOP_ANALYSIS_H_
+#define V8_COMPILER_LOOP_ANALYSIS_H_
+
+#include "src/base/iterator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/node.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LoopFinderImpl;
+
+typedef base::iterator_range<Node**> NodeRange;
+
+// Represents a tree of loops in a graph.
+class LoopTree : public ZoneObject {
+ public:
+  LoopTree(size_t num_nodes, Zone* zone)
+      : zone_(zone),
+        outer_loops_(zone),
+        all_loops_(zone),
+        node_to_loop_num_(static_cast<int>(num_nodes), 0, zone),
+        loop_nodes_(zone) {}
+
+  // Represents a loop in the tree of loops, including the header nodes,
+  // the body, and any nested loops.
+  class Loop {
+   public:
+    Loop* parent() const { return parent_; }
+    const ZoneVector<Loop*>& children() const { return children_; }
+    size_t HeaderSize() const { return body_start_ - header_start_; }
+    size_t BodySize() const { return body_end_ - body_start_; }
+    size_t TotalSize() const { return body_end_ - header_start_; }
+
+   private:
+    friend class LoopTree;
+    friend class LoopFinderImpl;
+
+    explicit Loop(Zone* zone)
+        : parent_(nullptr),
+          depth_(0),
+          children_(zone),
+          header_start_(-1),
+          body_start_(-1),
+          body_end_(-1) {}
+    Loop* parent_;
+    int depth_;
+    ZoneVector<Loop*> children_;
+    int header_start_;
+    int body_start_;
+    int body_end_;
+  };
+
+  // Return the innermost nested loop, if any, that contains {node}.
+  Loop* ContainingLoop(Node* node) {
+    if (node->id() >= static_cast<int>(node_to_loop_num_.size()))
+      return nullptr;
+    uint8_t num = node_to_loop_num_[node->id()];
+    return num > 0 ? &all_loops_[num - 1] : nullptr;
+  }
+
+  // Check if the {loop} contains the {node}, either directly or by containing
+  // a nested loop that contains {node}.
+  bool Contains(Loop* loop, Node* node) {
+    for (Loop* c = ContainingLoop(node); c != nullptr; c = c->parent_) {
+      if (c == loop) return true;
+    }
+    return false;
+  }
+
+  // Return the list of outer loops.
+  const ZoneVector<Loop*>& outer_loops() const { return outer_loops_; }
+
+  // Return the unique loop number for a given loop. Loop numbers start at {1}.
+  int LoopNum(Loop* loop) const {
+    return 1 + static_cast<int>(loop - &all_loops_[0]);
+  }
+
+  // Return a range which can iterate over the header nodes of {loop}.
+  NodeRange HeaderNodes(Loop* loop) {
+    return NodeRange(&loop_nodes_[0] + loop->header_start_,
+                     &loop_nodes_[0] + loop->body_start_);
+  }
+
+  // Return a range which can iterate over the body nodes of {loop}.
+  NodeRange BodyNodes(Loop* loop) {
+    return NodeRange(&loop_nodes_[0] + loop->body_start_,
+                     &loop_nodes_[0] + loop->body_end_);
+  }
+
+ private:
+  friend class LoopFinderImpl;
+
+  Loop* NewLoop() {
+    all_loops_.push_back(Loop(zone_));
+    Loop* result = &all_loops_.back();
+    return result;
+  }
+
+  void SetParent(Loop* parent, Loop* child) {
+    if (parent != nullptr) {
+      parent->children_.push_back(child);
+      child->parent_ = parent;
+      child->depth_ = parent->depth_ + 1;
+    } else {
+      outer_loops_.push_back(child);
+    }
+  }
+
+  Zone* zone_;
+  ZoneVector<Loop*> outer_loops_;
+  ZoneVector<Loop> all_loops_;
+  // TODO(titzer): lift loop count restriction.
+  ZoneVector<uint8_t> node_to_loop_num_;
+  ZoneVector<Node*> loop_nodes_;
+};
+
+
+class LoopFinder {
+ public:
+  // Build a loop tree for the entire graph.
+  static LoopTree* BuildLoopTree(Graph* graph, Zone* temp_zone);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_LOOP_ANALYSIS_H_
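
For orientation, here is a hypothetical consumer of the interface above, assuming it runs inside v8::internal::compiler against a fully built graph; TraceLoops is an invented name, while BuildLoopTree, outer_loops, HeaderNodes, and BodyNodes are the declarations from this header.

    // Hypothetical debugging pass over the loop tree; assumes V8 internals.
    void TraceLoops(Graph* graph, Zone* temp_zone) {
      LoopTree* tree = LoopFinder::BuildLoopTree(graph, temp_zone);
      for (LoopTree::Loop* loop : tree->outer_loops()) {
        // Nested loops occupy nested intervals of one Node* array, so the
        // header and body of each loop are contiguous, allocation-free ranges.
        for (Node* node : tree->HeaderNodes(loop)) {
          PrintF("header #%d\n", node->id());
        }
        for (Node* node : tree->BodyNodes(loop)) {
          PrintF("body   #%d\n", node->id());
        }
      }
    }
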
diff --git a/deps/v8/src/compiler/machine-operator-reducer.cc b/deps/v8/src/compiler/machine-operator-reducer.cc
index bb1596f..c3e45a1 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.cc
+++ b/deps/v8/src/compiler/machine-operator-reducer.cc
@@ -8,7 +8,6 @@
 #include "src/base/division-by-constant.h"
 #include "src/codegen.h"
 #include "src/compiler/diamond.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-matchers.h"
@@ -44,8 +43,10 @@ Node* MachineOperatorReducer::Int64Constant(int64_t value) {
 }
 
 
-Node* MachineOperatorReducer::Word32And(Node* lhs, uint32_t rhs) {
-  return graph()->NewNode(machine()->Word32And(), lhs, Uint32Constant(rhs));
+Node* MachineOperatorReducer::Word32And(Node* lhs, Node* rhs) {
+  Node* const node = graph()->NewNode(machine()->Word32And(), lhs, rhs);
+  Reduction const reduction = ReduceWord32And(node);
+  return reduction.Changed() ? reduction.replacement() : node;
 }
 
 
@@ -67,7 +68,9 @@ Node* MachineOperatorReducer::Word32Equal(Node* lhs, Node* rhs) {
 
 
 Node* MachineOperatorReducer::Int32Add(Node* lhs, Node* rhs) {
-  return graph()->NewNode(machine()->Int32Add(), lhs, rhs);
+  Node* const node = graph()->NewNode(machine()->Int32Add(), lhs, rhs);
+  Reduction const reduction = ReduceInt32Add(node);
+  return reduction.Changed() ? reduction.replacement() : node;
 }
 
 
@@ -120,85 +123,10 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
   switch (node->opcode()) {
     case IrOpcode::kProjection:
       return ReduceProjection(OpParameter<size_t>(node), node->InputAt(0));
-    case IrOpcode::kWord32And: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(0)) return Replace(m.right().node());  // x & 0  => 0
-      if (m.right().Is(-1)) return Replace(m.left().node());  // x & -1 => x
-      if (m.IsFoldable()) {                                   // K & K  => K
-        return ReplaceInt32(m.left().Value() & m.right().Value());
-      }
-      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x & x => x
-      if (m.left().IsWord32And() && m.right().HasValue()) {
-        Int32BinopMatcher mleft(m.left().node());
-        if (mleft.right().HasValue()) {  // (x & K) & K => x & K
-          node->ReplaceInput(0, mleft.left().node());
-          node->ReplaceInput(
-              1, Int32Constant(m.right().Value() & mleft.right().Value()));
-          return Changed(node);
-        }
-      }
-      break;
-    }
-    case IrOpcode::kWord32Or: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(0)) return Replace(m.left().node());    // x | 0  => x
-      if (m.right().Is(-1)) return Replace(m.right().node());  // x | -1 => -1
-      if (m.IsFoldable()) {                                    // K | K  => K
-        return ReplaceInt32(m.left().Value() | m.right().Value());
-      }
-      if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
-      if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
-        Int32BinopMatcher mleft(m.left().node());
-        Int32BinopMatcher mright(m.right().node());
-        if (mleft.left().node() == mright.left().node()) {
-          // (x << y) | (x >> (32 - y)) => x ror y
-          if (mright.right().IsInt32Sub()) {
-            Int32BinopMatcher mrightright(mright.right().node());
-            if (mrightright.left().Is(32) &&
-                mrightright.right().node() == mleft.right().node()) {
-              node->set_op(machine()->Word32Ror());
-              node->ReplaceInput(0, mleft.left().node());
-              node->ReplaceInput(1, mleft.right().node());
-              return Changed(node);
-            }
-          }
-          // (x << K) | (x >> (32 - K)) => x ror K
-          if (mleft.right().IsInRange(0, 31) &&
-              mright.right().Is(32 - mleft.right().Value())) {
-            node->set_op(machine()->Word32Ror());
-            node->ReplaceInput(0, mleft.left().node());
-            node->ReplaceInput(1, mleft.right().node());
-            return Changed(node);
-          }
-        }
-      }
-      if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) {
-        // (x >> (32 - y)) | (x << y)  => x ror y
-        Int32BinopMatcher mleft(m.left().node());
-        Int32BinopMatcher mright(m.right().node());
-        if (mleft.left().node() == mright.left().node()) {
-          if (mleft.right().IsInt32Sub()) {
-            Int32BinopMatcher mleftright(mleft.right().node());
-            if (mleftright.left().Is(32) &&
-                mleftright.right().node() == mright.right().node()) {
-              node->set_op(machine()->Word32Ror());
-              node->ReplaceInput(0, mright.left().node());
-              node->ReplaceInput(1, mright.right().node());
-              return Changed(node);
-            }
-          }
-          // (x >> (32 - K)) | (x << K) => x ror K
-          if (mright.right().IsInRange(0, 31) &&
-              mleft.right().Is(32 - mright.right().Value())) {
-            node->set_op(machine()->Word32Ror());
-            node->ReplaceInput(0, mright.left().node());
-            node->ReplaceInput(1, mright.right().node());
-            return Changed(node);
-          }
-        }
-      }
-      break;
-    }
+    case IrOpcode::kWord32And:
+      return ReduceWord32And(node);
+    case IrOpcode::kWord32Or:
+      return ReduceWord32Or(node);
     case IrOpcode::kWord32Xor: {
       Int32BinopMatcher m(node);
       if (m.right().Is(0)) return Replace(m.left().node());  // x ^ 0 => x
@@ -214,35 +142,15 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       }
       break;
     }
-    case IrOpcode::kWord32Shl: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(0)) return Replace(m.left().node());  // x << 0 => x
-      if (m.IsFoldable()) {                                  // K << K => K
-        return ReplaceInt32(m.left().Value() << m.right().Value());
-      }
-      if (m.right().IsInRange(1, 31)) {
-        // (x >>> K) << K => x & ~(2^K - 1)
-        // (x >> K) << K => x & ~(2^K - 1)
-        if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
-          Int32BinopMatcher mleft(m.left().node());
-          if (mleft.right().Is(m.right().Value())) {
-            node->set_op(machine()->Word32And());
-            node->ReplaceInput(0, mleft.left().node());
-            node->ReplaceInput(
-                1, Uint32Constant(~((1U << m.right().Value()) - 1U)));
-            return Changed(node);
-          }
-        }
-      }
-      break;
-    }
+    case IrOpcode::kWord32Shl:
+      return ReduceWord32Shl(node);
     case IrOpcode::kWord32Shr: {
       Uint32BinopMatcher m(node);
       if (m.right().Is(0)) return Replace(m.left().node());  // x >>> 0 => x
       if (m.IsFoldable()) {                                  // K >>> K => K
         return ReplaceInt32(m.left().Value() >> m.right().Value());
       }
-      break;
+      return ReduceWord32Shifts(node);
     }
     case IrOpcode::kWord32Sar: {
       Int32BinopMatcher m(node);
@@ -250,7 +158,22 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       if (m.IsFoldable()) {                                  // K >> K => K
         return ReplaceInt32(m.left().Value() >> m.right().Value());
       }
-      break;
+      if (m.left().IsWord32Shl()) {
+        Int32BinopMatcher mleft(m.left().node());
+        if (mleft.left().IsLoad()) {
+          LoadRepresentation const rep =
+              OpParameter<LoadRepresentation>(mleft.left().node());
+          if (m.right().Is(24) && mleft.right().Is(24) && rep == kMachInt8) {
+            // Load[kMachInt8] << 24 >> 24 => Load[kMachInt8]
+            return Replace(mleft.left().node());
+          }
+          if (m.right().Is(16) && mleft.right().Is(16) && rep == kMachInt16) {
+            // Load[kMachInt16] << 16 >> 16 => Load[kMachInt16]
+            return Replace(mleft.left().node());
+          }
+        }
+      }
+      return ReduceWord32Shifts(node);
     }
     case IrOpcode::kWord32Ror: {
       Int32BinopMatcher m(node);
@@ -291,15 +214,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       if (m.LeftEqualsRight()) return ReplaceBool(true);  // x == x => true
       break;
     }
-    case IrOpcode::kInt32Add: {
-      Int32BinopMatcher m(node);
-      if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
-      if (m.IsFoldable()) {                                  // K + K => K
-        return ReplaceInt32(static_cast<uint32_t>(m.left().Value()) +
-                            static_cast<uint32_t>(m.right().Value()));
-      }
-      break;
-    }
+    case IrOpcode::kInt32Add:
+      return ReduceInt32Add(node);
     case IrOpcode::kInt32Sub: {
       Int32BinopMatcher m(node);
       if (m.right().Is(0)) return Replace(m.left().node());  // x - 0 => x
@@ -326,7 +242,8 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
       if (m.right().IsPowerOf2()) {  // x * 2^n => x << n
         node->set_op(machine()->Word32Shl());
         node->ReplaceInput(1, Int32Constant(WhichPowerOf2(m.right().Value())));
-        return Changed(node);
+        Reduction reduction = ReduceWord32Shl(node);
+        return reduction.Changed() ? reduction : Changed(node);
       }
       break;
     }
@@ -546,6 +463,18 @@ Reduction MachineOperatorReducer::Reduce(Node* node) {
 }
 
 
+Reduction MachineOperatorReducer::ReduceInt32Add(Node* node) {
+  DCHECK_EQ(IrOpcode::kInt32Add, node->opcode());
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x + 0 => x
+  if (m.IsFoldable()) {                                  // K + K => K
+    return ReplaceUint32(bit_cast<uint32_t>(m.left().Value()) +
+                         bit_cast<uint32_t>(m.right().Value()));
+  }
+  return NoChange();
+}
+
+
 Reduction MachineOperatorReducer::ReduceInt32Div(Node* node) {
   Int32BinopMatcher m(node);
   if (m.left().Is(0)) return Replace(m.left().node());    // 0 / x => 0
@@ -790,6 +719,192 @@ Reduction MachineOperatorReducer::ReduceProjection(size_t index, Node* node) {
 }
 
 
+Reduction MachineOperatorReducer::ReduceWord32Shifts(Node* node) {
+  DCHECK((node->opcode() == IrOpcode::kWord32Shl) ||
+         (node->opcode() == IrOpcode::kWord32Shr) ||
+         (node->opcode() == IrOpcode::kWord32Sar));
+  if (machine()->Word32ShiftIsSafe()) {
+    // Remove the explicit 'and' with 0x1f if the shift provided by the machine
+    // instruction matches that required by JavaScript.
+    Int32BinopMatcher m(node);
+    if (m.right().IsWord32And()) {
+      Int32BinopMatcher mright(m.right().node());
+      if (mright.right().Is(0x1f)) {
+        node->ReplaceInput(1, mright.left().node());
+        return Changed(node);
+      }
+    }
+  }
+  return NoChange();
+}
+
+
+Reduction MachineOperatorReducer::ReduceWord32Shl(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord32Shl, node->opcode());
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());  // x << 0 => x
+  if (m.IsFoldable()) {                                  // K << K => K
+    return ReplaceInt32(m.left().Value() << m.right().Value());
+  }
+  if (m.right().IsInRange(1, 31)) {
+    // (x >>> K) << K => x & ~(2^K - 1)
+    // (x >> K) << K => x & ~(2^K - 1)
+    if (m.left().IsWord32Sar() || m.left().IsWord32Shr()) {
+      Int32BinopMatcher mleft(m.left().node());
+      if (mleft.right().Is(m.right().Value())) {
+        node->set_op(machine()->Word32And());
+        node->ReplaceInput(0, mleft.left().node());
+        node->ReplaceInput(1,
+                           Uint32Constant(~((1U << m.right().Value()) - 1U)));
+        Reduction reduction = ReduceWord32And(node);
+        return reduction.Changed() ? reduction : Changed(node);
+      }
+    }
+  }
+  return ReduceWord32Shifts(node);
+}
+
+
+Reduction MachineOperatorReducer::ReduceWord32And(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord32And, node->opcode());
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.right().node());  // x & 0  => 0
+  if (m.right().Is(-1)) return Replace(m.left().node());  // x & -1 => x
+  if (m.IsFoldable()) {                                   // K & K  => K
+    return ReplaceInt32(m.left().Value() & m.right().Value());
+  }
+  if (m.LeftEqualsRight()) return Replace(m.left().node());  // x & x => x
+  if (m.left().IsWord32And() && m.right().HasValue()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue()) {  // (x & K) & K => x & K
+      node->ReplaceInput(0, mleft.left().node());
+      node->ReplaceInput(
+          1, Int32Constant(m.right().Value() & mleft.right().Value()));
+      Reduction const reduction = ReduceWord32And(node);
+      return reduction.Changed() ? reduction : Changed(node);
+    }
+  }
+  if (m.left().IsInt32Add() && m.right().IsNegativePowerOf2()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().HasValue() &&
+        (mleft.right().Value() & m.right().Value()) == mleft.right().Value()) {
+      // (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
+      node->set_op(machine()->Int32Add());
+      node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
+      node->ReplaceInput(1, mleft.right().node());
+      Reduction const reduction = ReduceInt32Add(node);
+      return reduction.Changed() ? reduction : Changed(node);
+    }
+    if (mleft.left().IsInt32Mul()) {
+      Int32BinopMatcher mleftleft(mleft.left().node());
+      if (mleftleft.right().IsMultipleOf(-m.right().Value())) {
+        // (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+        node->set_op(machine()->Int32Add());
+        node->ReplaceInput(0,
+                           Word32And(mleft.right().node(), m.right().node()));
+        node->ReplaceInput(1, mleftleft.node());
+        Reduction const reduction = ReduceInt32Add(node);
+        return reduction.Changed() ? reduction : Changed(node);
+      }
+    }
+    if (mleft.right().IsInt32Mul()) {
+      Int32BinopMatcher mleftright(mleft.right().node());
+      if (mleftright.right().IsMultipleOf(-m.right().Value())) {
+        // (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+        node->set_op(machine()->Int32Add());
+        node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
+        node->ReplaceInput(1, mleftright.node());
+        Reduction const reduction = ReduceInt32Add(node);
+        return reduction.Changed() ? reduction : Changed(node);
+      }
+    }
+    if (mleft.left().IsWord32Shl()) {
+      Int32BinopMatcher mleftleft(mleft.left().node());
+      if (mleftleft.right().Is(
+              base::bits::CountTrailingZeros32(m.right().Value()))) {
+        // (y << L + x) & (-1 << L) => (x & (-1 << L)) + y << L
+        node->set_op(machine()->Int32Add());
+        node->ReplaceInput(0,
+                           Word32And(mleft.right().node(), m.right().node()));
+        node->ReplaceInput(1, mleftleft.node());
+        Reduction const reduction = ReduceInt32Add(node);
+        return reduction.Changed() ? reduction : Changed(node);
+      }
+    }
+    if (mleft.right().IsWord32Shl()) {
+      Int32BinopMatcher mleftright(mleft.right().node());
+      if (mleftright.right().Is(
+              base::bits::CountTrailingZeros32(m.right().Value()))) {
+        // (x + y << L) & (-1 << L) => (x & (-1 << L)) + y << L
+        node->set_op(machine()->Int32Add());
+        node->ReplaceInput(0, Word32And(mleft.left().node(), m.right().node()));
+        node->ReplaceInput(1, mleftright.node());
+        Reduction const reduction = ReduceInt32Add(node);
+        return reduction.Changed() ? reduction : Changed(node);
+      }
+    }
+  }
+  return NoChange();
+}
+
+
+Reduction MachineOperatorReducer::ReduceWord32Or(Node* node) {
+  DCHECK_EQ(IrOpcode::kWord32Or, node->opcode());
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) return Replace(m.left().node());    // x | 0  => x
+  if (m.right().Is(-1)) return Replace(m.right().node());  // x | -1 => -1
+  if (m.IsFoldable()) {                                    // K | K  => K
+    return ReplaceInt32(m.left().Value() | m.right().Value());
+  }
+  if (m.LeftEqualsRight()) return Replace(m.left().node());  // x | x => x
+
+  Node* shl = NULL;
+  Node* shr = NULL;
+  // Recognize rotation; we are matching either:
+  //  * x << y | x >>> (32 - y) => x ror (32 - y), i.e. x rol y
+  //  * x << (32 - y) | x >>> y => x ror y
+  // as well as their commuted form.
+  if (m.left().IsWord32Shl() && m.right().IsWord32Shr()) {
+    shl = m.left().node();
+    shr = m.right().node();
+  } else if (m.left().IsWord32Shr() && m.right().IsWord32Shl()) {
+    shl = m.right().node();
+    shr = m.left().node();
+  } else {
+    return NoChange();
+  }
+
+  Int32BinopMatcher mshl(shl);
+  Int32BinopMatcher mshr(shr);
+  if (mshl.left().node() != mshr.left().node()) return NoChange();
+
+  if (mshl.right().HasValue() && mshr.right().HasValue()) {
+    // Case where y is a constant.
+    if (mshl.right().Value() + mshr.right().Value() != 32) return NoChange();
+  } else {
+    Node* sub = NULL;
+    Node* y = NULL;
+    if (mshl.right().IsInt32Sub()) {
+      sub = mshl.right().node();
+      y = mshr.right().node();
+    } else if (mshr.right().IsInt32Sub()) {
+      sub = mshr.right().node();
+      y = mshl.right().node();
+    } else {
+      return NoChange();
+    }
+
+    Int32BinopMatcher msub(sub);
+    if (!msub.left().Is(32) || msub.right().node() != y) return NoChange();
+  }
+
+  node->set_op(machine()->Word32Ror());
+  node->ReplaceInput(0, mshl.left().node());
+  node->ReplaceInput(1, mshr.right().node());
+  return Changed(node);
+}
+
+
 CommonOperatorBuilder* MachineOperatorReducer::common() const {
   return jsgraph()->common();
 }
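
The rotation matching added to ReduceWord32Or rests on a bit-twiddling identity rather than on anything TurboFan-specific, so it can be checked with a self-contained program; Ror below is a hand-written reference, not a V8 helper.

    // Checks that (x << y) | (x >> (32 - y)) equals rotate-right by 32 - y.
    #include <cassert>
    #include <cstdint>

    static uint32_t Ror(uint32_t x, uint32_t k) {
      k &= 31;
      return k == 0 ? x : (x >> k) | (x << (32 - k));
    }

    int main() {
      const uint32_t x = 0x12345678u;
      for (uint32_t y = 1; y < 32; ++y) {  // y == 0 would shift by 32 (UB)
        uint32_t pattern = (x << y) | (x >> (32 - y));
        assert(pattern == Ror(x, 32 - y));  // a single ror instruction suffices
      }
      return 0;
    }
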
diff --git a/deps/v8/src/compiler/machine-operator-reducer.h b/deps/v8/src/compiler/machine-operator-reducer.h
index fefac7a..8200abb 100644
--- a/deps/v8/src/compiler/machine-operator-reducer.h
+++ b/deps/v8/src/compiler/machine-operator-reducer.h
@@ -24,7 +24,7 @@ class MachineOperatorReducer FINAL : public Reducer {
   explicit MachineOperatorReducer(JSGraph* jsgraph);
   ~MachineOperatorReducer();
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
+  Reduction Reduce(Node* node) OVERRIDE;
 
  private:
   Node* Float32Constant(volatile float value);
@@ -34,7 +34,10 @@ class MachineOperatorReducer FINAL : public Reducer {
   Node* Uint32Constant(uint32_t value) {
     return Int32Constant(bit_cast<uint32_t>(value));
   }
-  Node* Word32And(Node* lhs, uint32_t rhs);
+  Node* Word32And(Node* lhs, Node* rhs);
+  Node* Word32And(Node* lhs, uint32_t rhs) {
+    return Word32And(lhs, Uint32Constant(rhs));
+  }
   Node* Word32Sar(Node* lhs, uint32_t rhs);
   Node* Word32Shr(Node* lhs, uint32_t rhs);
   Node* Word32Equal(Node* lhs, Node* rhs);
@@ -61,6 +64,7 @@ class MachineOperatorReducer FINAL : public Reducer {
     return Replace(Int64Constant(value));
   }
 
+  Reduction ReduceInt32Add(Node* node);
   Reduction ReduceInt32Div(Node* node);
   Reduction ReduceUint32Div(Node* node);
   Reduction ReduceInt32Mod(Node* node);
@@ -68,6 +72,10 @@ class MachineOperatorReducer FINAL : public Reducer {
   Reduction ReduceTruncateFloat64ToInt32(Node* node);
   Reduction ReduceStore(Node* node);
   Reduction ReduceProjection(size_t index, Node* node);
+  Reduction ReduceWord32Shifts(Node* node);
+  Reduction ReduceWord32Shl(Node* node);
+  Reduction ReduceWord32And(Node* node);
+  Reduction ReduceWord32Or(Node* node);
 
   Graph* graph() const;
   JSGraph* jsgraph() const { return jsgraph_; }
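
The (x + (K << L)) & (-1 << L) rewrite in ReduceWord32And is likewise a pure modular-arithmetic fact: adding a constant whose low L bits are clear cannot disturb the low L bits of the sum. A standalone check with arbitrarily chosen L and K:

    // Verifies (x + (K << L)) & (-1 << L) == (x & (-1 << L)) + (K << L)
    // whenever the added constant has its low L bits clear.
    #include <cassert>
    #include <cstdint>

    int main() {
      const uint32_t L = 3;
      const uint32_t mask = ~0u << L;      // same bits as -1 << L
      const uint32_t k_shifted = 5u << L;  // low L bits are zero by construction
      for (uint32_t x = 0; x < (1u << 16); ++x) {
        assert(((x + k_shifted) & mask) == ((x & mask) + k_shifted));
      }
      return 0;
    }
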
diff --git a/deps/v8/src/compiler/machine-operator.cc b/deps/v8/src/compiler/machine-operator.cc
index 2ea1bf3..eb034e9 100644
--- a/deps/v8/src/compiler/machine-operator.cc
+++ b/deps/v8/src/compiler/machine-operator.cc
@@ -7,6 +7,8 @@
 #include "src/base/lazy-instance.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
+#include "src/v8.h"
+#include "src/zone-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -52,6 +54,18 @@ StoreRepresentation const& StoreRepresentationOf(Operator const* op) {
 }
 
 
+CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kCheckedLoad, op->opcode());
+  return OpParameter<CheckedLoadRepresentation>(op);
+}
+
+
+CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const* op) {
+  DCHECK_EQ(IrOpcode::kCheckedStore, op->opcode());
+  return OpParameter<CheckedStoreRepresentation>(op);
+}
+
+
 #define PURE_OP_LIST(V)                                                       \
   V(Word32And, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)      \
   V(Word32Or, Operator::kAssociative | Operator::kCommutative, 2, 0, 1)       \
@@ -156,37 +170,53 @@ struct MachineOperatorGlobalCache {
   PURE_OP_LIST(PURE)
 #undef PURE
 
-#define LOAD(Type)                                                           \
-  struct Load##Type##Operator FINAL : public Operator1<LoadRepresentation> { \
-    Load##Type##Operator()                                                   \
-        : Operator1<LoadRepresentation>(                                     \
-              IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite,      \
-              "Load", 2, 1, 1, 1, 1, 0, k##Type) {}                          \
-  };                                                                         \
-  Load##Type##Operator k##Load##Type;
+#define LOAD(Type)                                                             \
+  struct Load##Type##Operator FINAL : public Operator1<LoadRepresentation> {   \
+    Load##Type##Operator()                                                     \
+        : Operator1<LoadRepresentation>(                                       \
+              IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite,        \
+              "Load", 2, 1, 1, 1, 1, 0, k##Type) {}                            \
+  };                                                                           \
+  struct CheckedLoad##Type##Operator FINAL                                     \
+      : public Operator1<CheckedLoadRepresentation> {                          \
+    CheckedLoad##Type##Operator()                                              \
+        : Operator1<CheckedLoadRepresentation>(                                \
+              IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite, \
+              "CheckedLoad", 3, 1, 1, 1, 1, 0, k##Type) {}                     \
+  };                                                                           \
+  Load##Type##Operator kLoad##Type;                                            \
+  CheckedLoad##Type##Operator kCheckedLoad##Type;
   MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
 
-#define STORE(Type)                                                      \
-  struct Store##Type##Operator : public Operator1<StoreRepresentation> { \
-    explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind)  \
-        : Operator1<StoreRepresentation>(                                \
-              IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow,  \
-              "Store", 3, 1, 1, 0, 1, 0,                                 \
-              StoreRepresentation(k##Type, write_barrier_kind)) {}       \
-  };                                                                     \
-  struct Store##Type##NoWriteBarrier##Operator FINAL                     \
-      : public Store##Type##Operator {                                   \
-    Store##Type##NoWriteBarrier##Operator()                              \
-        : Store##Type##Operator(kNoWriteBarrier) {}                      \
-  };                                                                     \
-  struct Store##Type##FullWriteBarrier##Operator FINAL                   \
-      : public Store##Type##Operator {                                   \
-    Store##Type##FullWriteBarrier##Operator()                            \
-        : Store##Type##Operator(kFullWriteBarrier) {}                    \
-  };                                                                     \
-  Store##Type##NoWriteBarrier##Operator k##Store##Type##NoWriteBarrier;  \
-  Store##Type##FullWriteBarrier##Operator k##Store##Type##FullWriteBarrier;
+#define STORE(Type)                                                            \
+  struct Store##Type##Operator : public Operator1<StoreRepresentation> {       \
+    explicit Store##Type##Operator(WriteBarrierKind write_barrier_kind)        \
+        : Operator1<StoreRepresentation>(                                      \
+              IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow,        \
+              "Store", 3, 1, 1, 0, 1, 0,                                       \
+              StoreRepresentation(k##Type, write_barrier_kind)) {}             \
+  };                                                                           \
+  struct Store##Type##NoWriteBarrier##Operator FINAL                           \
+      : public Store##Type##Operator {                                         \
+    Store##Type##NoWriteBarrier##Operator()                                    \
+        : Store##Type##Operator(kNoWriteBarrier) {}                            \
+  };                                                                           \
+  struct Store##Type##FullWriteBarrier##Operator FINAL                         \
+      : public Store##Type##Operator {                                         \
+    Store##Type##FullWriteBarrier##Operator()                                  \
+        : Store##Type##Operator(kFullWriteBarrier) {}                          \
+  };                                                                           \
+  struct CheckedStore##Type##Operator FINAL                                    \
+      : public Operator1<CheckedStoreRepresentation> {                         \
+    CheckedStore##Type##Operator()                                             \
+        : Operator1<CheckedStoreRepresentation>(                               \
+              IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow, \
+              "CheckedStore", 4, 1, 1, 0, 1, 0, k##Type) {}                    \
+  };                                                                           \
+  Store##Type##NoWriteBarrier##Operator kStore##Type##NoWriteBarrier;          \
+  Store##Type##FullWriteBarrier##Operator kStore##Type##FullWriteBarrier;      \
+  CheckedStore##Type##Operator kCheckedStore##Type;
   MACHINE_TYPE_LIST(STORE)
 #undef STORE
 };
@@ -196,8 +226,9 @@ static base::LazyInstance<MachineOperatorGlobalCache>::type kCache =
     LAZY_INSTANCE_INITIALIZER;
 
 
-MachineOperatorBuilder::MachineOperatorBuilder(MachineType word, Flags flags)
-    : cache_(kCache.Get()), word_(word), flags_(flags) {
+MachineOperatorBuilder::MachineOperatorBuilder(Zone* zone, MachineType word,
+                                               Flags flags)
+    : zone_(zone), cache_(kCache.Get()), word_(word), flags_(flags) {
   DCHECK(word == kRepWord32 || word == kRepWord64);
 }
 
@@ -213,15 +244,16 @@ const Operator* MachineOperatorBuilder::Load(LoadRepresentation rep) {
   switch (rep) {
 #define LOAD(Type) \
   case k##Type:    \
-    return &cache_.k##Load##Type;
+    return &cache_.kLoad##Type;
     MACHINE_TYPE_LIST(LOAD)
 #undef LOAD
-
     default:
       break;
   }
-  UNREACHABLE();
-  return NULL;
+  // Uncached.
+  return new (zone_) Operator1<LoadRepresentation>(  // --
+      IrOpcode::kLoad, Operator::kNoThrow | Operator::kNoWrite, "Load", 2, 1, 1,
+      1, 1, 0, rep);
 }
 
 
@@ -242,9 +274,48 @@ const Operator* MachineOperatorBuilder::Store(StoreRepresentation rep) {
     default:
       break;
   }
-  UNREACHABLE();
-  return NULL;
+  // Uncached.
+  return new (zone_) Operator1<StoreRepresentation>(  // --
+      IrOpcode::kStore, Operator::kNoRead | Operator::kNoThrow, "Store", 3, 1,
+      1, 0, 1, 0, rep);
+}
+
+
+const Operator* MachineOperatorBuilder::CheckedLoad(
+    CheckedLoadRepresentation rep) {
+  switch (rep) {
+#define LOAD(Type) \
+  case k##Type:    \
+    return &cache_.kCheckedLoad##Type;
+    MACHINE_TYPE_LIST(LOAD)
+#undef LOAD
+    default:
+      break;
+  }
+  // Uncached.
+  return new (zone_) Operator1<CheckedLoadRepresentation>(
+      IrOpcode::kCheckedLoad, Operator::kNoThrow | Operator::kNoWrite,
+      "CheckedLoad", 3, 1, 1, 1, 1, 0, rep);
+}
+
+
+const Operator* MachineOperatorBuilder::CheckedStore(
+    CheckedStoreRepresentation rep) {
+  switch (rep) {
+#define STORE(Type) \
+  case k##Type:     \
+    return &cache_.kCheckedStore##Type;
+    MACHINE_TYPE_LIST(STORE)
+#undef STORE
+    default:
+      break;
+  }
+  // Uncached.
+  return new (zone_) Operator1<CheckedStoreRepresentation>(
+      IrOpcode::kCheckedStore, Operator::kNoRead | Operator::kNoThrow,
+      "CheckedStore", 4, 1, 1, 0, 1, 0, rep);
 }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
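
Every builder above follows the same cache-else-allocate shape: common machine types resolve to pointers into the static MachineOperatorGlobalCache, and anything else becomes a one-off Operator1 allocated in the builder's zone (hence the new Zone* constructor argument). A toy version of the pattern, with all names hypothetical and a vector of unique_ptr standing in for the zone:

    // Toy cache-else-allocate builder mirroring the structure above.
    #include <memory>
    #include <string>
    #include <vector>

    struct Op {
      std::string name;
    };

    class MiniBuilder {
     public:
      const Op* Load(int rep_bits) {
        static const Op kCachedWord32{"Load[Word32]"};  // shared, never freed
        if (rep_bits == 32) return &kCachedWord32;
        // Uncached: allocate a one-off operator owned by this builder.
        owned_.push_back(std::make_unique<Op>(
            Op{"Load[" + std::to_string(rep_bits) + "]"}));
        return owned_.back().get();
      }

     private:
      std::vector<std::unique_ptr<Op>> owned_;
    };
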
diff --git a/deps/v8/src/compiler/machine-operator.h b/deps/v8/src/compiler/machine-operator.h
index 979a887..42f3130 100644
--- a/deps/v8/src/compiler/machine-operator.h
+++ b/deps/v8/src/compiler/machine-operator.h
@@ -53,6 +53,18 @@ std::ostream& operator<<(std::ostream&, StoreRepresentation);
 StoreRepresentation const& StoreRepresentationOf(Operator const*);
 
 
+// A CheckedLoad needs a MachineType.
+typedef MachineType CheckedLoadRepresentation;
+
+CheckedLoadRepresentation CheckedLoadRepresentationOf(Operator const*);
+
+
+// A CheckedStore needs a MachineType.
+typedef MachineType CheckedStoreRepresentation;
+
+CheckedStoreRepresentation CheckedStoreRepresentationOf(Operator const*);
+
+
 // Interface for building machine-level operators. These operators are
 // machine-level but machine-independent and thus define a language suitable
 // for generating code to run on architectures such as ia32, x64, arm, etc.
@@ -67,13 +79,12 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
     kFloat64RoundTruncate = 1u << 2,
     kFloat64RoundTiesAway = 1u << 3,
     kInt32DivIsSafe = 1u << 4,
-    kInt32ModIsSafe = 1u << 5,
-    kUint32DivIsSafe = 1u << 6,
-    kUint32ModIsSafe = 1u << 7
+    kUint32DivIsSafe = 1u << 5,
+    kWord32ShiftIsSafe = 1u << 6
   };
   typedef base::Flags<Flag, unsigned> Flags;
 
-  explicit MachineOperatorBuilder(MachineType word = kMachPtr,
+  explicit MachineOperatorBuilder(Zone* zone, MachineType word = kMachPtr,
                                   Flags supportedOperators = kNoFlags);
 
   const Operator* Word32And();
@@ -84,6 +95,7 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
   const Operator* Word32Sar();
   const Operator* Word32Ror();
   const Operator* Word32Equal();
+  bool Word32ShiftIsSafe() const { return flags_ & kWord32ShiftIsSafe; }
 
   const Operator* Word64And();
   const Operator* Word64Or();
@@ -110,9 +122,7 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
   const Operator* Uint32Mod();
   const Operator* Uint32MulHigh();
   bool Int32DivIsSafe() const { return flags_ & kInt32DivIsSafe; }
-  bool Int32ModIsSafe() const { return flags_ & kInt32ModIsSafe; }
   bool Uint32DivIsSafe() const { return flags_ & kUint32DivIsSafe; }
-  bool Uint32ModIsSafe() const { return flags_ & kUint32ModIsSafe; }
 
   const Operator* Int64Add();
   const Operator* Int64Sub();
@@ -176,6 +186,11 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
   // Access to the machine stack.
   const Operator* LoadStackPointer();
 
+  // checked-load heap, index, length
+  const Operator* CheckedLoad(CheckedLoadRepresentation);
+  // checked-store heap, index, length, value
+  const Operator* CheckedStore(CheckedStoreRepresentation);
+
   // Target machine word-size assumed by this builder.
   bool Is32() const { return word() == kRepWord32; }
   bool Is64() const { return word() == kRepWord64; }
@@ -211,14 +226,17 @@ class MachineOperatorBuilder FINAL : public ZoneObject {
 #undef PSEUDO_OP_LIST
 
  private:
+  Zone* zone_;
   const MachineOperatorGlobalCache& cache_;
   const MachineType word_;
   const Flags flags_;
+
   DISALLOW_COPY_AND_ASSIGN(MachineOperatorBuilder);
 };
 
 
 DEFINE_OPERATORS_FOR_FLAGS(MachineOperatorBuilder::Flags)
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
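
The comments above fix the operand order (heap, index, length[, value]) but leave the out-of-bounds behavior to the backends; the MIPS lowering that follows makes it concrete: an out-of-range load yields a neutral value (zero for integers, NaN for floats) and an out-of-range store is skipped. As plain functions, the intended semantics are roughly:

    // Rough scalar model of CheckedLoad/CheckedStore for kMachInt32.
    #include <cstddef>
    #include <cstdint>

    int32_t CheckedLoadWord32(const int32_t* heap, size_t index, size_t length) {
      if (index >= length) return 0;  // cf. OutOfLineLoadInteger: result := 0
      return heap[index];
    }

    void CheckedStoreWord32(int32_t* heap, size_t index, size_t length,
                            int32_t value) {
      if (index >= length) return;  // the store macros branch past the write
      heap[index] = value;
    }
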
diff --git a/deps/v8/src/compiler/mips/code-generator-mips.cc b/deps/v8/src/compiler/mips/code-generator-mips.cc
index 2836113..dd92837 100644
--- a/deps/v8/src/compiler/mips/code-generator-mips.cc
+++ b/deps/v8/src/compiler/mips/code-generator-mips.cc
@@ -20,6 +20,7 @@ namespace compiler {
 // TODO(plind): Possibly avoid using these lithium names.
 #define kScratchReg kLithiumScratchReg
 #define kCompareReg kLithiumScratchReg2
+#define kScratchReg2 kLithiumScratchReg2
 #define kScratchDoubleReg kLithiumScratchDouble
 
 
@@ -70,6 +71,9 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
         // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
         //    maybe not done on arm due to const pool ??
         break;
+      case Constant::kRpoNumber:
+        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
+        break;
     }
     UNREACHABLE();
     return Operand(zero_reg);
@@ -99,10 +103,7 @@ class MipsOperandConverter FINAL : public InstructionOperandConverter {
     return MemOperand(no_reg);
   }
 
-  MemOperand MemoryOperand() {
-    int index = 0;
-    return MemoryOperand(&index);
-  }
+  MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
 
   MemOperand ToMemOperand(InstructionOperand* op) const {
     DCHECK(op != NULL);
@@ -121,6 +122,184 @@ static inline bool HasRegisterInput(Instruction* instr, int index) {
 }
 
 
+namespace {
+
+class OutOfLineLoadSingle FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
+  }
+
+ private:
+  FloatRegister const result_;
+};
+
+
+class OutOfLineLoadDouble FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ mov(result_, zero_reg); }
+
+ private:
+  Register const result_;
+};
+
+
+class OutOfLineRound : public OutOfLineCode {
+ public:
+  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    // Handle rounding to zero case where sign has to be preserved.
+    // High bits of double input already in kScratchReg.
+    __ srl(at, kScratchReg, 31);
+    __ sll(at, at, 31);
+    __ Mthc1(at, result_);
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineTruncate FINAL : public OutOfLineRound {
+ public:
+  OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineFloor FINAL : public OutOfLineRound {
+ public:
+  OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineCeil FINAL : public OutOfLineRound {
+ public:
+  OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+}  // namespace
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
+  do {                                                                        \
+    auto result = i.Output##width##Register();                                \
+    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
+      __ addu(at, i.InputRegister(2), offset);                                \
+      __ asm_instr(result, MemOperand(at, 0));                                \
+    } else {                                                                  \
+      auto offset = i.InputOperand(0).immediate();                            \
+      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+    }                                                                         \
+    __ bind(ool->exit());                                                     \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
+  do {                                                                        \
+    auto result = i.OutputRegister();                                         \
+    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
+      __ addu(at, i.InputRegister(2), offset);                                \
+      __ asm_instr(result, MemOperand(at, 0));                                \
+    } else {                                                                  \
+      auto offset = i.InputOperand(0).immediate();                            \
+      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+    }                                                                         \
+    __ bind(ool->exit());                                                     \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
+  do {                                                                 \
+    Label done;                                                        \
+    if (instr->InputAt(0)->IsRegister()) {                             \
+      auto offset = i.InputRegister(0);                                \
+      auto value = i.Input##width##Register(2);                        \
+      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
+      __ addu(at, i.InputRegister(3), offset);                         \
+      __ asm_instr(value, MemOperand(at, 0));                          \
+    } else {                                                           \
+      auto offset = i.InputOperand(0).immediate();                     \
+      auto value = i.Input##width##Register(2);                        \
+      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
+    }                                                                  \
+    __ bind(&done);                                                    \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
+  do {                                                                 \
+    Label done;                                                        \
+    if (instr->InputAt(0)->IsRegister()) {                             \
+      auto offset = i.InputRegister(0);                                \
+      auto value = i.InputRegister(2);                                 \
+      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
+      __ addu(at, i.InputRegister(3), offset);                         \
+      __ asm_instr(value, MemOperand(at, 0));                          \
+    } else {                                                           \
+      auto offset = i.InputOperand(0).immediate();                     \
+      auto value = i.InputRegister(2);                                 \
+      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
+    }                                                                  \
+    __ bind(&done);                                                    \
+  } while (0)
+
+
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation)                  \
+  do {                                                                         \
+    auto ool =                                                                 \
+        new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister());     \
+    Label done;                                                                \
+    __ Mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
+    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
+           HeapNumber::kExponentBits);                                         \
+    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
+              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
+    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
+    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));          \
+    __ Move(at, kScratchReg2, i.OutputDoubleRegister());                       \
+    __ or_(at, at, kScratchReg2);                                              \
+    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
+    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
+    __ bind(ool->exit());                                                      \
+    __ bind(&done);                                                            \
+  } while (0)
+
+
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   MipsOperandConverter i(this, instr);
@@ -154,7 +333,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
     }
     case kArchJmp:
-      __ Branch(GetLabel(i.InputRpo(0)));
+      AssembleArchJump(i.InputRpo(0));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -289,6 +468,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ MovFromFloatResult(i.OutputDoubleRegister());
       break;
     }
+    case kMipsFloat64Floor: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+      break;
+    }
+    case kMipsFloat64Ceil: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+      break;
+    }
+    case kMipsFloat64RoundTruncate: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+      break;
+    }
     case kMipsSqrtD: {
       __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
       break;
@@ -370,7 +561,17 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kMipsPush:
       __ Push(i.InputRegister(0));
       break;
-    case kMipsStoreWriteBarrier:
+    case kMipsStackClaim: {
+      int words = MiscField::decode(instr->opcode());
+      __ Subu(sp, sp, Operand(words << kPointerSizeLog2));
+      break;
+    }
+    case kMipsStoreToStackSlot: {
+      int slot = MiscField::decode(instr->opcode());
+      __ sw(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
+      break;
+    }
+    case kMipsStoreWriteBarrier: {
       Register object = i.InputRegister(0);
       Register index = i.InputRegister(1);
       Register value = i.InputRegister(2);
@@ -381,6 +582,43 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       RAStatus ra_status = kRAHasNotBeenSaved;
       __ RecordWrite(object, index, value, ra_status, mode);
       break;
+    }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
+      break;
   }
 }
 
@@ -391,33 +629,23 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   UNIMPLEMENTED();
 
 // Assembles branches after an instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   MipsOperandConverter i(this, instr);
-  Label done;
-
-  // Emit a branch. The true and false targets are always the last two inputs
-  // to the instruction.
-  BasicBlock::RpoNumber tblock =
-      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
-  BasicBlock::RpoNumber fblock =
-      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
-  bool fallthru = IsNextInAssemblyOrder(fblock);
-  Label* tlabel = GetLabel(tblock);
-  Label* flabel = fallthru ? &done : GetLabel(fblock);
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
   Condition cc = kNoCondition;
 
   // MIPS does not have condition code flags, so compare and branch are
   // implemented differently than on the other arch's. The compare operations
-  // emit mips psuedo-instructions, which are handled here by branch
+  // emit mips pseudo-instructions, which are handled here by branch
   // instructions that do the actual comparison. Essential that the input
-  // registers to compare psuedo-op are not modified before this branch op, as
+  // registers to compare pseudo-op are not modified before this branch op, as
   // they are tested here.
   // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
   //    not separated by other instructions.
 
   if (instr->arch_opcode() == kMipsTst) {
-    switch (condition) {
+    switch (branch->condition) {
       case kNotEqual:
         cc = ne;
         break;
@@ -425,7 +653,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
         cc = eq;
         break;
       default:
-        UNSUPPORTED_COND(kMipsTst, condition);
+        UNSUPPORTED_COND(kMipsTst, branch->condition);
         break;
     }
     __ And(at, i.InputRegister(0), i.InputOperand(1));
@@ -434,7 +662,7 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
   } else if (instr->arch_opcode() == kMipsAddOvf ||
              instr->arch_opcode() == kMipsSubOvf) {
     // kMipsAddOvf, SubOvf emit negative result to 'kCompareReg' on overflow.
-    switch (condition) {
+    switch (branch->condition) {
       case kOverflow:
         cc = lt;
         break;
@@ -442,13 +670,13 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
         cc = ge;
         break;
       default:
-        UNSUPPORTED_COND(kMipsAddOvf, condition);
+        UNSUPPORTED_COND(kMipsAddOvf, branch->condition);
         break;
     }
     __ Branch(tlabel, cc, kCompareReg, Operand(zero_reg));
 
   } else if (instr->arch_opcode() == kMipsCmp) {
-    switch (condition) {
+    switch (branch->condition) {
       case kEqual:
         cc = eq;
         break;
@@ -480,19 +708,18 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
         cc = hi;
         break;
       default:
-        UNSUPPORTED_COND(kMipsCmp, condition);
+        UNSUPPORTED_COND(kMipsCmp, branch->condition);
         break;
     }
     __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
 
-    if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
-    __ bind(&done);
+    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
 
   } else if (instr->arch_opcode() == kMipsCmpD) {
-    // TODO(dusmil) optimize unordered checks to use less instructions
+    // TODO(dusmil) optimize unordered checks to use fewer instructions
     // even if we have to unfold BranchF macro.
     Label* nan = flabel;
-    switch (condition) {
+    switch (branch->condition) {
       case kUnorderedEqual:
         cc = eq;
         break;
@@ -515,14 +742,13 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
         nan = tlabel;
         break;
       default:
-        UNSUPPORTED_COND(kMipsCmpD, condition);
+        UNSUPPORTED_COND(kMipsCmpD, branch->condition);
         break;
     }
     __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
                i.InputDoubleRegister(1));
 
-    if (!fallthru) __ Branch(flabel);  // no fallthru to flabel.
-    __ bind(&done);
+    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
 
   } else {
     PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
@@ -532,6 +758,11 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
 }
 
 
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
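+
+// Note: a jump to the block that is next in assembly order is elided here;
+// the fallthrough handling that AssembleArchBranch previously did with a
+// local 'done' label now arrives via BranchInfo::fallthru instead.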
+
+
 // Assembles boolean materializations after an instruction.
 void CodeGenerator::AssembleArchBoolean(Instruction* instr,
                                         FlagsCondition condition) {
@@ -715,24 +946,6 @@ void CodeGenerator::AssemblePrologue() {
     __ Prologue(info->IsCodePreAgingActive());
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
-
-    // Sloppy mode functions and builtins need to replace the receiver with the
-    // global proxy when called as functions (without an explicit receiver
-    // object).
-    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
-    if (info->strict_mode() == SLOPPY && !info->is_native()) {
-      Label ok;
-      // +2 for return address and saved frame pointer.
-      int receiver_slot = info->scope()->num_parameters() + 2;
-      __ lw(a2, MemOperand(fp, receiver_slot * kPointerSize));
-      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
-      __ Branch(&ok, ne, a2, Operand(at));
-
-      __ lw(a2, GlobalObjectOperand());
-      __ lw(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
-      __ sw(a2, MemOperand(fp, receiver_slot * kPointerSize));
-      __ bind(&ok);
-    }
   } else {
     __ StubPrologue();
     frame()->SetRegisterSaveAreaSize(
@@ -821,17 +1034,19 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
         case Constant::kHeapObject:
           __ li(dst, src.ToHeapObject());
           break;
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips.
+          break;
       }
       if (destination->IsStackSlot()) __ sw(dst, g.ToMemOperand(destination));
     } else if (src.type() == Constant::kFloat32) {
-      FPURegister dst = destination->IsDoubleRegister()
-                            ? g.ToDoubleRegister(destination)
-                            : kScratchDoubleReg.low();
-      // TODO(turbofan): Can we do better here?
-      __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
-      __ mtc1(at, dst);
       if (destination->IsDoubleStackSlot()) {
-        __ swc1(dst, g.ToMemOperand(destination));
+        MemOperand dst = g.ToMemOperand(destination);
+        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+        __ sw(at, dst);
+      } else {
+        FloatRegister dst = g.ToSingleRegister(destination);
+        __ Move(dst, src.ToFloat32());
       }
     } else {
       DCHECK_EQ(Constant::kFloat64, src.type());
index 1e8be7f..3aa508f 100644 (file)
@@ -40,6 +40,9 @@ namespace compiler {
   V(MipsDivD)                      \
   V(MipsModD)                      \
   V(MipsSqrtD)                     \
+  V(MipsFloat64Floor)              \
+  V(MipsFloat64Ceil)               \
+  V(MipsFloat64RoundTruncate)      \
   V(MipsCvtSD)                     \
   V(MipsCvtDS)                     \
   V(MipsTruncWD)                   \
@@ -59,6 +62,8 @@ namespace compiler {
   V(MipsLdc1)                      \
   V(MipsSdc1)                      \
   V(MipsPush)                      \
+  V(MipsStoreToStackSlot)          \
+  V(MipsStackClaim)                \
   V(MipsStoreWriteBarrier)
 
 
index 4862e98..5e8e3b1 100644 (file)
@@ -42,6 +42,10 @@ class MipsOperandGenerator FINAL : public OperandGenerator {
         return is_uint16(value);
       case kMipsLdc1:
       case kMipsSdc1:
+      case kCheckedLoadFloat32:
+      case kCheckedLoadFloat64:
+      case kCheckedStoreFloat32:
+      case kCheckedStoreFloat64:
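+        // Doubles may be split by the macro-assembler into two word accesses
+        // at offset and offset + kIntSize, so requiring value + kIntSize to
+        // fit in int16 conservatively covers all of these cases.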
         return is_int16(value + kIntSize);
       default:
         return is_int16(value);
@@ -65,6 +69,14 @@ static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
 }
 
 
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  MipsOperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
 static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
                      Node* node) {
   MipsOperandGenerator g(selector);
@@ -416,14 +428,18 @@ void InstructionSelector::VisitFloat64Sqrt(Node* node) {
 }
 
 
-void InstructionSelector::VisitFloat64Floor(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  VisitRR(this, kMipsFloat64Floor, node);
+}
 
 
-void InstructionSelector::VisitFloat64Ceil(Node* node) { UNREACHABLE(); }
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  VisitRR(this, kMipsFloat64Ceil, node);
+}
 
 
 void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
-  UNREACHABLE();
+  VisitRR(this, kMipsFloat64RoundTruncate, node);
 }
 
 
@@ -434,7 +450,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
 
 void InstructionSelector::VisitCall(Node* node) {
   MipsOperandGenerator g(this);
-  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
 
   FrameStateDescriptor* frame_state_descriptor = NULL;
   if (descriptor->NeedsFrameState()) {
@@ -446,15 +462,17 @@ void InstructionSelector::VisitCall(Node* node) {
 
   // Compute InstructionOperands for inputs and outputs.
   InitializeCallBuffer(node, &buffer, true, false);
-
-  // TODO(dcarney): might be possible to use claim/poke instead
-  // Push any stack arguments.
+  // Possibly align the stack here for functions.
+  int push_count = static_cast<int>(buffer.pushed_nodes.size());
+  if (push_count > 0) {
+    Emit(kMipsStackClaim | MiscField::encode(push_count), NULL);
+  }
+  int slot = push_count - 1;
   for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
        input != buffer.pushed_nodes.rend(); input++) {
-    // TODO(plind): inefficient for MIPS, use MultiPush here.
-    //    - Also need to align the stack. See arm64.
-    //    - Maybe combine with arg slot stuff in DirectCEntry stub.
-    Emit(kMipsPush, NULL, g.UseRegister(*input));
+    Emit(kMipsStoreToStackSlot | MiscField::encode(slot), NULL,
+         g.UseRegister(*input));
+    slot--;
   }
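+
+  // For example, a call with two stack arguments now emits, schematically
+  // (slot numbers in braces; registers are illustrative):
+  //
+  //   MipsStackClaim{2}              // subu sp, sp, 2 * kPointerSize
+  //   MipsStoreToStackSlot{1}, a1    // sw a1, [sp + 4]
+  //   MipsStoreToStackSlot{0}, a0    // sw a0, [sp + 0]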
 
   // Select the appropriate opcode based on the call type.
@@ -483,6 +501,93 @@ void InstructionSelector::VisitCall(Node* node) {
 }
 
 
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  MipsOperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
+                                           ? g.UseImmediate(offset)
+                                           : g.UseRegister(offset);
+
+  // When the offset is an immediate, the bounds check compares the length
+  // register against it, so the length must be in a register; otherwise the
+  // length may itself be an immediate.
+  InstructionOperand* length_operand =
+      g.CanBeImmediate(offset, opcode)
+          ? g.UseRegister(length)
+          : (g.CanBeImmediate(length, opcode) ? g.UseImmediate(length)
+                                              : g.UseRegister(length));
+
+  Emit(opcode | AddressingModeField::encode(kMode_MRI),
+       g.DefineAsRegister(node), offset_operand, length_operand,
+       g.UseRegister(buffer));
+}
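+
+// The code generator lowers the checked load above to roughly the following
+// (schematic; see the ASSEMBLE_CHECKED_LOAD_* macros in
+// code-generator-mips.cc):
+//
+//   branch ool if offset >= length   // unsigned bounds check
+//   addu   at, buffer, offset        // address (delay slot, register case)
+//   lw     result, [at + 0]          // the actual load
+//   ...
+//   ool: result = 0 (or a quiet NaN for FP loads), then jump back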
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MipsOperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
+                                           ? g.UseImmediate(offset)
+                                           : g.UseRegister(offset);
+
+  // As in VisitCheckedLoad: an immediate offset forces the length into a
+  // register for the comparison; otherwise the length may be an immediate.
+  InstructionOperand* length_operand =
+      g.CanBeImmediate(offset, opcode)
+          ? g.UseRegister(length)
+          : (g.CanBeImmediate(length, opcode) ? g.UseImmediate(length)
+                                              : g.UseRegister(length));
+
+  Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
+       length_operand, g.UseRegister(value), g.UseRegister(buffer));
+}
+
+
 namespace {
 
 // Shared routine for multiple compare operations.
@@ -634,11 +739,6 @@ void VisitWordCompareZero(InstructionSelector* selector, Node* user,
 void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
                                       BasicBlock* fbranch) {
   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
-  // If we can fall through to the true block, invert the branch.
-  if (IsNextInAssemblyOrder(tbranch)) {
-    cont.Negate();
-    cont.SwapBlocks();
-  }
   VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
 }
 
@@ -718,6 +818,11 @@ void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
 // static
 MachineOperatorBuilder::Flags
 InstructionSelector::SupportedMachineOperatorFlags() {
+  if (IsMipsArchVariant(kMips32r2) || IsMipsArchVariant(kMips32r6)) {
+    return MachineOperatorBuilder::kFloat64Floor |
+           MachineOperatorBuilder::kFloat64Ceil |
+           MachineOperatorBuilder::kFloat64RoundTruncate;
+  }
   return MachineOperatorBuilder::kNoFlags;
 }
 
index d4ec190..2b314a2 100644 (file)
@@ -51,9 +51,9 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
     const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
-                                   flags);
+                                   flags, properties);
 }
 
 
diff --git a/deps/v8/src/compiler/mips64/OWNERS b/deps/v8/src/compiler/mips64/OWNERS
new file mode 100644 (file)
index 0000000..5508ba6
--- /dev/null
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/src/compiler/mips64/code-generator-mips64.cc b/deps/v8/src/compiler/mips64/code-generator-mips64.cc
new file mode 100644 (file)
index 0000000..dee7705
--- /dev/null
@@ -0,0 +1,1444 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/code-generator.h"
+#include "src/compiler/code-generator-impl.h"
+#include "src/compiler/gap-resolver.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/mips/macro-assembler-mips.h"
+#include "src/scopes.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define __ masm()->
+
+
+// TODO(plind): Possibly avoid using these lithium names.
+#define kScratchReg kLithiumScratchReg
+#define kScratchReg2 kLithiumScratchReg2
+#define kScratchDoubleReg kLithiumScratchDouble
+
+
+// TODO(plind): consider renaming these macros.
+#define TRACE_MSG(msg)                                                      \
+  PrintF("code_gen: \'%s\' in function %s at line %d\n", msg, __FUNCTION__, \
+         __LINE__)
+
+#define TRACE_UNIMPL()                                                       \
+  PrintF("UNIMPLEMENTED code_generator_mips: %s at line %d\n", __FUNCTION__, \
+         __LINE__)
+
+
+// Adds Mips-specific methods to convert InstructionOperands.
+class MipsOperandConverter FINAL : public InstructionOperandConverter {
+ public:
+  MipsOperandConverter(CodeGenerator* gen, Instruction* instr)
+      : InstructionOperandConverter(gen, instr) {}
+
+  FloatRegister OutputSingleRegister(int index = 0) {
+    return ToSingleRegister(instr_->OutputAt(index));
+  }
+
+  FloatRegister InputSingleRegister(int index) {
+    return ToSingleRegister(instr_->InputAt(index));
+  }
+
+  FloatRegister ToSingleRegister(InstructionOperand* op) {
+    // The Single (Float) and Double register namespaces are the same on
+    // MIPS; both are typedefs of FPURegister.
+    return ToDoubleRegister(op);
+  }
+
+  Operand InputImmediate(int index) {
+    Constant constant = ToConstant(instr_->InputAt(index));
+    switch (constant.type()) {
+      case Constant::kInt32:
+        return Operand(constant.ToInt32());
+      case Constant::kInt64:
+        return Operand(constant.ToInt64());
+      case Constant::kFloat32:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat32(), TENURED));
+      case Constant::kFloat64:
+        return Operand(
+            isolate()->factory()->NewNumber(constant.ToFloat64(), TENURED));
+      case Constant::kExternalReference:
+      case Constant::kHeapObject:
+        // TODO(plind): Maybe we should handle ExtRef & HeapObj here?
+        //    maybe not done on arm due to const pool ??
+        break;
+      case Constant::kRpoNumber:
+        UNREACHABLE();  // TODO(titzer): RPO immediates on mips?
+        break;
+    }
+    UNREACHABLE();
+    return Operand(zero_reg);
+  }
+
+  Operand InputOperand(int index) {
+    InstructionOperand* op = instr_->InputAt(index);
+    if (op->IsRegister()) {
+      return Operand(ToRegister(op));
+    }
+    return InputImmediate(index);
+  }
+
+  MemOperand MemoryOperand(int* first_index) {
+    const int index = *first_index;
+    switch (AddressingModeField::decode(instr_->opcode())) {
+      case kMode_None:
+        break;
+      case kMode_MRI:
+        *first_index += 2;
+        return MemOperand(InputRegister(index + 0), InputInt32(index + 1));
+      case kMode_MRR:
+        // TODO(plind): r6 address mode, to be implemented ...
+        UNREACHABLE();
+    }
+    UNREACHABLE();
+    return MemOperand(no_reg);
+  }
+
+  MemOperand MemoryOperand(int index = 0) { return MemoryOperand(&index); }
+
+  MemOperand ToMemOperand(InstructionOperand* op) const {
+    DCHECK(op != NULL);
+    DCHECK(!op->IsRegister());
+    DCHECK(!op->IsDoubleRegister());
+    DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+    // The linkage computes where all spill slots are located.
+    FrameOffset offset = linkage()->GetFrameOffset(op->index(), frame(), 0);
+    return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
+  }
+};
+
+
+static inline bool HasRegisterInput(Instruction* instr, int index) {
+  return instr->InputAt(index)->IsRegister();
+}
+
+
+namespace {
+
+class OutOfLineLoadSingle FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadSingle(CodeGenerator* gen, FloatRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Move(result_, std::numeric_limits<float>::quiet_NaN());
+  }
+
+ private:
+  FloatRegister const result_;
+};
+
+
+class OutOfLineLoadDouble FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadDouble(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    __ Move(result_, std::numeric_limits<double>::quiet_NaN());
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineLoadInteger FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadInteger(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ mov(result_, zero_reg); }
+
+ private:
+  Register const result_;
+};
+
+
+class OutOfLineRound : public OutOfLineCode {
+ public:
+  OutOfLineRound(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL {
+    // Handle rounding to zero case where sign has to be preserved.
+    // High bits of double input already in kScratchReg.
+    __ dsrl(at, kScratchReg, 31);
+    __ dsll(at, at, 31);
+    __ mthc1(at, result_);
+  }
+
+ private:
+  DoubleRegister const result_;
+};
+
+
+class OutOfLineTruncate FINAL : public OutOfLineRound {
+ public:
+  OutOfLineTruncate(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineFloor FINAL : public OutOfLineRound {
+ public:
+  OutOfLineFloor(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+
+class OutOfLineCeil FINAL : public OutOfLineRound {
+ public:
+  OutOfLineCeil(CodeGenerator* gen, DoubleRegister result)
+      : OutOfLineRound(gen, result) {}
+};
+
+
+}  // namespace
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(width, asm_instr)                         \
+  do {                                                                        \
+    auto result = i.Output##width##Register();                                \
+    auto ool = new (zone()) OutOfLineLoad##width(this, result);               \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
+      __ Daddu(at, i.InputRegister(2), offset);                               \
+      __ asm_instr(result, MemOperand(at, 0));                                \
+    } else {                                                                  \
+      auto offset = i.InputOperand(0).immediate();                            \
+      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+    }                                                                         \
+    __ bind(ool->exit());                                                     \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                              \
+  do {                                                                        \
+    auto result = i.OutputRegister();                                         \
+    auto ool = new (zone()) OutOfLineLoadInteger(this, result);               \
+    if (instr->InputAt(0)->IsRegister()) {                                    \
+      auto offset = i.InputRegister(0);                                       \
+      __ Branch(USE_DELAY_SLOT, ool->entry(), hs, offset, i.InputOperand(1)); \
+      __ Daddu(at, i.InputRegister(2), offset);                               \
+      __ asm_instr(result, MemOperand(at, 0));                                \
+    } else {                                                                  \
+      auto offset = i.InputOperand(0).immediate();                            \
+      __ Branch(ool->entry(), ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(result, MemOperand(i.InputRegister(2), offset));           \
+    }                                                                         \
+    __ bind(ool->exit());                                                     \
+  } while (0)
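+
+// In the register-offset case above, the bounds check branches with
+// USE_DELAY_SLOT, so the Daddu computing buffer + offset executes even when
+// the check fails. This is harmless: the out-of-line path does not depend on
+// 'at', and the delay slot would otherwise be wasted.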
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(width, asm_instr)                 \
+  do {                                                                 \
+    Label done;                                                        \
+    if (instr->InputAt(0)->IsRegister()) {                             \
+      auto offset = i.InputRegister(0);                                \
+      auto value = i.Input##width##Register(2);                        \
+      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
+      __ Daddu(at, i.InputRegister(3), offset);                        \
+      __ asm_instr(value, MemOperand(at, 0));                          \
+    } else {                                                           \
+      auto offset = i.InputOperand(0).immediate();                     \
+      auto value = i.Input##width##Register(2);                        \
+      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
+    }                                                                  \
+    __ bind(&done);                                                    \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                      \
+  do {                                                                 \
+    Label done;                                                        \
+    if (instr->InputAt(0)->IsRegister()) {                             \
+      auto offset = i.InputRegister(0);                                \
+      auto value = i.InputRegister(2);                                 \
+      __ Branch(USE_DELAY_SLOT, &done, hs, offset, i.InputOperand(1)); \
+      __ Daddu(at, i.InputRegister(3), offset);                        \
+      __ asm_instr(value, MemOperand(at, 0));                          \
+    } else {                                                           \
+      auto offset = i.InputOperand(0).immediate();                     \
+      auto value = i.InputRegister(2);                                 \
+      __ Branch(&done, ls, i.InputRegister(1), Operand(offset));       \
+      __ asm_instr(value, MemOperand(i.InputRegister(3), offset));     \
+    }                                                                  \
+    __ bind(&done);                                                    \
+  } while (0)
+
+
+#define ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(asm_instr, operation)                  \
+  do {                                                                         \
+    auto ool =                                                                 \
+        new (zone()) OutOfLine##operation(this, i.OutputDoubleRegister());     \
+    Label done;                                                                \
+    __ mfhc1(kScratchReg, i.InputDoubleRegister(0));                           \
+    __ Ext(at, kScratchReg, HeapNumber::kExponentShift,                        \
+           HeapNumber::kExponentBits);                                         \
+    __ Branch(USE_DELAY_SLOT, &done, hs, at,                                   \
+              Operand(HeapNumber::kExponentBias + HeapNumber::kMantissaBits)); \
+    __ mov_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));              \
+    __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0));          \
+    __ dmfc1(at, i.OutputDoubleRegister());                                    \
+    __ Branch(USE_DELAY_SLOT, ool->entry(), eq, at, Operand(zero_reg));        \
+    __ cvt_d_l(i.OutputDoubleRegister(), i.OutputDoubleRegister());            \
+    __ bind(ool->exit());                                                      \
+    __ bind(&done);                                                            \
+  } while (0)
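+
+// Unlike the 32-bit variant in code-generator-mips.cc, the zero test here
+// needs only a single dmfc1, since the 64-bit integer result fits in one GPR.
+// The cvt_d_l sits in the branch delay slot, so it also runs when branching
+// to the out-of-line path, leaving +0.0 for OutOfLineRound to re-sign.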
+
+
+// Assembles an instruction after register allocation, producing machine code.
+void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
+  MipsOperandConverter i(this, instr);
+  InstructionCode opcode = instr->opcode();
+
+  switch (ArchOpcodeField::decode(opcode)) {
+    case kArchCallCodeObject: {
+      EnsureSpaceForLazyDeopt();
+      if (instr->InputAt(0)->IsImmediate()) {
+        __ Call(Handle<Code>::cast(i.InputHeapObject(0)),
+                RelocInfo::CODE_TARGET);
+      } else {
+        __ daddiu(at, i.InputRegister(0), Code::kHeaderSize - kHeapObjectTag);
+        __ Call(at);
+      }
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchCallJSFunction: {
+      EnsureSpaceForLazyDeopt();
+      Register func = i.InputRegister(0);
+      if (FLAG_debug_code) {
+        // Check the function's context matches the context argument.
+        __ ld(kScratchReg, FieldMemOperand(func, JSFunction::kContextOffset));
+        __ Assert(eq, kWrongFunctionContext, cp, Operand(kScratchReg));
+      }
+
+      __ ld(at, FieldMemOperand(func, JSFunction::kCodeEntryOffset));
+      __ Call(at);
+      AddSafepointAndDeopt(instr);
+      break;
+    }
+    case kArchJmp:
+      AssembleArchJump(i.InputRpo(0));
+      break;
+    case kArchNop:
+      // don't emit code for nops.
+      break;
+    case kArchRet:
+      AssembleReturn();
+      break;
+    case kArchStackPointer:
+      __ mov(i.OutputRegister(), sp);
+      break;
+    case kArchTruncateDoubleToI:
+      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+      break;
+    case kMips64Add:
+      __ Addu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Dadd:
+      __ Daddu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Sub:
+      __ Subu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Dsub:
+      __ Dsubu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Mul:
+      __ Mul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64MulHigh:
+      __ Mulh(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64MulHighU:
+      __ Mulhu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Div:
+      __ Div(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64DivU:
+      __ Divu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Mod:
+      __ Mod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64ModU:
+      __ Modu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Dmul:
+      __ Dmul(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Ddiv:
+      __ Ddiv(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64DdivU:
+      __ Ddivu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Dmod:
+      __ Dmod(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64DmodU:
+      __ Dmodu(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64And:
+      __ And(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Or:
+      __ Or(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Xor:
+      __ Xor(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Shl:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ sllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ sll(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMips64Shr:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ srlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ srl(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMips64Sar:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ srav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        __ sra(i.OutputRegister(), i.InputRegister(0), imm);
+      }
+      break;
+    case kMips64Ext:
+      __ Ext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+             i.InputInt8(2));
+      break;
+    case kMips64Dext:
+      __ Dext(i.OutputRegister(), i.InputRegister(0), i.InputInt8(1),
+              i.InputInt8(2));
+      break;
+    case kMips64Dshl:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ dsllv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        if (imm < 32) {
+          __ dsll(i.OutputRegister(), i.InputRegister(0), imm);
+        } else {
+          __ dsll32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+        }
+      }
+      break;
+    case kMips64Dshr:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ dsrlv(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        if (imm < 32) {
+          __ dsrl(i.OutputRegister(), i.InputRegister(0), imm);
+        } else {
+          __ dsrl32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+        }
+      }
+      break;
+    case kMips64Dsar:
+      if (instr->InputAt(1)->IsRegister()) {
+        __ dsrav(i.OutputRegister(), i.InputRegister(0), i.InputRegister(1));
+      } else {
+        int32_t imm = i.InputOperand(1).immediate();
+        if (imm < 32) {
+          __ dsra(i.OutputRegister(), i.InputRegister(0), imm);
+        } else {
+          __ dsra32(i.OutputRegister(), i.InputRegister(0), imm - 32);
+        }
+      }
+      break;
+    case kMips64Ror:
+      __ Ror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Dror:
+      __ Dror(i.OutputRegister(), i.InputRegister(0), i.InputOperand(1));
+      break;
+    case kMips64Tst:
+    case kMips64Tst32:
+      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+      break;
+    case kMips64Cmp:
+    case kMips64Cmp32:
+      // Pseudo-instruction used for cmp/branch. No opcode emitted here.
+      break;
+    case kMips64Mov:
+      // TODO(plind): Should we combine mov/li like this, or use separate instr?
+      //    - Also see x64 ASSEMBLE_BINOP & RegisterOrOperandType
+      if (HasRegisterInput(instr, 0)) {
+        __ mov(i.OutputRegister(), i.InputRegister(0));
+      } else {
+        __ li(i.OutputRegister(), i.InputOperand(0));
+      }
+      break;
+
+    case kMips64CmpD:
+      // Pseudo-instruction used for FP cmp/branch. No opcode emitted here.
+      break;
+    case kMips64AddD:
+      // TODO(plind): add special case: combine mult & add.
+      __ add_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMips64SubD:
+      __ sub_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMips64MulD:
+      // TODO(plind): add special case: right op is -1.0, see arm port.
+      __ mul_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMips64DivD:
+      __ div_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+      break;
+    case kMips64ModD: {
+      // TODO(bmeurer): We should really get rid of this special instruction,
+      // and generate a CallAddress instruction instead.
+      FrameScope scope(masm(), StackFrame::MANUAL);
+      __ PrepareCallCFunction(0, 2, kScratchReg);
+      __ MovToFloatParameters(i.InputDoubleRegister(0),
+                              i.InputDoubleRegister(1));
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result in the double result register.
+      __ MovFromFloatResult(i.OutputDoubleRegister());
+      break;
+    }
+    case kMips64Float64Floor: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(floor_l_d, Floor);
+      break;
+    }
+    case kMips64Float64Ceil: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(ceil_l_d, Ceil);
+      break;
+    }
+    case kMips64Float64RoundTruncate: {
+      ASSEMBLE_ROUND_DOUBLE_TO_DOUBLE(trunc_l_d, Truncate);
+      break;
+    }
+    case kMips64SqrtD: {
+      __ sqrt_d(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
+      break;
+    }
+    case kMips64CvtSD: {
+      __ cvt_s_d(i.OutputSingleRegister(), i.InputDoubleRegister(0));
+      break;
+    }
+    case kMips64CvtDS: {
+      __ cvt_d_s(i.OutputDoubleRegister(), i.InputSingleRegister(0));
+      break;
+    }
+    case kMips64CvtDW: {
+      FPURegister scratch = kScratchDoubleReg;
+      __ mtc1(i.InputRegister(0), scratch);
+      __ cvt_d_w(i.OutputDoubleRegister(), scratch);
+      break;
+    }
+    case kMips64CvtDUw: {
+      FPURegister scratch = kScratchDoubleReg;
+      __ Cvt_d_uw(i.OutputDoubleRegister(), i.InputRegister(0), scratch);
+      break;
+    }
+    case kMips64TruncWD: {
+      FPURegister scratch = kScratchDoubleReg;
+      // Other arches use round to zero here, so we follow.
+      __ trunc_w_d(scratch, i.InputDoubleRegister(0));
+      __ mfc1(i.OutputRegister(), scratch);
+      break;
+    }
+    case kMips64TruncUwD: {
+      FPURegister scratch = kScratchDoubleReg;
+      // TODO(plind): Fix wrong param order of Trunc_uw_d() macro-asm function.
+      __ Trunc_uw_d(i.InputDoubleRegister(0), i.OutputRegister(), scratch);
+      break;
+    }
+    // ... more basic instructions ...
+
+    case kMips64Lbu:
+      __ lbu(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Lb:
+      __ lb(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Sb:
+      __ sb(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMips64Lhu:
+      __ lhu(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Lh:
+      __ lh(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Sh:
+      __ sh(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMips64Lw:
+      __ lw(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Ld:
+      __ ld(i.OutputRegister(), i.MemoryOperand());
+      break;
+    case kMips64Sw:
+      __ sw(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMips64Sd:
+      __ sd(i.InputRegister(2), i.MemoryOperand());
+      break;
+    case kMips64Lwc1: {
+      __ lwc1(i.OutputSingleRegister(), i.MemoryOperand());
+      break;
+    }
+    case kMips64Swc1: {
+      int index = 0;
+      MemOperand operand = i.MemoryOperand(&index);
+      __ swc1(i.InputSingleRegister(index), operand);
+      break;
+    }
+    case kMips64Ldc1:
+      __ ldc1(i.OutputDoubleRegister(), i.MemoryOperand());
+      break;
+    case kMips64Sdc1:
+      __ sdc1(i.InputDoubleRegister(2), i.MemoryOperand());
+      break;
+    case kMips64Push:
+      __ Push(i.InputRegister(0));
+      break;
+    case kMips64StackClaim: {
+      int words = MiscField::decode(instr->opcode());
+      __ Dsubu(sp, sp, Operand(words << kPointerSizeLog2));
+      break;
+    }
+    case kMips64StoreToStackSlot: {
+      int slot = MiscField::decode(instr->opcode());
+      __ sd(i.InputRegister(0), MemOperand(sp, slot << kPointerSizeLog2));
+      break;
+    }
+    case kMips64StoreWriteBarrier: {
+      Register object = i.InputRegister(0);
+      Register index = i.InputRegister(1);
+      Register value = i.InputRegister(2);
+      __ daddu(index, object, index);
+      __ sd(value, MemOperand(index));
+      SaveFPRegsMode mode =
+          frame()->DidAllocateDoubleRegisters() ? kSaveFPRegs : kDontSaveFPRegs;
+      RAStatus ra_status = kRAHasNotBeenSaved;
+      __ RecordWrite(object, index, value, ra_status, mode);
+      break;
+    }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lb);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lbu);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lh);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lhu);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(lw);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Single, lwc1);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(Double, ldc1);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sb);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sh);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(sw);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(Single, swc1);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(Double, sdc1);
+      break;
+  }
+}
+
+
+#define UNSUPPORTED_COND(opcode, condition)                                  \
+  OFStream out(stdout);                                                      \
+  out << "Unsupported " << #opcode << " condition: \"" << condition << "\""; \
+  UNIMPLEMENTED();
+
+// Assembles branches after an instruction.
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
+  MipsOperandConverter i(this, instr);
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  Condition cc = kNoCondition;
+
+  // MIPS does not have condition code flags, so compare and branch are
+  // implemented differently than on the other arch's. The compare operations
+  // emit mips pseudo-instructions, which are handled here by branch
+  // instructions that do the actual comparison. Essential that the input
+  // registers to compare pseudo-op are not modified before this branch op, as
+  // they are tested here.
+  // TODO(plind): Add CHECK() to ensure that test/cmp and this branch were
+  //    not separated by other instructions.
+
+  if (instr->arch_opcode() == kMips64Tst) {
+    switch (branch->condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Tst, branch->condition);
+        break;
+    }
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Branch(tlabel, cc, at, Operand(zero_reg));
+  } else if (instr->arch_opcode() == kMips64Tst32) {
+    switch (branch->condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Tst32, branch->condition);
+        break;
+    }
+    // Zero-extend the registers, since on MIPS64 only 64-bit operand branch
+    // and compare instructions are available. This makes 32-bit operations
+    // more expensive on MIPS64. Ideally the front-end would globally prefer
+    // the Word64 representation for MIPS64, even for Word32 values.
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Dext(at, at, 0, 32);
+    __ Branch(tlabel, cc, at, Operand(zero_reg));
+  } else if (instr->arch_opcode() == kMips64Dadd ||
+             instr->arch_opcode() == kMips64Dsub) {
+    switch (branch->condition) {
+      case kOverflow:
+        cc = ne;
+        break;
+      case kNotOverflow:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Dadd, branch->condition);
+        break;
+    }
+
+    __ dsra32(kScratchReg, i.OutputRegister(), 0);
+    __ sra(at, i.OutputRegister(), 31);
+    __ Branch(tlabel, cc, at, Operand(kScratchReg));
+  } else if (instr->arch_opcode() == kMips64Cmp) {
+    switch (branch->condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp, branch->condition);
+        break;
+    }
+    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+
+    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+
+  } else if (instr->arch_opcode() == kMips64Cmp32) {
+    switch (branch->condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp32, branch->condition);
+        break;
+    }
+
+    switch (branch->condition) {
+      case kEqual:
+      case kNotEqual:
+      case kSignedLessThan:
+      case kSignedGreaterThanOrEqual:
+      case kSignedLessThanOrEqual:
+      case kSignedGreaterThan:
+        // Sign-extend the registers, since on MIPS64 only 64-bit operand
+        // branch and compare instructions are available.
+        __ sll(i.InputRegister(0), i.InputRegister(0), 0);
+        if (instr->InputAt(1)->IsRegister()) {
+          __ sll(i.InputRegister(1), i.InputRegister(1), 0);
+        }
+        break;
+      case kUnsignedLessThan:
+      case kUnsignedGreaterThanOrEqual:
+      case kUnsignedLessThanOrEqual:
+      case kUnsignedGreaterThan:
+        // Zero-extend the registers, since on MIPS64 only 64-bit operand
+        // branch and compare instructions are available.
+        __ Dext(i.InputRegister(0), i.InputRegister(0), 0, 32);
+        if (instr->InputAt(1)->IsRegister()) {
+          __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
+        }
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp32, branch->condition);
+        break;
+    }
+    __ Branch(tlabel, cc, i.InputRegister(0), i.InputOperand(1));
+
+    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+  } else if (instr->arch_opcode() == kMips64CmpD) {
+    // TODO(dusmil) optimize unordered checks to use fewer instructions
+    // even if we have to unfold BranchF macro.
+    Label* nan = flabel;
+    switch (branch->condition) {
+      case kUnorderedEqual:
+        cc = eq;
+        break;
+      case kUnorderedNotEqual:
+        cc = ne;
+        nan = tlabel;
+        break;
+      case kUnorderedLessThan:
+        cc = lt;
+        break;
+      case kUnorderedGreaterThanOrEqual:
+        cc = ge;
+        nan = tlabel;
+        break;
+      case kUnorderedLessThanOrEqual:
+        cc = le;
+        break;
+      case kUnorderedGreaterThan:
+        cc = gt;
+        nan = tlabel;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64CmpD, branch->condition);
+        break;
+    }
+    __ BranchF(tlabel, nan, cc, i.InputDoubleRegister(0),
+               i.InputDoubleRegister(1));
+
+    if (!branch->fallthru) __ Branch(flabel);  // no fallthru to flabel.
+  } else {
+    PrintF("AssembleArchBranch Unimplemented arch_opcode: %d\n",
+           instr->arch_opcode());
+    UNIMPLEMENTED();
+  }
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ Branch(GetLabel(target));
+}
+
+
+// Assembles boolean materializations after an instruction.
+void CodeGenerator::AssembleArchBoolean(Instruction* instr,
+                                        FlagsCondition condition) {
+  MipsOperandConverter i(this, instr);
+  Label done;
+
+  // Materialize a full 32-bit 1 or 0 value. The result register is always the
+  // last output of the instruction.
+  Label false_value;
+  DCHECK_NE(0, instr->OutputCount());
+  Register result = i.OutputRegister(instr->OutputCount() - 1);
+  Condition cc = kNoCondition;
+
+  // MIPS does not have condition code flags, so compare and branch are
+  // implemented differently than on the other arch's. The compare operations
+  // emit mips pseudo-instructions, which are checked and handled here.
+
+  // For materializations, we use delay slot to set the result true, and
+  // in the false case, where we fall through the branch, we reset the result
+  // false.
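+  //
+  // Schematically, each compare kind materializes its result as:
+  //
+  //   branch done if cc (USE_DELAY_SLOT)
+  //   li result, 1          // delay slot: taken path produces 1
+  //  false_value:
+  //   li result, 0          // fallthrough path produces 0
+  //  done: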
+
+  if (instr->arch_opcode() == kMips64Tst) {
+    switch (condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Tst, condition);
+        break;
+    }
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
+    __ li(result, Operand(1));  // In delay slot.
+  } else if (instr->arch_opcode() == kMips64Tst32) {
+    switch (condition) {
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kEqual:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Tst32, condition);
+        break;
+    }
+    // Zero-extend the register, since on MIPS64 only 64-bit operand branch
+    // and compare instructions are available.
+    __ And(at, i.InputRegister(0), i.InputOperand(1));
+    __ Dext(at, at, 0, 32);
+    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(zero_reg));
+    __ li(result, Operand(1));  // In delay slot.
+  } else if (instr->arch_opcode() == kMips64Dadd ||
+             instr->arch_opcode() == kMips64Dsub) {
+    switch (condition) {
+      case kOverflow:
+        cc = ne;
+        break;
+      case kNotOverflow:
+        cc = eq;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Dadd, condition);
+        break;
+    }
+    __ dsra32(kScratchReg, i.OutputRegister(), 0);
+    __ sra(at, i.OutputRegister(), 31);
+    __ Branch(USE_DELAY_SLOT, &done, cc, at, Operand(kScratchReg));
+    __ li(result, Operand(1));  // In delay slot.
+  } else if (instr->arch_opcode() == kMips64Cmp) {
+    Register left = i.InputRegister(0);
+    Operand right = i.InputOperand(1);
+    switch (condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp, condition);
+        break;
+    }
+    __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
+    __ li(result, Operand(1));  // In delay slot.
+  } else if (instr->arch_opcode() == kMips64Cmp32) {
+    Register left = i.InputRegister(0);
+    Operand right = i.InputOperand(1);
+    switch (condition) {
+      case kEqual:
+        cc = eq;
+        break;
+      case kNotEqual:
+        cc = ne;
+        break;
+      case kSignedLessThan:
+        cc = lt;
+        break;
+      case kSignedGreaterThanOrEqual:
+        cc = ge;
+        break;
+      case kSignedLessThanOrEqual:
+        cc = le;
+        break;
+      case kSignedGreaterThan:
+        cc = gt;
+        break;
+      case kUnsignedLessThan:
+        cc = lo;
+        break;
+      case kUnsignedGreaterThanOrEqual:
+        cc = hs;
+        break;
+      case kUnsignedLessThanOrEqual:
+        cc = ls;
+        break;
+      case kUnsignedGreaterThan:
+        cc = hi;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp32, condition);
+        break;
+    }
+
+    switch (condition) {
+      case kEqual:
+      case kNotEqual:
+      case kSignedLessThan:
+      case kSignedGreaterThanOrEqual:
+      case kSignedLessThanOrEqual:
+      case kSignedGreaterThan:
+        // Sign-extend the registers, since on MIPS64 only 64-bit operand
+        // branch and compare instructions are available.
+        __ sll(left, left, 0);
+        if (instr->InputAt(1)->IsRegister()) {
+          __ sll(i.InputRegister(1), i.InputRegister(1), 0);
+        }
+        break;
+      case kUnsignedLessThan:
+      case kUnsignedGreaterThanOrEqual:
+      case kUnsignedLessThanOrEqual:
+      case kUnsignedGreaterThan:
+        // Zero-extend the registers, since on MIPS64 only 64-bit operand
+        // branch and compare instructions are available.
+        __ Dext(left, left, 0, 32);
+        if (instr->InputAt(1)->IsRegister()) {
+          __ Dext(i.InputRegister(1), i.InputRegister(1), 0, 32);
+        }
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64Cmp32, condition);
+        break;
+    }
+    __ Branch(USE_DELAY_SLOT, &done, cc, left, right);
+    __ li(result, Operand(1));  // In delay slot.
+  } else if (instr->arch_opcode() == kMips64CmpD) {
+    FPURegister left = i.InputDoubleRegister(0);
+    FPURegister right = i.InputDoubleRegister(1);
+    // TODO(plind): Provide NaN-testing macro-asm function without need for
+    // BranchF.
+    FPURegister dummy1 = f0;
+    FPURegister dummy2 = f2;
+    switch (condition) {
+      case kUnorderedEqual:
+        // TODO(plind):  improve the NaN testing throughout this function.
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = eq;
+        break;
+      case kUnorderedNotEqual:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = ne;
+        break;
+      case kUnorderedLessThan:
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = lt;
+        break;
+      case kUnorderedGreaterThanOrEqual:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = ge;
+        break;
+      case kUnorderedLessThanOrEqual:
+        __ BranchF(NULL, &false_value, kNoCondition, dummy1, dummy2);
+        cc = le;
+        break;
+      case kUnorderedGreaterThan:
+        __ BranchF(USE_DELAY_SLOT, NULL, &done, kNoCondition, dummy1, dummy2);
+        __ li(result, Operand(1));  // In delay slot - returns 1 on NaN.
+        cc = gt;
+        break;
+      default:
+        UNSUPPORTED_COND(kMips64CmpD, condition);
+        break;
+    }
+    __ BranchF(USE_DELAY_SLOT, &done, NULL, cc, left, right);
+    __ li(result, Operand(1));  // In delay slot - branch taken returns 1.
+                                // Fall-thru (branch not taken) returns 0.
+
+  } else {
+    PrintF("AssembleArchBranch Unimplemented arch_opcode is : %d\n",
+           instr->arch_opcode());
+    TRACE_UNIMPL();
+    UNIMPLEMENTED();
+  }
+  // Fallthru case is the false materialization.
+  __ bind(&false_value);
+  __ li(result, Operand(static_cast<int64_t>(0)));
+  __ bind(&done);
+}
+
+
+void CodeGenerator::AssembleDeoptimizerCall(int deoptimization_id) {
+  Address deopt_entry = Deoptimizer::GetDeoptimizationEntry(
+      isolate(), deoptimization_id, Deoptimizer::LAZY);
+  __ Call(deopt_entry, RelocInfo::RUNTIME_ENTRY);
+}
+
+
+void CodeGenerator::AssemblePrologue() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    __ Push(ra, fp);
+    __ mov(fp, sp);
+    const RegList saves = descriptor->CalleeSavedRegisters();
+    if (saves != 0) {  // Save callee-saved registers.
+      // TODO(plind): make callee save size const, possibly DCHECK it.
+      int register_save_area_size = 0;
+      for (int i = Register::kNumRegisters - 1; i >= 0; i--) {
+        if (!((1 << i) & saves)) continue;
+        register_save_area_size += kPointerSize;
+      }
+      frame()->SetRegisterSaveAreaSize(register_save_area_size);
+      __ MultiPush(saves);
+    }
+  } else if (descriptor->IsJSFunctionCall()) {
+    CompilationInfo* info = this->info();
+    __ Prologue(info->IsCodePreAgingActive());
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
+    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
+    if (info->strict_mode() == SLOPPY && !info->is_native()) {
+      Label ok;
+      // +2 for return address and saved frame pointer.
+      int receiver_slot = info->scope()->num_parameters() + 2;
+      __ ld(a2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
+      __ Branch(&ok, ne, a2, Operand(at));
+
+      __ ld(a2, GlobalObjectOperand());
+      __ ld(a2, FieldMemOperand(a2, GlobalObject::kGlobalProxyOffset));
+      __ sd(a2, MemOperand(fp, receiver_slot * kPointerSize));
+      __ bind(&ok);
+    }
+  } else {
+    __ StubPrologue();
+    frame()->SetRegisterSaveAreaSize(
+        StandardFrameConstants::kFixedFrameSizeFromFp);
+  }
+  int stack_slots = frame()->GetSpillSlotCount();
+  if (stack_slots > 0) {
+    __ Dsubu(sp, sp, Operand(stack_slots * kPointerSize));
+  }
+}
+
+
+void CodeGenerator::AssembleReturn() {
+  CallDescriptor* descriptor = linkage()->GetIncomingDescriptor();
+  if (descriptor->kind() == CallDescriptor::kCallAddress) {
+    if (frame()->GetRegisterSaveAreaSize() > 0) {
+      // Remove this frame's spill slots first.
+      int stack_slots = frame()->GetSpillSlotCount();
+      if (stack_slots > 0) {
+        __ Daddu(sp, sp, Operand(stack_slots * kPointerSize));
+      }
+      // Restore registers.
+      const RegList saves = descriptor->CalleeSavedRegisters();
+      if (saves != 0) {
+        __ MultiPop(saves);
+      }
+    }
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+    __ Ret();
+  } else {
+    __ mov(sp, fp);
+    __ Pop(ra, fp);
+    int pop_count = descriptor->IsJSFunctionCall()
+                        ? static_cast<int>(descriptor->JSParameterCount())
+                        : 0;
+    __ DropAndRet(pop_count);
+  }
+}
+
+
+void CodeGenerator::AssembleMove(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  MipsOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mov(g.ToRegister(destination), src);
+    } else {
+      __ sd(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsRegister() || destination->IsStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ ld(g.ToRegister(destination), src);
+    } else {
+      Register temp = kScratchReg;
+      __ ld(temp, src);
+      __ sd(temp, g.ToMemOperand(destination));
+    }
+  } else if (source->IsConstant()) {
+    Constant src = g.ToConstant(source);
+    if (destination->IsRegister() || destination->IsStackSlot()) {
+      Register dst =
+          destination->IsRegister() ? g.ToRegister(destination) : kScratchReg;
+      switch (src.type()) {
+        case Constant::kInt32:
+          __ li(dst, Operand(src.ToInt32()));
+          break;
+        case Constant::kFloat32:
+          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat32(), TENURED));
+          break;
+        case Constant::kInt64:
+          __ li(dst, Operand(src.ToInt64()));
+          break;
+        case Constant::kFloat64:
+          __ li(dst, isolate()->factory()->NewNumber(src.ToFloat64(), TENURED));
+          break;
+        case Constant::kExternalReference:
+          __ li(dst, Operand(src.ToExternalReference()));
+          break;
+        case Constant::kHeapObject:
+          __ li(dst, src.ToHeapObject());
+          break;
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(titzer): loading RPO numbers on mips64.
+          break;
+      }
+      if (destination->IsStackSlot()) __ sd(dst, g.ToMemOperand(destination));
+    } else if (src.type() == Constant::kFloat32) {
+      if (destination->IsDoubleStackSlot()) {
+        MemOperand dst = g.ToMemOperand(destination);
+        __ li(at, Operand(bit_cast<int32_t>(src.ToFloat32())));
+        __ sw(at, dst);
+      } else {
+        FloatRegister dst = g.ToSingleRegister(destination);
+        __ Move(dst, src.ToFloat32());
+      }
+    } else {
+      DCHECK_EQ(Constant::kFloat64, src.type());
+      DoubleRegister dst = destination->IsDoubleRegister()
+                               ? g.ToDoubleRegister(destination)
+                               : kScratchDoubleReg;
+      __ Move(dst, src.ToFloat64());
+      if (destination->IsDoubleStackSlot()) {
+        __ sdc1(dst, g.ToMemOperand(destination));
+      }
+    }
+  } else if (source->IsDoubleRegister()) {
+    FPURegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPURegister dst = g.ToDoubleRegister(destination);
+      __ Move(dst, src);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ sdc1(src, g.ToMemOperand(destination));
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleRegister() || destination->IsDoubleStackSlot());
+    MemOperand src = g.ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ ldc1(g.ToDoubleRegister(destination), src);
+    } else {
+      FPURegister temp = kScratchDoubleReg;
+      __ ldc1(temp, src);
+      __ sdc1(temp, g.ToMemOperand(destination));
+    }
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AssembleSwap(InstructionOperand* source,
+                                 InstructionOperand* destination) {
+  MipsOperandConverter g(this, NULL);
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+  if (source->IsRegister()) {
+    // Register-register.
+    Register temp = kScratchReg;
+    Register src = g.ToRegister(source);
+    if (destination->IsRegister()) {
+      Register dst = g.ToRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ mov(temp, src);
+      __ ld(src, dst);
+      __ sd(temp, dst);
+    }
+  } else if (source->IsStackSlot()) {
+    DCHECK(destination->IsStackSlot());
+    Register temp_0 = kScratchReg;
+    Register temp_1 = kScratchReg2;
+    MemOperand src = g.ToMemOperand(source);
+    MemOperand dst = g.ToMemOperand(destination);
+    __ ld(temp_0, src);
+    __ ld(temp_1, dst);
+    __ sd(temp_0, dst);
+    __ sd(temp_1, src);
+  } else if (source->IsDoubleRegister()) {
+    FPURegister temp = kScratchDoubleReg;
+    FPURegister src = g.ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      FPURegister dst = g.ToDoubleRegister(destination);
+      __ Move(temp, src);
+      __ Move(src, dst);
+      __ Move(dst, temp);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand dst = g.ToMemOperand(destination);
+      __ Move(temp, src);
+      __ ldc1(src, dst);
+      __ sdc1(temp, dst);
+    }
+  } else if (source->IsDoubleStackSlot()) {
+    DCHECK(destination->IsDoubleStackSlot());
+    Register temp_0 = kScratchReg;
+    FPURegister temp_1 = kScratchDoubleReg;
+    MemOperand src0 = g.ToMemOperand(source);
+    MemOperand dst0 = g.ToMemOperand(destination);
+    __ ldc1(temp_1, dst0);  // Save destination in temp_1.
+    __ ld(temp_0, src0);    // Copy the full 8-byte slot via temp_0.
+    __ sd(temp_0, dst0);
+    __ sdc1(temp_1, src0);
+  } else {
+    // No other combinations are possible.
+    UNREACHABLE();
+  }
+}
+
+
+void CodeGenerator::AddNopForSmiCodeInlining() {
+  // This hook is unused on 32-bit ARM but still present on ARM64 and MIPS64.
+  // TODO(plind): Unclear when this is called now. Understand, fix if needed.
+  __ nop();  // Maybe PROPERTY_ACCESS_INLINED?
+}
+
+
+void CodeGenerator::EnsureSpaceForLazyDeopt() {
+  int space_needed = Deoptimizer::patch_size();
+  if (!info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      // Block trampoline pool emission for the duration of the padding.
+      v8::internal::Assembler::BlockTrampolinePoolScope block_trampoline_pool(
+          masm());
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % v8::internal::Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= v8::internal::Assembler::kInstrSize;
+      }
+    }
+  }
+  MarkLazyDeoptSite();
+}
+
+#undef __
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/compiler/mips64/instruction-codes-mips64.h b/deps/v8/src/compiler/mips64/instruction-codes-mips64.h
new file mode 100644 (file)
index 0000000..dd019f9
--- /dev/null
@@ -0,0 +1,108 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+#define V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+// MIPS64-specific opcodes that specify which assembly sequence to emit.
+// Most opcodes specify a single instruction.
+#define TARGET_ARCH_OPCODE_LIST(V) \
+  V(Mips64Add)                     \
+  V(Mips64Dadd)                    \
+  V(Mips64Sub)                     \
+  V(Mips64Dsub)                    \
+  V(Mips64Mul)                     \
+  V(Mips64MulHigh)                 \
+  V(Mips64MulHighU)                \
+  V(Mips64Dmul)                    \
+  V(Mips64Div)                     \
+  V(Mips64Ddiv)                    \
+  V(Mips64DivU)                    \
+  V(Mips64DdivU)                   \
+  V(Mips64Mod)                     \
+  V(Mips64Dmod)                    \
+  V(Mips64ModU)                    \
+  V(Mips64DmodU)                   \
+  V(Mips64And)                     \
+  V(Mips64Or)                      \
+  V(Mips64Xor)                     \
+  V(Mips64Shl)                     \
+  V(Mips64Shr)                     \
+  V(Mips64Sar)                     \
+  V(Mips64Ext)                     \
+  V(Mips64Dext)                    \
+  V(Mips64Dshl)                    \
+  V(Mips64Dshr)                    \
+  V(Mips64Dsar)                    \
+  V(Mips64Ror)                     \
+  V(Mips64Dror)                    \
+  V(Mips64Mov)                     \
+  V(Mips64Tst)                     \
+  V(Mips64Tst32)                   \
+  V(Mips64Cmp)                     \
+  V(Mips64Cmp32)                   \
+  V(Mips64CmpD)                    \
+  V(Mips64AddD)                    \
+  V(Mips64SubD)                    \
+  V(Mips64MulD)                    \
+  V(Mips64DivD)                    \
+  V(Mips64ModD)                    \
+  V(Mips64SqrtD)                   \
+  V(Mips64Float64Floor)            \
+  V(Mips64Float64Ceil)             \
+  V(Mips64Float64RoundTruncate)    \
+  V(Mips64CvtSD)                   \
+  V(Mips64CvtDS)                   \
+  V(Mips64TruncWD)                 \
+  V(Mips64TruncUwD)                \
+  V(Mips64CvtDW)                   \
+  V(Mips64CvtDUw)                  \
+  V(Mips64Lb)                      \
+  V(Mips64Lbu)                     \
+  V(Mips64Sb)                      \
+  V(Mips64Lh)                      \
+  V(Mips64Lhu)                     \
+  V(Mips64Sh)                      \
+  V(Mips64Ld)                      \
+  V(Mips64Lw)                      \
+  V(Mips64Sw)                      \
+  V(Mips64Sd)                      \
+  V(Mips64Lwc1)                    \
+  V(Mips64Swc1)                    \
+  V(Mips64Ldc1)                    \
+  V(Mips64Sdc1)                    \
+  V(Mips64Push)                    \
+  V(Mips64StoreToStackSlot)        \
+  V(Mips64StackClaim)              \
+  V(Mips64StoreWriteBarrier)
+
+
+// Addressing modes represent the "shape" of inputs to an instruction.
+// Many instructions support multiple addressing modes. Addressing modes
+// are encoded into the InstructionCode of the instruction and tell the
+// code generator after register allocation which assembler method to call.
+//
+// We use the following local notation for addressing modes:
+//
+// R = register
+// O = register or stack slot
+// D = double register
+// I = immediate (handle, external, int32)
+// MRI = [register + immediate]
+// MRR = [register + register]
+// TODO(plind): Add the new r6 address modes.
+#define TARGET_ADDRESSING_MODE_LIST(V) \
+  V(MRI) /* [%r0 + K] */               \
+  V(MRR) /* [%r0 + %r1] */
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MIPS64_INSTRUCTION_CODES_MIPS64_H_
diff --git a/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc b/deps/v8/src/compiler/mips64/instruction-selector-mips64.cc
new file mode 100644 (file)
index 0000000..35ad16b
--- /dev/null
@@ -0,0 +1,1079 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/bits.h"
+#include "src/compiler/instruction-selector-impl.h"
+#include "src/compiler/node-matchers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define TRACE_UNIMPL() \
+  PrintF("UNIMPLEMENTED instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+#define TRACE() PrintF("instr_sel: %s at line %d\n", __FUNCTION__, __LINE__)
+
+
+// Adds MIPS64-specific methods for generating InstructionOperands.
+class Mips64OperandGenerator FINAL : public OperandGenerator {
+ public:
+  explicit Mips64OperandGenerator(InstructionSelector* selector)
+      : OperandGenerator(selector) {}
+
+  InstructionOperand* UseOperand(Node* node, InstructionCode opcode) {
+    if (CanBeImmediate(node, opcode)) {
+      return UseImmediate(node);
+    }
+    return UseRegister(node);
+  }
+
+  bool CanBeImmediate(Node* node, InstructionCode opcode) {
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
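+    // Immediate ranges differ per instruction: shifts take 5-bit (word) or
+    // 6-bit (doubleword) amounts, xori takes a zero-extended 16-bit
+    // immediate, and most memory and arithmetic immediates are signed 16-bit.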
+    switch (ArchOpcodeField::decode(opcode)) {
+      case kMips64Shl:
+      case kMips64Sar:
+      case kMips64Shr:
+        return is_uint5(value);
+      case kMips64Dshl:
+      case kMips64Dsar:
+      case kMips64Dshr:
+        return is_uint6(value);
+      case kMips64Xor:
+        return is_uint16(value);
+      case kMips64Ldc1:
+      case kMips64Sdc1:
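+        // Ldc1/Sdc1 may be split into two word accesses at offset and
+        // offset + 4, so require both displacements to fit in the signed
+        // 16-bit immediate.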
+        return is_int16(value + kIntSize);
+      default:
+        return is_int16(value);
+    }
+  }
+
+
+  bool CanBeImmediate(Node* node, InstructionCode opcode,
+                      FlagsContinuation* cont) {
+    int64_t value;
+    if (node->opcode() == IrOpcode::kInt32Constant)
+      value = OpParameter<int32_t>(node);
+    else if (node->opcode() == IrOpcode::kInt64Constant)
+      value = OpParameter<int64_t>(node);
+    else
+      return false;
+    switch (ArchOpcodeField::decode(opcode)) {
+      case kMips64Cmp32:
+        switch (cont->condition()) {
+          case kUnsignedLessThan:
+          case kUnsignedGreaterThanOrEqual:
+          case kUnsignedLessThanOrEqual:
+          case kUnsignedGreaterThan:
+            // Immediate operands for unsigned 32-bit compare operations
+            // should not be sign-extended.
+            return is_uint15(value);
+          default:
+            return false;
+        }
+      default:
+        return is_int16(value);
+    }
+  }
+
+
+ private:
+  bool ImmediateFitsAddrMode1Instruction(int32_t imm) const {
+    TRACE_UNIMPL();
+    return false;
+  }
+};
+
+
+static void VisitRR(InstructionSelector* selector, ArchOpcode opcode,
+                    Node* node) {
+  Mips64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)));
+}
+
+
+static void VisitRRR(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  Mips64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseRegister(node->InputAt(1)));
+}
+
+
+static void VisitRRO(InstructionSelector* selector, ArchOpcode opcode,
+                     Node* node) {
+  Mips64OperandGenerator g(selector);
+  selector->Emit(opcode, g.DefineAsRegister(node),
+                 g.UseRegister(node->InputAt(0)),
+                 g.UseOperand(node->InputAt(1), opcode));
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode, FlagsContinuation* cont) {
+  Mips64OperandGenerator g(selector);
+  Int32BinopMatcher m(node);
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  InstructionOperand* outputs[2];
+  size_t output_count = 0;
+
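+  // A branch continuation adds the true/false block labels as extra inputs;
+  // a set continuation adds the materialized boolean as an extra output.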
+  inputs[input_count++] = g.UseRegister(m.left().node());
+  inputs[input_count++] = g.UseOperand(m.right().node(), opcode);
+
+  if (cont->IsBranch()) {
+    inputs[input_count++] = g.Label(cont->true_block());
+    inputs[input_count++] = g.Label(cont->false_block());
+  }
+
+  outputs[output_count++] = g.DefineAsRegister(node);
+  if (cont->IsSet()) {
+    outputs[output_count++] = g.DefineAsRegister(cont->result());
+  }
+
+  DCHECK_NE(0, input_count);
+  DCHECK_NE(0, output_count);
+  DCHECK_GE(arraysize(inputs), input_count);
+  DCHECK_GE(arraysize(outputs), output_count);
+
+  Instruction* instr = selector->Emit(cont->Encode(opcode), output_count,
+                                      outputs, input_count, inputs);
+  if (cont->IsBranch()) instr->MarkAsControl();
+}
+
+
+static void VisitBinop(InstructionSelector* selector, Node* node,
+                       InstructionCode opcode) {
+  FlagsContinuation cont;
+  VisitBinop(selector, node, opcode, &cont);
+}
+
+
+void InstructionSelector::VisitLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
+  MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
+  Mips64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kMips64Lwc1;
+      break;
+    case kRepFloat64:
+      opcode = kMips64Ldc1;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = typ == kTypeUint32 ? kMips64Lbu : kMips64Lb;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeUint32 ? kMips64Lhu : kMips64Lh;
+      break;
+    case kRepWord32:
+      opcode = kMips64Lw;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kMips64Ld;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
+  } else {
+    InstructionOperand* addr_reg = g.TempRegister();
+    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired load opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI),
+         g.DefineAsRegister(node), addr_reg, g.TempImmediate(0));
+  }
+}
+
+
+void InstructionSelector::VisitStore(Node* node) {
+  Mips64OperandGenerator g(this);
+  Node* base = node->InputAt(0);
+  Node* index = node->InputAt(1);
+  Node* value = node->InputAt(2);
+
+  StoreRepresentation store_rep = OpParameter<StoreRepresentation>(node);
+  MachineType rep = RepresentationOf(store_rep.machine_type());
+  if (store_rep.write_barrier_kind() == kFullWriteBarrier) {
+    DCHECK(rep == kRepTagged);
+    // TODO(dcarney): refactor RecordWrite function to take temp registers
+    //                and pass them here instead of using fixed regs
+    // TODO(dcarney): handle immediate indices.
+    InstructionOperand* temps[] = {g.TempRegister(t1), g.TempRegister(t2)};
+    Emit(kMips64StoreWriteBarrier, NULL, g.UseFixed(base, t0),
+         g.UseFixed(index, t1), g.UseFixed(value, t2), arraysize(temps), temps);
+    return;
+  }
+  DCHECK_EQ(kNoWriteBarrier, store_rep.write_barrier_kind());
+
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepFloat32:
+      opcode = kMips64Swc1;
+      break;
+    case kRepFloat64:
+      opcode = kMips64Sdc1;
+      break;
+    case kRepBit:  // Fall through.
+    case kRepWord8:
+      opcode = kMips64Sb;
+      break;
+    case kRepWord16:
+      opcode = kMips64Sh;
+      break;
+    case kRepWord32:
+      opcode = kMips64Sw;
+      break;
+    case kRepTagged:  // Fall through.
+    case kRepWord64:
+      opcode = kMips64Sd;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+
+  if (g.CanBeImmediate(index, opcode)) {
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL,
+         g.UseRegister(base), g.UseImmediate(index), g.UseRegister(value));
+  } else {
+    InstructionOperand* addr_reg = g.TempRegister();
+    Emit(kMips64Dadd | AddressingModeField::encode(kMode_None), addr_reg,
+         g.UseRegister(index), g.UseRegister(base));
+    // Emit desired store opcode, using temp addr_reg.
+    Emit(opcode | AddressingModeField::encode(kMode_MRI), NULL, addr_reg,
+         g.TempImmediate(0), g.UseRegister(value));
+  }
+}
+
+
+void InstructionSelector::VisitWord32And(Node* node) {
+  VisitBinop(this, node, kMips64And);
+}
+
+
+void InstructionSelector::VisitWord64And(Node* node) {
+  VisitBinop(this, node, kMips64And);
+}
+
+
+void InstructionSelector::VisitWord32Or(Node* node) {
+  VisitBinop(this, node, kMips64Or);
+}
+
+
+void InstructionSelector::VisitWord64Or(Node* node) {
+  VisitBinop(this, node, kMips64Or);
+}
+
+
+void InstructionSelector::VisitWord32Xor(Node* node) {
+  VisitBinop(this, node, kMips64Xor);
+}
+
+
+void InstructionSelector::VisitWord64Xor(Node* node) {
+  VisitBinop(this, node, kMips64Xor);
+}
+
+
+void InstructionSelector::VisitWord32Shl(Node* node) {
+  VisitRRO(this, kMips64Shl, node);
+}
+
+
+void InstructionSelector::VisitWord32Shr(Node* node) {
+  VisitRRO(this, kMips64Shr, node);
+}
+
+
+void InstructionSelector::VisitWord32Sar(Node* node) {
+  VisitRRO(this, kMips64Sar, node);
+}
+
+
+void InstructionSelector::VisitWord64Shl(Node* node) {
+  VisitRRO(this, kMips64Dshl, node);
+}
+
+
+void InstructionSelector::VisitWord64Shr(Node* node) {
+  VisitRRO(this, kMips64Dshr, node);
+}
+
+
+void InstructionSelector::VisitWord64Sar(Node* node) {
+  VisitRRO(this, kMips64Dsar, node);
+}
+
+
+void InstructionSelector::VisitWord32Ror(Node* node) {
+  VisitRRO(this, kMips64Ror, node);
+}
+
+
+void InstructionSelector::VisitWord64Ror(Node* node) {
+  VisitRRO(this, kMips64Dror, node);
+}
+
+
+void InstructionSelector::VisitInt32Add(Node* node) {
+  Mips64OperandGenerator g(this);
+  // TODO(plind): Consider multiply & add optimization from arm port.
+  VisitBinop(this, node, kMips64Add);
+}
+
+
+void InstructionSelector::VisitInt64Add(Node* node) {
+  Mips64OperandGenerator g(this);
+  // TODO(plind): Consider multiply & add optimization from arm port.
+  VisitBinop(this, node, kMips64Dadd);
+}
+
+
+void InstructionSelector::VisitInt32Sub(Node* node) {
+  VisitBinop(this, node, kMips64Sub);
+}
+
+
+void InstructionSelector::VisitInt64Sub(Node* node) {
+  VisitBinop(this, node, kMips64Dsub);
+}
+
+
+void InstructionSelector::VisitInt32Mul(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (m.right().HasValue() && m.right().Value() > 0) {
+    int32_t value = m.right().Value();
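+    // Strength-reduce multiplication by 2^k, 2^k + 1 and 2^k - 1 into a
+    // shift, or a shift plus an add/sub.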
+    if (base::bits::IsPowerOfTwo32(value)) {
+      Emit(kMips64Shl | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value)));
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value - 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value - 1)));
+      Emit(kMips64Add | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value + 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMips64Shl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value + 1)));
+      Emit(kMips64Sub | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+      return;
+    }
+  }
+  Emit(kMips64Mul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32MulHigh(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64MulHigh, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)), g.UseRegister(node->InputAt(1)));
+}
+
+
+void InstructionSelector::VisitUint32MulHigh(Node* node) {
+  Mips64OperandGenerator g(this);
+  InstructionOperand* const dmul_operand = g.TempRegister();
+  Emit(kMips64MulHighU, dmul_operand, g.UseRegister(node->InputAt(0)),
+       g.UseRegister(node->InputAt(1)));
+  Emit(kMips64Ext, g.DefineAsRegister(node), dmul_operand, g.TempImmediate(0),
+       g.TempImmediate(32));
+}
+
+
+void InstructionSelector::VisitInt64Mul(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  // TODO(dusmil): Add optimization for shifts larger than 32.
+  if (m.right().HasValue() && m.right().Value() > 0) {
+    int64_t value = m.right().Value();
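+    // Same strength reduction as VisitInt32Mul, using doubleword shifts.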
+    if (base::bits::IsPowerOfTwo32(value)) {
+      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value)));
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value - 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value - 1)));
+      Emit(kMips64Dadd | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()), temp);
+      return;
+    }
+    if (base::bits::IsPowerOfTwo32(value + 1)) {
+      InstructionOperand* temp = g.TempRegister();
+      Emit(kMips64Dshl | AddressingModeField::encode(kMode_None), temp,
+           g.UseRegister(m.left().node()),
+           g.TempImmediate(WhichPowerOf2(value + 1)));
+      Emit(kMips64Dsub | AddressingModeField::encode(kMode_None),
+           g.DefineAsRegister(node), temp, g.UseRegister(m.left().node()));
+      return;
+    }
+  }
+  Emit(kMips64Dmul, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Div(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMips64Div, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Div(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMips64DivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt32Mod(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMips64Mod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint32Mod(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  Emit(kMips64ModU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt64Div(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  Emit(kMips64Ddiv, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint64Div(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  Emit(kMips64DdivU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitInt64Mod(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  Emit(kMips64Dmod, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitUint64Mod(Node* node) {
+  Mips64OperandGenerator g(this);
+  Int64BinopMatcher m(node);
+  Emit(kMips64DmodU, g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+       g.UseRegister(m.right().node()));
+}
+
+
+void InstructionSelector::VisitChangeFloat32ToFloat64(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64CvtDS, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToFloat64(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64CvtDW, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToFloat64(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64CvtDUw, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToInt32(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64TruncWD, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeFloat64ToUint32(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64TruncUwD, g.DefineAsRegister(node),
+       g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitChangeInt32ToInt64(Node* node) {
+  Mips64OperandGenerator g(this);
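+  // On MIPS64, sll with shift amount 0 sign-extends the low word into the
+  // full 64-bit register, which is exactly this conversion.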
+  Emit(kMips64Shl, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+       g.TempImmediate(0));
+}
+
+
+void InstructionSelector::VisitChangeUint32ToUint64(Node* node) {
+  Mips64OperandGenerator g(this);
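+  // Dext with position 0 and size 32 clears the upper word, zero-extending
+  // the 32-bit value.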
+  Emit(kMips64Dext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+       g.TempImmediate(0), g.TempImmediate(32));
+}
+
+
+void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64Ext, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)),
+       g.TempImmediate(0), g.TempImmediate(32));
+}
+
+
+void InstructionSelector::VisitTruncateFloat64ToFloat32(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64CvtSD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Add(Node* node) {
+  VisitRRR(this, kMips64AddD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Sub(Node* node) {
+  VisitRRR(this, kMips64SubD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mul(Node* node) {
+  VisitRRR(this, kMips64MulD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Div(Node* node) {
+  VisitRRR(this, kMips64DivD, node);
+}
+
+
+void InstructionSelector::VisitFloat64Mod(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64ModD, g.DefineAsFixed(node, f0),
+       g.UseFixed(node->InputAt(0), f12),
+       g.UseFixed(node->InputAt(1), f14))->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitFloat64Sqrt(Node* node) {
+  Mips64OperandGenerator g(this);
+  Emit(kMips64SqrtD, g.DefineAsRegister(node), g.UseRegister(node->InputAt(0)));
+}
+
+
+void InstructionSelector::VisitFloat64Floor(Node* node) {
+  VisitRR(this, kMips64Float64Floor, node);
+}
+
+
+void InstructionSelector::VisitFloat64Ceil(Node* node) {
+  VisitRR(this, kMips64Float64Ceil, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTruncate(Node* node) {
+  VisitRR(this, kMips64Float64RoundTruncate, node);
+}
+
+
+void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
+  UNREACHABLE();
+}
+
+
+void InstructionSelector::VisitCall(Node* node) {
+  Mips64OperandGenerator g(this);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
+
+  FrameStateDescriptor* frame_state_descriptor = NULL;
+  if (descriptor->NeedsFrameState()) {
+    frame_state_descriptor =
+        GetFrameStateDescriptor(node->InputAt(descriptor->InputCount()));
+  }
+
+  CallBuffer buffer(zone(), descriptor, frame_state_descriptor);
+
+  // Compute InstructionOperands for inputs and outputs.
+  InitializeCallBuffer(node, &buffer, true, false);
+
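+  // Claim all outgoing stack argument space up front, then store each pushed
+  // node into its reserved slot, from the highest slot down.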
+  int push_count = static_cast<int>(buffer.pushed_nodes.size());
+  if (push_count > 0) {
+    Emit(kMips64StackClaim | MiscField::encode(push_count), NULL);
+  }
+  int slot = static_cast<int>(buffer.pushed_nodes.size()) - 1;
+  for (NodeVectorRIter input = buffer.pushed_nodes.rbegin();
+       input != buffer.pushed_nodes.rend(); input++) {
+    Emit(kMips64StoreToStackSlot | MiscField::encode(slot), NULL,
+         g.UseRegister(*input));
+    slot--;
+  }
+
+  // Select the appropriate opcode based on the call type.
+  InstructionCode opcode;
+  switch (descriptor->kind()) {
+    case CallDescriptor::kCallCodeObject: {
+      opcode = kArchCallCodeObject;
+      break;
+    }
+    case CallDescriptor::kCallJSFunction:
+      opcode = kArchCallJSFunction;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  opcode |= MiscField::encode(descriptor->flags());
+
+  // Emit the call instruction.
+  Instruction* call_instr =
+      Emit(opcode, buffer.outputs.size(), &buffer.outputs.front(),
+           buffer.instruction_args.size(), &buffer.instruction_args.front());
+
+  call_instr->MarkAsCall();
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  Mips64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
+                                           ? g.UseImmediate(offset)
+                                           : g.UseRegister(offset);
+
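+  // At most one of offset and length is used as an immediate: the bounds
+  // check compares the two directly and can encode only one immediate.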
+  InstructionOperand* length_operand =
+      (!g.CanBeImmediate(offset, opcode) && g.CanBeImmediate(length, opcode))
+          ? g.UseImmediate(length)
+          : g.UseRegister(length);
+
+  Emit(opcode | AddressingModeField::encode(kMode_MRI),
+       g.DefineAsRegister(node), offset_operand, length_operand,
+       g.UseRegister(buffer));
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  Mips64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* offset_operand = g.CanBeImmediate(offset, opcode)
+                                           ? g.UseImmediate(offset)
+                                           : g.UseRegister(offset);
+
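+  // As in VisitCheckedLoad, at most one of offset and length may be an
+  // immediate.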
+  InstructionOperand* length_operand =
+      (!g.CanBeImmediate(offset, opcode) && g.CanBeImmediate(length, opcode))
+          ? g.UseImmediate(length)
+          : g.UseRegister(length);
+
+  Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr, offset_operand,
+       length_operand, g.UseRegister(value), g.UseRegister(buffer));
+}
+
+
+namespace {
+
+// Shared routine for multiple compare operations.
+void VisitCompare(InstructionSelector* selector, InstructionCode opcode,
+                  InstructionOperand* left, InstructionOperand* right,
+                  FlagsContinuation* cont) {
+  Mips64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, NULL, left, right, g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    DCHECK(cont->IsSet());
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), left, right);
+  }
+}
+
+
+// Shared routine for multiple float compare operations.
+void VisitFloat64Compare(InstructionSelector* selector, Node* node,
+                         FlagsContinuation* cont) {
+  Mips64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+  VisitCompare(selector, kMips64CmpD, g.UseRegister(left), g.UseRegister(right),
+               cont);
+}
+
+
+// Shared routine for multiple word compare operations.
+void VisitWordCompare(InstructionSelector* selector, Node* node,
+                      InstructionCode opcode, FlagsContinuation* cont,
+                      bool commutative) {
+  Mips64OperandGenerator g(selector);
+  Node* left = node->InputAt(0);
+  Node* right = node->InputAt(1);
+
+  // Match immediates on left or right side of comparison.
+  if (g.CanBeImmediate(right, opcode, cont)) {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseImmediate(right),
+                 cont);
+  } else if (g.CanBeImmediate(left, opcode, cont)) {
+    if (!commutative) cont->Commute();
+    VisitCompare(selector, opcode, g.UseRegister(right), g.UseImmediate(left),
+                 cont);
+  } else {
+    VisitCompare(selector, opcode, g.UseRegister(left), g.UseRegister(right),
+                 cont);
+  }
+}
+
+
+void VisitWord32Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  VisitWordCompare(selector, node, kMips64Cmp32, cont, false);
+}
+
+
+void VisitWord64Compare(InstructionSelector* selector, Node* node,
+                        FlagsContinuation* cont) {
+  VisitWordCompare(selector, node, kMips64Cmp, cont, false);
+}
+
+}  // namespace
+
+
+void EmitWordCompareZero(InstructionSelector* selector, InstructionCode opcode,
+                         Node* value, FlagsContinuation* cont) {
+  Mips64OperandGenerator g(selector);
+  opcode = cont->Encode(opcode);
+  InstructionOperand* const value_operand = g.UseRegister(value);
+  if (cont->IsBranch()) {
+    selector->Emit(opcode, nullptr, value_operand, g.TempImmediate(0),
+                   g.Label(cont->true_block()),
+                   g.Label(cont->false_block()))->MarkAsControl();
+  } else {
+    selector->Emit(opcode, g.DefineAsRegister(cont->result()), value_operand,
+                   g.TempImmediate(0));
+  }
+}
+
+
+// Shared routine for word comparisons against zero.
+void VisitWordCompareZero(InstructionSelector* selector, Node* user,
+                          Node* value, FlagsContinuation* cont) {
+  // Default the comparison against zero to the 64-bit variant; it is used
+  // when the branch cannot be combined with a preceding operation.
+  InstructionCode opcode = kMips64Cmp;
+  while (selector->CanCover(user, value)) {
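+    // A Word32Equal user narrows the fallback comparison to the 32-bit
+    // variant.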
+    if (user->opcode() == IrOpcode::kWord32Equal) {
+      opcode = kMips64Cmp32;
+    }
+    switch (value->opcode()) {
+      case IrOpcode::kWord32Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int32BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          opcode = kMips64Cmp32;
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord32Compare(selector, value, cont);
+      }
+      case IrOpcode::kInt32LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kInt32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kUint32LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kUint32LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThanOrEqual);
+        return VisitWord32Compare(selector, value, cont);
+      case IrOpcode::kWord64Equal: {
+        // Combine with comparisons against 0 by simply inverting the
+        // continuation.
+        Int64BinopMatcher m(value);
+        if (m.right().Is(0)) {
+          user = value;
+          value = m.left().node();
+          cont->Negate();
+          continue;
+        }
+        cont->OverwriteAndNegateIfEqual(kEqual);
+        return VisitWord64Compare(selector, value, cont);
+      }
+      case IrOpcode::kInt64LessThan:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kInt64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kSignedLessThanOrEqual);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kUint64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnsignedLessThan);
+        return VisitWord64Compare(selector, value, cont);
+      case IrOpcode::kFloat64Equal:
+        cont->OverwriteAndNegateIfEqual(kUnorderedEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThan:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThan);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kFloat64LessThanOrEqual:
+        cont->OverwriteAndNegateIfEqual(kUnorderedLessThanOrEqual);
+        return VisitFloat64Compare(selector, value, cont);
+      case IrOpcode::kProjection:
+        // Check if this is the overflow output projection of an
+        // <Operation>WithOverflow node.
+        if (OpParameter<size_t>(value) == 1u) {
+          // We cannot combine the <Operation>WithOverflow with this branch
+          // unless the 0th projection (the use of the actual value of the
+          // <Operation>) is either NULL, which means there's no use of the
+          // actual value, or was already defined, which means it is
+          // scheduled *AFTER* this branch.
+          Node* node = value->InputAt(0);
+          Node* result = node->FindProjection(0);
+          if (result == NULL || selector->IsDefined(result)) {
+            switch (node->opcode()) {
+              case IrOpcode::kInt32AddWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kMips64Dadd, cont);
+              case IrOpcode::kInt32SubWithOverflow:
+                cont->OverwriteAndNegateIfEqual(kOverflow);
+                return VisitBinop(selector, node, kMips64Dsub, cont);
+              default:
+                break;
+            }
+          }
+        }
+        break;
+      case IrOpcode::kWord32And:
+        return VisitWordCompare(selector, value, kMips64Tst32, cont, true);
+      case IrOpcode::kWord64And:
+        return VisitWordCompare(selector, value, kMips64Tst, cont, true);
+      default:
+        break;
+    }
+    break;
+  }
+
+  // Continuation could not be combined with a compare, emit compare against 0.
+  EmitWordCompareZero(selector, opcode, value, cont);
+}
+
+
+void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
+                                      BasicBlock* fbranch) {
+  FlagsContinuation cont(kNotEqual, tbranch, fbranch);
+  VisitWordCompareZero(this, branch, branch->InputAt(0), &cont);
+}
+
+
+void InstructionSelector::VisitWord32Equal(Node* const node) {
+  FlagsContinuation cont(kEqual, node);
+  Int32BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+  }
+
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint32LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThanOrEqual, node);
+  VisitWord32Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt32AddWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kMips64Dadd, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kMips64Dadd, &cont);
+}
+
+
+void InstructionSelector::VisitInt32SubWithOverflow(Node* node) {
+  if (Node* ovf = node->FindProjection(1)) {
+    FlagsContinuation cont(kOverflow, ovf);
+    return VisitBinop(this, node, kMips64Dsub, &cont);
+  }
+  FlagsContinuation cont;
+  VisitBinop(this, node, kMips64Dsub, &cont);
+}
+
+
+void InstructionSelector::VisitWord64Equal(Node* const node) {
+  FlagsContinuation cont(kEqual, node);
+  Int64BinopMatcher m(node);
+  if (m.right().Is(0)) {
+    return VisitWordCompareZero(this, m.node(), m.left().node(), &cont);
+  }
+
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThan(Node* node) {
+  FlagsContinuation cont(kSignedLessThan, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitInt64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kSignedLessThanOrEqual, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitUint64LessThan(Node* node) {
+  FlagsContinuation cont(kUnsignedLessThan, node);
+  VisitWord64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64Equal(Node* node) {
+  FlagsContinuation cont(kUnorderedEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThan(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThan, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+void InstructionSelector::VisitFloat64LessThanOrEqual(Node* node) {
+  FlagsContinuation cont(kUnorderedLessThanOrEqual, node);
+  VisitFloat64Compare(this, node, &cont);
+}
+
+
+// static
+MachineOperatorBuilder::Flags
+InstructionSelector::SupportedMachineOperatorFlags() {
+  return MachineOperatorBuilder::kFloat64Floor |
+         MachineOperatorBuilder::kFloat64Ceil |
+         MachineOperatorBuilder::kFloat64RoundTruncate;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/compiler/mips64/linkage-mips64.cc b/deps/v8/src/compiler/mips64/linkage-mips64.cc
new file mode 100644 (file)
index 0000000..0e1a590
--- /dev/null
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/assembler.h"
+#include "src/code-stubs.h"
+#include "src/compiler/linkage.h"
+#include "src/compiler/linkage-impl.h"
+#include "src/zone.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+struct MipsLinkageHelperTraits {
+  static Register ReturnValueReg() { return v0; }
+  static Register ReturnValue2Reg() { return v1; }
+  static Register JSCallFunctionReg() { return a1; }
+  static Register ContextReg() { return cp; }
+  static Register RuntimeCallFunctionReg() { return a1; }
+  static Register RuntimeCallArgCountReg() { return a0; }
+  static RegList CCalleeSaveRegisters() {
+    return s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() |
+           s6.bit() | s7.bit();
+  }
+  static Register CRegisterParameter(int i) {
+    static Register register_parameters[] = {a0, a1, a2, a3, a4, a5, a6, a7};
+    return register_parameters[i];
+  }
+  static int CRegisterParametersLength() { return 8; }
+};
+
+
+typedef LinkageHelper<MipsLinkageHelperTraits> LH;
+
+CallDescriptor* Linkage::GetJSCallDescriptor(int parameter_count, Zone* zone,
+                                             CallDescriptor::Flags flags) {
+  return LH::GetJSCallDescriptor(zone, parameter_count, flags);
+}
+
+
+CallDescriptor* Linkage::GetRuntimeCallDescriptor(
+    Runtime::FunctionId function, int parameter_count,
+    Operator::Properties properties, Zone* zone) {
+  return LH::GetRuntimeCallDescriptor(zone, function, parameter_count,
+                                      properties);
+}
+
+
+CallDescriptor* Linkage::GetStubCallDescriptor(
+    const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
+  return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
+                                   flags, properties);
+}
+
+
+CallDescriptor* Linkage::GetSimplifiedCDescriptor(Zone* zone,
+                                                  MachineSignature* sig) {
+  return LH::GetSimplifiedCDescriptor(zone, sig);
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/compiler/move-optimizer.cc b/deps/v8/src/compiler/move-optimizer.cc
new file mode 100644 (file)
index 0000000..330f32f
--- /dev/null
@@ -0,0 +1,205 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/move-optimizer.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+MoveOptimizer::MoveOptimizer(Zone* local_zone, InstructionSequence* code)
+    : local_zone_(local_zone),
+      code_(code),
+      temp_vector_0_(local_zone),
+      temp_vector_1_(local_zone) {}
+
+
+void MoveOptimizer::Run() {
+  // First smash all consecutive moves into the leftmost move slot.
+  for (auto* block : code()->instruction_blocks()) {
+    GapInstruction* prev_gap = nullptr;
+    for (int index = block->code_start(); index < block->code_end(); ++index) {
+      auto instr = code()->instructions()[index];
+      if (!instr->IsGapMoves()) {
+        if (instr->IsSourcePosition() || instr->IsNop()) continue;
+        FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
+        prev_gap = nullptr;
+        continue;
+      }
+      auto gap = GapInstruction::cast(instr);
+      // Find first non-empty slot.
+      int i = GapInstruction::FIRST_INNER_POSITION;
+      for (; i <= GapInstruction::LAST_INNER_POSITION; i++) {
+        auto move = gap->parallel_moves()[i];
+        if (move == nullptr) continue;
+        auto move_ops = move->move_operands();
+        auto op = move_ops->begin();
+        for (; op != move_ops->end(); ++op) {
+          if (!op->IsRedundant()) break;
+        }
+        if (op == move_ops->end()) {
+          move_ops->Rewind(0);  // Clear this redundant move.
+        } else {
+          break;  // Found index of first non-redundant move.
+        }
+      }
+      // Nothing to do here.
+      if (i == GapInstruction::LAST_INNER_POSITION + 1) {
+        if (prev_gap != nullptr) {
+          // Slide prev_gap down so we always know where to look for it.
+          std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
+          prev_gap = gap;
+        }
+        continue;
+      }
+      // Move the first non-empty gap to position 0.
+      std::swap(gap->parallel_moves()[0], gap->parallel_moves()[i]);
+      auto left = gap->parallel_moves()[0];
+      // Compress everything into position 0.
+      for (++i; i <= GapInstruction::LAST_INNER_POSITION; ++i) {
+        auto move = gap->parallel_moves()[i];
+        if (move == nullptr) continue;
+        CompressMoves(&temp_vector_0_, left, move);
+      }
+      if (prev_gap != nullptr) {
+        // Smash left into prev_gap, killing left.
+        auto pred_moves = prev_gap->parallel_moves()[0];
+        CompressMoves(&temp_vector_0_, pred_moves, left);
+        std::swap(prev_gap->parallel_moves()[0], gap->parallel_moves()[0]);
+      }
+      prev_gap = gap;
+    }
+    FinalizeMoves(&temp_vector_0_, &temp_vector_1_, prev_gap);
+  }
+}
+
+
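+// Prepares {move} for insertion into {left} when the two parallel moves are
+// merged: if {left} already defines the source of {move}, {move} is redirected
+// to read that definition's source instead (the value before {left} runs); if
+// {left} already defines the destination of {move}, that now-dead earlier move
+// is returned so the caller can eliminate it.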
+static MoveOperands* PrepareInsertAfter(ParallelMove* left, MoveOperands* move,
+                                        Zone* zone) {
+  auto move_ops = left->move_operands();
+  MoveOperands* replacement = nullptr;
+  MoveOperands* to_eliminate = nullptr;
+  for (auto curr = move_ops->begin(); curr != move_ops->end(); ++curr) {
+    if (curr->IsEliminated()) continue;
+    if (curr->destination()->Equals(move->source())) {
+      DCHECK_EQ(nullptr, replacement);
+      replacement = curr;
+      if (to_eliminate != nullptr) break;
+    } else if (curr->destination()->Equals(move->destination())) {
+      DCHECK_EQ(nullptr, to_eliminate);
+      to_eliminate = curr;
+      if (replacement != nullptr) break;
+    }
+  }
+  DCHECK(!(replacement == to_eliminate && replacement != nullptr));
+  if (replacement != nullptr) {
+    auto new_source = new (zone) InstructionOperand(
+        replacement->source()->kind(), replacement->source()->index());
+    move->set_source(new_source);
+  }
+  return to_eliminate;
+}
+
+
+void MoveOptimizer::CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
+                                  ParallelMove* right) {
+  DCHECK(eliminated->empty());
+  auto move_ops = right->move_operands();
+  // Modify the right moves in place and collect moves that will be killed by
+  // merging the two gaps.
+  for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+    if (op->IsRedundant()) continue;
+    MoveOperands* to_eliminate = PrepareInsertAfter(left, op, code_zone());
+    if (to_eliminate != nullptr) {
+      eliminated->push_back(to_eliminate);
+    }
+  }
+  // Eliminate dead moves.  Must happen before insertion of new moves as the
+  // contents of eliminated are pointers into a list.
+  for (auto to_eliminate : *eliminated) {
+    to_eliminate->Eliminate();
+  }
+  eliminated->clear();
+  // Add all possibly modified moves from right side.
+  for (auto op = move_ops->begin(); op != move_ops->end(); ++op) {
+    if (op->IsRedundant()) continue;
+    left->move_operands()->Add(*op, code_zone());
+  }
+  // Nuke right.
+  move_ops->Rewind(0);
+}
+
+
+void MoveOptimizer::FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
+                                  GapInstruction* gap) {
+  DCHECK(loads->empty());
+  DCHECK(new_moves->empty());
+  if (gap == nullptr) return;
+  // Split multiple loads of the same constant or stack slot off into the second
+  // slot and keep remaining moves in the first slot.
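+  // E.g. {r1 <- [stack:0]; r2 <- [stack:0]} becomes slot 0 {r1 <- [stack:0]}
+  // and slot 1 {r2 <- r1}: the slot is loaded once and the second move turns
+  // into a cheap register-to-register copy.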
+  auto move_ops = gap->parallel_moves()[0]->move_operands();
+  for (auto move = move_ops->begin(); move != move_ops->end(); ++move) {
+    if (move->IsRedundant()) {
+      move->Eliminate();
+      continue;
+    }
+    if (!(move->source()->IsConstant() || move->source()->IsStackSlot() ||
+          move->source()->IsDoubleStackSlot()))
+      continue;
+    // Search for existing move to this slot.
+    MoveOperands* found = nullptr;
+    for (auto load : *loads) {
+      if (load->source()->Equals(move->source())) {
+        found = load;
+        break;
+      }
+    }
+    // Not found so insert.
+    if (found == nullptr) {
+      loads->push_back(move);
+      // Replace source with copy for later use.
+      auto dest = move->destination();
+      move->set_destination(new (code_zone())
+                            InstructionOperand(dest->kind(), dest->index()));
+      continue;
+    }
+    if ((found->destination()->IsStackSlot() ||
+         found->destination()->IsDoubleStackSlot()) &&
+        !(move->destination()->IsStackSlot() ||
+          move->destination()->IsDoubleStackSlot())) {
+      // Found a better source for this load.  Smash it in place to affect other
+      // loads that have already been split.
+      InstructionOperand::Kind found_kind = found->destination()->kind();
+      int found_index = found->destination()->index();
+      auto next_dest =
+          new (code_zone()) InstructionOperand(found_kind, found_index);
+      auto dest = move->destination();
+      found->destination()->ConvertTo(dest->kind(), dest->index());
+      move->set_destination(next_dest);
+    }
+    // Rewrite this move to read from the load's destination.
+    move->set_source(found->destination());
+    new_moves->push_back(move);
+  }
+  loads->clear();
+  if (new_moves->empty()) return;
+  // Insert all new moves into slot 1.
+  auto slot_1 = gap->GetOrCreateParallelMove(
+      static_cast<GapInstruction::InnerPosition>(1), code_zone());
+  DCHECK(slot_1->move_operands()->is_empty());
+  slot_1->move_operands()->AddBlock(MoveOperands(nullptr, nullptr),
+                                    static_cast<int>(new_moves->size()),
+                                    code_zone());
+  auto it = slot_1->move_operands()->begin();
+  for (auto new_move : *new_moves) {
+    std::swap(*new_move, *it);
+    ++it;
+  }
+  DCHECK_EQ(it, slot_1->move_operands()->end());
+  new_moves->clear();
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/compiler/move-optimizer.h b/deps/v8/src/compiler/move-optimizer.h
new file mode 100644 (file)
index 0000000..bbce686
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_COMPILER_MOVE_OPTIMIZER_
+#define V8_COMPILER_MOVE_OPTIMIZER_
+
+#include "src/compiler/instruction.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
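+// Compresses the parallel moves attached to gap instructions: consecutive
+// gaps are merged, redundant moves are dropped, and repeated loads of the
+// same constant or stack slot are rewritten as moves from the first load's
+// destination.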
+class MoveOptimizer FINAL {
+ public:
+  MoveOptimizer(Zone* local_zone, InstructionSequence* code);
+  void Run();
+
+ private:
+  typedef ZoneVector<MoveOperands*> MoveOpVector;
+
+  InstructionSequence* code() const { return code_; }
+  Zone* local_zone() const { return local_zone_; }
+  Zone* code_zone() const { return code()->zone(); }
+
+  void CompressMoves(MoveOpVector* eliminated, ParallelMove* left,
+                     ParallelMove* right);
+  void FinalizeMoves(MoveOpVector* loads, MoveOpVector* new_moves,
+                     GapInstruction* gap);
+
+  Zone* const local_zone_;
+  InstructionSequence* const code_;
+  MoveOpVector temp_vector_0_;
+  MoveOpVector temp_vector_1_;
+
+  DISALLOW_COPY_AND_ASSIGN(MoveOptimizer);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_COMPILER_MOVE_OPTIMIZER_
index 35d03ce..92a3fa0 100644 (file)
@@ -13,6 +13,13 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
+namespace {
+
+enum { kInitialSize = 16u, kLinearProbe = 5u };
+
+}  // namespace
+
+
 template <typename Key, typename Hash, typename Pred>
 struct NodeCache<Key, Hash, Pred>::Entry {
   Key key_;
@@ -92,17 +99,21 @@ Node** NodeCache<Key, Hash, Pred>::Find(Zone* zone, Key key) {
 
 
 template <typename Key, typename Hash, typename Pred>
-void NodeCache<Key, Hash, Pred>::GetCachedNodes(NodeVector* nodes) {
+void NodeCache<Key, Hash, Pred>::GetCachedNodes(ZoneVector<Node*>* nodes) {
   if (entries_) {
     for (size_t i = 0; i < size_ + kLinearProbe; i++) {
-      if (entries_[i].value_ != NULL) nodes->push_back(entries_[i].value_);
+      if (entries_[i].value_) nodes->push_back(entries_[i].value_);
     }
   }
 }
 
-template class NodeCache<int64_t>;
+
+// -----------------------------------------------------------------------------
+// Instantiations
+
+
 template class NodeCache<int32_t>;
-template class NodeCache<void*>;
+template class NodeCache<int64_t>;
 
 }  // namespace compiler
 }  // namespace internal
index 2622e4d..b123922 100644 (file)
@@ -7,20 +7,31 @@
 
 #include "src/base/functional.h"
 #include "src/base/macros.h"
-#include "src/compiler/node.h"
 
 namespace v8 {
 namespace internal {
+
+// Forward declarations.
+class Zone;
+template <typename>
+class ZoneVector;
+
+
 namespace compiler {
 
+// Forward declarations.
+class Node;
+
+
 // A cache for nodes based on a key. Useful for implementing canonicalization of
 // nodes such as constants, parameters, etc.
 template <typename Key, typename Hash = base::hash<Key>,
           typename Pred = std::equal_to<Key> >
 class NodeCache FINAL {
  public:
-  explicit NodeCache(size_t max = 256)
+  explicit NodeCache(unsigned max = 256)
       : entries_(nullptr), size_(0), max_(max) {}
+  ~NodeCache() {}
 
   // Search for node associated with {key} and return a pointer to a memory
   // location in this cache that stores an entry for the key. If the location
@@ -31,11 +42,10 @@ class NodeCache FINAL {
   // too full or encounters too many hash collisions.
   Node** Find(Zone* zone, Key key);
 
-  void GetCachedNodes(NodeVector* nodes);
+  // Appends all nodes from this cache to {nodes}.
+  void GetCachedNodes(ZoneVector<Node*>* nodes);
 
  private:
-  enum { kInitialSize = 16u, kLinearProbe = 5u };
-
   struct Entry;
 
   Entry* entries_;  // lazily-allocated hash entries.
@@ -45,12 +55,18 @@ class NodeCache FINAL {
   Pred pred_;
 
   bool Resize(Zone* zone);
+
+  DISALLOW_COPY_AND_ASSIGN(NodeCache);
 };
 
 // Various default cache types.
-typedef NodeCache<int64_t> Int64NodeCache;
 typedef NodeCache<int32_t> Int32NodeCache;
-typedef NodeCache<void*> PtrNodeCache;
+typedef NodeCache<int64_t> Int64NodeCache;
+#if V8_HOST_ARCH_32_BIT
+typedef Int32NodeCache IntPtrNodeCache;
+#else
+typedef Int64NodeCache IntPtrNodeCache;
+#endif
 
 }  // namespace compiler
 }  // namespace internal
index a55e7bf..fc11a0a 100644 (file)
@@ -5,6 +5,8 @@
 #ifndef V8_COMPILER_NODE_MATCHERS_H_
 #define V8_COMPILER_NODE_MATCHERS_H_
 
+#include <cmath>
+
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
 #include "src/unique.h"
@@ -39,6 +41,8 @@ struct NodeMatcher {
 // A pattern matcher for arbitrary value constants.
 template <typename T, IrOpcode::Value kOpcode>
 struct ValueMatcher : public NodeMatcher {
+  typedef T ValueType;
+
   explicit ValueMatcher(Node* node)
       : NodeMatcher(node), value_(), has_value_(opcode() == kOpcode) {
     if (has_value_) {
@@ -66,15 +70,49 @@ struct ValueMatcher : public NodeMatcher {
 };
 
 
+template <>
+inline ValueMatcher<int64_t, IrOpcode::kInt64Constant>::ValueMatcher(Node* node)
+    : NodeMatcher(node), value_(), has_value_(false) {
+  if (opcode() == IrOpcode::kInt32Constant) {
+    value_ = OpParameter<int32_t>(node);
+    has_value_ = true;
+  } else if (opcode() == IrOpcode::kInt64Constant) {
+    value_ = OpParameter<int64_t>(node);
+    has_value_ = true;
+  }
+}
+
+
+template <>
+inline ValueMatcher<uint64_t, IrOpcode::kInt64Constant>::ValueMatcher(
+    Node* node)
+    : NodeMatcher(node), value_(), has_value_(false) {
+  if (opcode() == IrOpcode::kInt32Constant) {
+    value_ = OpParameter<uint32_t>(node);
+    has_value_ = true;
+  } else if (opcode() == IrOpcode::kInt64Constant) {
+    value_ = OpParameter<uint64_t>(node);
+    has_value_ = true;
+  }
+}
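+// The two specializations above let the 64-bit matchers also accept 32-bit
+// constant inputs, widening the value implicitly: sign-extension for int64_t
+// and zero-extension for uint64_t.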
+
+
 // A pattern matcher for integer constants.
 template <typename T, IrOpcode::Value kOpcode>
 struct IntMatcher FINAL : public ValueMatcher<T, kOpcode> {
   explicit IntMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
 
+  bool IsMultipleOf(T n) const {
+    return this->HasValue() && (this->Value() % n) == 0;
+  }
   bool IsPowerOf2() const {
     return this->HasValue() && this->Value() > 0 &&
            (this->Value() & (this->Value() - 1)) == 0;
   }
+  bool IsNegativePowerOf2() const {
+    return this->HasValue() && this->Value() < 0 &&
+           (-this->Value() & (-this->Value() - 1)) == 0;
+  }
 };
 
 typedef IntMatcher<int32_t, IrOpcode::kInt32Constant> Int32Matcher;
@@ -95,6 +133,9 @@ template <typename T, IrOpcode::Value kOpcode>
 struct FloatMatcher FINAL : public ValueMatcher<T, kOpcode> {
   explicit FloatMatcher(Node* node) : ValueMatcher<T, kOpcode>(node) {}
 
+  bool IsMinusZero() const {
+    return this->Is(0.0) && std::signbit(this->Value());
+  }
   bool IsNaN() const { return this->HasValue() && std::isnan(this->Value()); }
 };
 
@@ -116,11 +157,18 @@ struct HeapObjectMatcher FINAL
 // right hand sides of a binary operation and can put constants on the right
 // if they appear on the left hand side of a commutative operation.
 template <typename Left, typename Right>
-struct BinopMatcher FINAL : public NodeMatcher {
+struct BinopMatcher : public NodeMatcher {
   explicit BinopMatcher(Node* node)
       : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
     if (HasProperty(Operator::kCommutative)) PutConstantOnRight();
   }
+  BinopMatcher(Node* node, bool allow_input_swap)
+      : NodeMatcher(node), left_(InputAt(0)), right_(InputAt(1)) {
+    if (allow_input_swap) PutConstantOnRight();
+  }
+
+  typedef Left LeftMatcher;
+  typedef Right RightMatcher;
 
   const Left& left() const { return left_; }
   const Right& right() const { return right_; }
@@ -128,12 +176,17 @@ struct BinopMatcher FINAL : public NodeMatcher {
   bool IsFoldable() const { return left().HasValue() && right().HasValue(); }
   bool LeftEqualsRight() const { return left().node() == right().node(); }
 
+ protected:
+  void SwapInputs() {
+    std::swap(left_, right_);
+    node()->ReplaceInput(0, left().node());
+    node()->ReplaceInput(1, right().node());
+  }
+
  private:
   void PutConstantOnRight() {
     if (left().HasValue() && !right().HasValue()) {
-      std::swap(left_, right_);
-      node()->ReplaceInput(0, left().node());
-      node()->ReplaceInput(1, right().node());
+      SwapInputs();
     }
   }
 
@@ -150,6 +203,314 @@ typedef BinopMatcher<UintPtrMatcher, UintPtrMatcher> UintPtrBinopMatcher;
 typedef BinopMatcher<Float64Matcher, Float64Matcher> Float64BinopMatcher;
 typedef BinopMatcher<NumberMatcher, NumberMatcher> NumberBinopMatcher;
 
+
+template <class BinopMatcher, IrOpcode::Value kMulOpcode,
+          IrOpcode::Value kShiftOpcode>
+struct ScaleMatcher {
+  explicit ScaleMatcher(Node* node, bool allow_power_of_two_plus_one = false)
+      : scale_(-1), power_of_two_plus_one_(false) {
+    if (node->InputCount() < 2) return;
+    BinopMatcher m(node);
+    if (node->opcode() == kShiftOpcode) {
+      if (m.right().HasValue()) {
+        typename BinopMatcher::RightMatcher::ValueType value =
+            m.right().Value();
+        if (value >= 0 && value <= 3) {
+          scale_ = static_cast<int>(value);
+        }
+      }
+    } else if (node->opcode() == kMulOpcode) {
+      if (m.right().HasValue()) {
+        typename BinopMatcher::RightMatcher::ValueType value =
+            m.right().Value();
+        if (value == 1) {
+          scale_ = 0;
+        } else if (value == 2) {
+          scale_ = 1;
+        } else if (value == 4) {
+          scale_ = 2;
+        } else if (value == 8) {
+          scale_ = 3;
+        } else if (allow_power_of_two_plus_one) {
+          if (value == 3) {
+            scale_ = 1;
+            power_of_two_plus_one_ = true;
+          } else if (value == 5) {
+            scale_ = 2;
+            power_of_two_plus_one_ = true;
+          } else if (value == 9) {
+            scale_ = 3;
+            power_of_two_plus_one_ = true;
+          }
+        }
+      }
+    }
+  }
+
+  bool matches() const { return scale_ != -1; }
+  int scale() const { return scale_; }
+  bool power_of_two_plus_one() const { return power_of_two_plus_one_; }
+
+ private:
+  int scale_;
+  bool power_of_two_plus_one_;
+};
+
+typedef ScaleMatcher<Int32BinopMatcher, IrOpcode::kInt32Mul,
+                     IrOpcode::kWord32Shl> Int32ScaleMatcher;
+typedef ScaleMatcher<Int64BinopMatcher, IrOpcode::kInt64Mul,
+                     IrOpcode::kWord64Shl> Int64ScaleMatcher;
+
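+// For example, the Int32ScaleMatcher above matches "x * 4" and "x << 2" with
+// scale() == 2; with {allow_power_of_two_plus_one}, "x * 9" also matches,
+// yielding scale() == 3 and power_of_two_plus_one() == true, i.e. the
+// equivalent of "x + (x << 3)".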
+
+template <class BinopMatcher, IrOpcode::Value kAddOpcode,
+          IrOpcode::Value kMulOpcode, IrOpcode::Value kShiftOpcode>
+struct AddMatcher : public BinopMatcher {
+  static const IrOpcode::Value kOpcode = kAddOpcode;
+  typedef ScaleMatcher<BinopMatcher, kMulOpcode, kShiftOpcode> Matcher;
+
+  AddMatcher(Node* node, bool allow_input_swap)
+      : BinopMatcher(node, allow_input_swap),
+        scale_(-1),
+        power_of_two_plus_one_(false) {
+    Initialize(node, allow_input_swap);
+  }
+  explicit AddMatcher(Node* node)
+      : BinopMatcher(node, node->op()->HasProperty(Operator::kCommutative)),
+        scale_(-1),
+        power_of_two_plus_one_(false) {
+    Initialize(node, node->op()->HasProperty(Operator::kCommutative));
+  }
+
+  bool HasIndexInput() const { return scale_ != -1; }
+  Node* IndexInput() const {
+    DCHECK(HasIndexInput());
+    return this->left().node()->InputAt(0);
+  }
+  int scale() const {
+    DCHECK(HasIndexInput());
+    return scale_;
+  }
+  bool power_of_two_plus_one() const { return power_of_two_plus_one_; }
+
+ private:
+  void Initialize(Node* node, bool allow_input_swap) {
+    Matcher left_matcher(this->left().node(), true);
+    if (left_matcher.matches()) {
+      scale_ = left_matcher.scale();
+      power_of_two_plus_one_ = left_matcher.power_of_two_plus_one();
+      return;
+    }
+
+    if (!allow_input_swap) {
+      return;
+    }
+
+    Matcher right_matcher(this->right().node(), true);
+    if (right_matcher.matches()) {
+      scale_ = right_matcher.scale();
+      power_of_two_plus_one_ = right_matcher.power_of_two_plus_one();
+      this->SwapInputs();
+      return;
+    }
+
+    if (this->right().opcode() == kAddOpcode &&
+        this->left().opcode() != kAddOpcode) {
+      this->SwapInputs();
+    }
+  }
+
+  int scale_;
+  bool power_of_two_plus_one_;
+};
+
+typedef AddMatcher<Int32BinopMatcher, IrOpcode::kInt32Add, IrOpcode::kInt32Mul,
+                   IrOpcode::kWord32Shl> Int32AddMatcher;
+typedef AddMatcher<Int64BinopMatcher, IrOpcode::kInt64Add, IrOpcode::kInt64Mul,
+                   IrOpcode::kWord64Shl> Int64AddMatcher;
+
+
+template <class AddMatcher>
+struct BaseWithIndexAndDisplacementMatcher {
+  BaseWithIndexAndDisplacementMatcher(Node* node, bool allow_input_swap)
+      : matches_(false),
+        index_(NULL),
+        scale_(0),
+        base_(NULL),
+        displacement_(NULL) {
+    Initialize(node, allow_input_swap);
+  }
+
+  explicit BaseWithIndexAndDisplacementMatcher(Node* node)
+      : matches_(false),
+        index_(NULL),
+        scale_(0),
+        base_(NULL),
+        displacement_(NULL) {
+    Initialize(node, node->op()->HasProperty(Operator::kCommutative));
+  }
+
+  bool matches() const { return matches_; }
+  Node* index() const { return index_; }
+  int scale() const { return scale_; }
+  Node* base() const { return base_; }
+  Node* displacement() const { return displacement_; }
+
+ private:
+  bool matches_;
+  Node* index_;
+  int scale_;
+  Node* base_;
+  Node* displacement_;
+
+  void Initialize(Node* node, bool allow_input_swap) {
+    // The BaseWithIndexAndDisplacementMatcher canonicalizes the order of
+    // displacements and scale factors that are used as inputs, so instead of
+    // enumerating all possible patterns by brute force, checking for node
+    // clusters using the following templates in the following order suffices to
+    // find all of the interesting cases (S = index * scale, B = base input, D =
+    // displacement input):
+    // (S + (B + D))
+    // (S + (B + B))
+    // (S + D)
+    // (S + B)
+    // ((S + D) + B)
+    // ((S + B) + D)
+    // ((B + D) + B)
+    // ((B + B) + D)
+    // (B + D)
+    // (B + B)
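+    // For example, a node computing "(x * 2) + (y + 8)" matches the
+    // (S + (B + D)) template with index == x, scale == 1, base == y and
+    // displacement == 8.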
+    if (node->InputCount() < 2) return;
+    AddMatcher m(node, allow_input_swap);
+    Node* left = m.left().node();
+    Node* right = m.right().node();
+    Node* displacement = NULL;
+    Node* base = NULL;
+    Node* index = NULL;
+    Node* scale_expression = NULL;
+    bool power_of_two_plus_one = false;
+    int scale = 0;
+    if (m.HasIndexInput() && left->OwnedBy(node)) {
+      index = m.IndexInput();
+      scale = m.scale();
+      scale_expression = left;
+      power_of_two_plus_one = m.power_of_two_plus_one();
+      if (right->opcode() == AddMatcher::kOpcode && right->OwnedBy(node)) {
+        AddMatcher right_matcher(right);
+        if (right_matcher.right().HasValue()) {
+          // (S + (B + D))
+          base = right_matcher.left().node();
+          displacement = right_matcher.right().node();
+        } else {
+          // (S + (B + B))
+          base = right;
+        }
+      } else if (m.right().HasValue()) {
+        // (S + D)
+        displacement = right;
+      } else {
+        // (S + B)
+        base = right;
+      }
+    } else {
+      if (left->opcode() == AddMatcher::kOpcode && left->OwnedBy(node)) {
+        AddMatcher left_matcher(left);
+        Node* left_left = left_matcher.left().node();
+        Node* left_right = left_matcher.right().node();
+        if (left_matcher.HasIndexInput() && left_left->OwnedBy(left)) {
+          if (left_matcher.right().HasValue()) {
+            // ((S + D) + B)
+            index = left_matcher.IndexInput();
+            scale = left_matcher.scale();
+            scale_expression = left_left;
+            power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+            displacement = left_right;
+            base = right;
+          } else if (m.right().HasValue()) {
+            // ((S + B) + D)
+            index = left_matcher.IndexInput();
+            scale = left_matcher.scale();
+            scale_expression = left_left;
+            power_of_two_plus_one = left_matcher.power_of_two_plus_one();
+            base = left_right;
+            displacement = right;
+          } else {
+            // (B + B)
+            index = left;
+            base = right;
+          }
+        } else {
+          if (left_matcher.right().HasValue()) {
+            // ((B + D) + B)
+            index = left_left;
+            displacement = left_right;
+            base = right;
+          } else if (m.right().HasValue()) {
+            // ((B + B) + D)
+            index = left_left;
+            base = left_right;
+            displacement = right;
+          } else {
+            // (B + B)
+            index = left;
+            base = right;
+          }
+        }
+      } else {
+        if (m.right().HasValue()) {
+          // (B + D)
+          base = left;
+          displacement = right;
+        } else {
+          // (B + B)
+          base = left;
+          index = right;
+        }
+      }
+    }
+    int64_t value = 0;
+    if (displacement != NULL) {
+      switch (displacement->opcode()) {
+        case IrOpcode::kInt32Constant: {
+          value = OpParameter<int32_t>(displacement);
+          break;
+        }
+        case IrOpcode::kInt64Constant: {
+          value = OpParameter<int64_t>(displacement);
+          break;
+        }
+        default:
+          UNREACHABLE();
+          break;
+      }
+      if (value == 0) {
+        displacement = NULL;
+      }
+    }
+    if (power_of_two_plus_one) {
+      if (base != NULL) {
+        // If the scale requires explicitly using the index as the base, but a
+        // base is already part of the match, then the ((1 << N) + 1) scale
+        // factor can't be folded into the match and the entire index * scale
+        // calculation must be computed separately.
+        index = scale_expression;
+        scale = 0;
+      } else {
+        base = index;
+      }
+    }
+    base_ = base;
+    displacement_ = displacement;
+    index_ = index;
+    scale_ = scale;
+    matches_ = true;
+  }
+};
+
+typedef BaseWithIndexAndDisplacementMatcher<Int32AddMatcher>
+    BaseWithIndexAndDisplacement32Matcher;
+typedef BaseWithIndexAndDisplacementMatcher<Int64AddMatcher>
+    BaseWithIndexAndDisplacement64Matcher;
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index 541c302..0d29614 100644 (file)
@@ -8,11 +8,9 @@
 #include "src/v8.h"
 
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/node-properties.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
-#include "src/compiler/operator-properties-inl.h"
 #include "src/compiler/operator-properties.h"
 
 namespace v8 {
@@ -102,7 +100,7 @@ inline int NodeProperties::GetFrameStateIndex(Node* node) {
 // -----------------------------------------------------------------------------
 // Edge kinds.
 
-inline bool NodeProperties::IsInputRange(Node::Edge edge, int first, int num) {
+inline bool NodeProperties::IsInputRange(Edge edge, int first, int num) {
   // TODO(titzer): edge.index() is linear time;
   // edges maybe need to be marked as value/effect/control.
   if (num == 0) return false;
@@ -110,25 +108,25 @@ inline bool NodeProperties::IsInputRange(Node::Edge edge, int first, int num) {
   return first <= index && index < first + num;
 }
 
-inline bool NodeProperties::IsValueEdge(Node::Edge edge) {
+inline bool NodeProperties::IsValueEdge(Edge edge) {
   Node* node = edge.from();
   return IsInputRange(edge, FirstValueIndex(node),
                       node->op()->ValueInputCount());
 }
 
-inline bool NodeProperties::IsContextEdge(Node::Edge edge) {
+inline bool NodeProperties::IsContextEdge(Edge edge) {
   Node* node = edge.from();
   return IsInputRange(edge, FirstContextIndex(node),
                       OperatorProperties::GetContextInputCount(node->op()));
 }
 
-inline bool NodeProperties::IsEffectEdge(Node::Edge edge) {
+inline bool NodeProperties::IsEffectEdge(Edge edge) {
   Node* node = edge.from();
   return IsInputRange(edge, FirstEffectIndex(node),
                       node->op()->EffectInputCount());
 }
 
-inline bool NodeProperties::IsControlEdge(Node::Edge edge) {
+inline bool NodeProperties::IsControlEdge(Edge edge) {
   Node* node = edge.from();
   return IsInputRange(edge, FirstControlIndex(node),
                       node->op()->ControlInputCount());
@@ -177,13 +175,12 @@ inline void NodeProperties::ReplaceWithValue(Node* node, Node* value,
   }
 
   // Requires distinguishing between value and effect edges.
-  UseIter iter = node->uses().begin();
-  while (iter != node->uses().end()) {
-    if (NodeProperties::IsEffectEdge(iter.edge())) {
+  for (Edge edge : node->use_edges()) {
+    if (NodeProperties::IsEffectEdge(edge)) {
       DCHECK_NE(NULL, effect);
-      iter = iter.UpdateToAndIncrement(effect);
+      edge.UpdateTo(effect);
     } else {
-      iter = iter.UpdateToAndIncrement(value);
+      edge.UpdateTo(value);
     }
   }
 }
@@ -203,6 +200,11 @@ inline Bounds NodeProperties::GetBounds(Node* node) {
   return node->bounds();
 }
 
+inline void NodeProperties::RemoveBounds(Node* node) {
+  Bounds empty;
+  node->set_bounds(empty);
+}
+
 inline void NodeProperties::SetBounds(Node* node, Bounds b) {
   DCHECK(b.lower != NULL && b.upper != NULL);
   node->set_bounds(b);
index 2c3468f..025be78 100644 (file)
@@ -25,10 +25,10 @@ class NodeProperties {
 
   static inline int GetFrameStateIndex(Node* node);
 
-  static inline bool IsValueEdge(Node::Edge edge);
-  static inline bool IsContextEdge(Node::Edge edge);
-  static inline bool IsEffectEdge(Node::Edge edge);
-  static inline bool IsControlEdge(Node::Edge edge);
+  static inline bool IsValueEdge(Edge edge);
+  static inline bool IsContextEdge(Edge edge);
+  static inline bool IsEffectEdge(Edge edge);
+  static inline bool IsControlEdge(Edge edge);
 
   static inline bool IsControl(Node* node);
 
@@ -43,6 +43,7 @@ class NodeProperties {
   static inline bool IsTyped(Node* node);
   static inline Bounds GetBounds(Node* node);
   static inline void SetBounds(Node* node, Bounds bounds);
+  static inline void RemoveBounds(Node* node);
   static inline bool AllValueInputsAreTyped(Node* node);
 
   static inline int FirstValueIndex(Node* node);
@@ -56,7 +57,7 @@ class NodeProperties {
   static inline int PastEffectIndex(Node* node);
   static inline int PastControlIndex(Node* node);
 
-  static inline bool IsInputRange(Node::Edge edge, int first, int count);
+  static inline bool IsInputRange(Edge edge, int first, int count);
 };
 
 }  // namespace compiler
index 673ef32..8f44c24 100644 (file)
@@ -4,12 +4,54 @@
 
 #include "src/compiler/node.h"
 
-#include "src/compiler/generic-node-inl.h"
+#include "src/compiler/graph.h"
+#include "src/zone.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+Node::Node(NodeId id, int input_count, int reserved_input_count)
+    : id_(id),
+      bit_field_(InputCountField::encode(input_count) |
+                 ReservedInputCountField::encode(reserved_input_count) |
+                 HasAppendableInputsField::encode(false)),
+      first_use_(nullptr),
+      last_use_(nullptr) {
+  inputs_.static_ = reinterpret_cast<Input*>(this + 1);
+}
+
+
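+// Allocates a node as one contiguous zone block laid out as
+// [Node][Input x (input_count + reserved)][Use x input_count]; the input
+// records live directly behind the node object itself.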
+Node* Node::New(Graph* graph, int input_count, Node** inputs,
+                bool has_extensible_inputs) {
+  size_t node_size = sizeof(Node);
+  int reserve_input_count = has_extensible_inputs ? kDefaultReservedInputs : 0;
+  size_t inputs_size = (input_count + reserve_input_count) * sizeof(Input);
+  size_t uses_size = input_count * sizeof(Use);
+  int size = static_cast<int>(node_size + inputs_size + uses_size);
+  Zone* zone = graph->zone();
+  void* buffer = zone->New(size);
+  Node* result =
+      new (buffer) Node(graph->NextNodeID(), input_count, reserve_input_count);
+  Input* input =
+      reinterpret_cast<Input*>(reinterpret_cast<char*>(buffer) + node_size);
+  Use* use =
+      reinterpret_cast<Use*>(reinterpret_cast<char*>(input) + inputs_size);
+
+  for (int current = 0; current < input_count; ++current) {
+    Node* to = *inputs++;
+    input->to = to;
+    input->use = use;
+    use->input_index = current;
+    use->from = result;
+    to->AppendUse(use);
+    ++use;
+    ++input;
+  }
+  return result;
+}
+
+
 void Node::Kill() {
   DCHECK_NOT_NULL(op());
   RemoveAllInputs();
@@ -42,6 +84,26 @@ Node* Node::FindProjection(size_t projection_index) {
 }
 
 
+int Node::UseCount() const {
+  int use_count = 0;
+  for (const Use* use = first_use_; use; use = use->next) {
+    ++use_count;
+  }
+  return use_count;
+}
+
+
+Node* Node::UseAt(int index) const {
+  DCHECK_LE(0, index);
+  DCHECK_LT(index, UseCount());
+  Use* current = first_use_;
+  while (index-- != 0) {
+    current = current->next;
+  }
+  return current->from;
+}
+
+
 std::ostream& operator<<(std::ostream& os, const Node& n) {
   os << n.id() << ": " << *n.op();
   if (n.InputCount() > 0) {
index 3a5afd2..2295b7b 100644 (file)
@@ -9,20 +9,55 @@
 #include <set>
 #include <vector>
 
-#include "src/compiler/generic-algorithm.h"
-#include "src/compiler/generic-node.h"
+#include "src/v8.h"
+
 #include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
 #include "src/types.h"
 #include "src/zone.h"
 #include "src/zone-allocator.h"
+#include "src/zone-containers.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-class NodeData {
+// Forward declarations.
+class Edge;
+class Graph;
+
+
+// Marks are used during traversal of the graph to distinguish states of nodes.
+// Each node has a mark which is a monotonically increasing integer, and a
+// {NodeMarker} has a range of values that indicate states of a node.
+typedef uint32_t Mark;
+
+// NodeIds are identifying numbers for nodes that can be used to index auxiliary
+// out-of-line data associated with each node.
+typedef int NodeId;
+
+// A Node is the basic primitive of graphs. Nodes are chained together by
+// input/use chains and by default contain only an identifying number, which
+// specific applications of graphs and nodes can use to index auxiliary
+// out-of-line data, especially transient data.
+//
+// In addition, Nodes carry a mutable Operator that may change during
+// compilation, e.g. during lowering passes. Any other information that needs
+// to be associated with Nodes during compilation must be stored out-of-line,
+// indexed by the Node's id.
+class Node FINAL {
  public:
+  void Initialize(const Operator* op) {
+    set_op(op);
+    set_mark(0);
+  }
+
+  bool IsDead() const { return InputCount() > 0 && InputAt(0) == NULL; }
+  void Kill();
+
+  void CollectProjections(ZoneVector<Node*>* projections);
+  Node* FindProjection(size_t projection_index);
+
   const Operator* op() const { return op_; }
   void set_op(const Operator* op) { op_ = op; }
 
@@ -31,43 +66,387 @@ class NodeData {
     return static_cast<IrOpcode::Value>(op_->opcode());
   }
 
+  NodeId id() const { return id_; }
+
+  int InputCount() const { return input_count(); }
+  Node* InputAt(int index) const { return GetInputRecordPtr(index)->to; }
+  inline void ReplaceInput(int index, Node* new_input);
+  inline void AppendInput(Zone* zone, Node* new_input);
+  inline void InsertInput(Zone* zone, int index, Node* new_input);
+  inline void RemoveInput(int index);
+
+  int UseCount() const;
+  Node* UseAt(int index) const;
+  inline void ReplaceUses(Node* replace_to);
+  template <class UnaryPredicate>
+  inline void ReplaceUsesIf(UnaryPredicate pred, Node* replace_to);
+  inline void RemoveAllInputs();
+
+  inline void TrimInputCount(int input_count);
+
+  class InputEdges {
+   public:
+    class iterator;
+    iterator begin() const;
+    iterator end() const;
+    bool empty() const;
+
+    explicit InputEdges(Node* node) : node_(node) {}
+
+   private:
+    Node* node_;
+  };
+
+  class Inputs {
+   public:
+    class iterator;
+    iterator begin() const;
+    iterator end() const;
+    bool empty() const;
+
+    explicit Inputs(Node* node) : node_(node) {}
+
+   private:
+    Node* node_;
+  };
+
+  Inputs inputs() { return Inputs(this); }
+  InputEdges input_edges() { return InputEdges(this); }
+
+  class UseEdges {
+   public:
+    class iterator;
+    iterator begin() const;
+    iterator end() const;
+    bool empty() const;
+
+    explicit UseEdges(Node* node) : node_(node) {}
+
+   private:
+    Node* node_;
+  };
+
+  class Uses {
+   public:
+    class iterator;
+    iterator begin() const;
+    iterator end() const;
+    bool empty() const;
+
+    explicit Uses(Node* node) : node_(node) {}
+
+   private:
+    Node* node_;
+  };
+
+  Uses uses() { return Uses(this); }
+  UseEdges use_edges() { return UseEdges(this); }
+
+  bool OwnedBy(Node* owner) const;
+
+  static Node* New(Graph* graph, int input_count, Node** inputs,
+                   bool has_extensible_inputs);
+
  protected:
-  const Operator* op_;
-  Bounds bounds_;
-  explicit NodeData(Zone* zone) {}
+  friend class Graph;
+  friend class Edge;
+
+  class Use : public ZoneObject {
+   public:
+    Node* from;
+    Use* next;
+    Use* prev;
+    int input_index;
+  };
+
+  class Input {
+   public:
+    Node* to;
+    Use* use;
+
+    void Update(Node* new_to);
+  };
+
+  void EnsureAppendableInputs(Zone* zone);
+
+  Input* GetInputRecordPtr(int index) const {
+    if (has_appendable_inputs()) {
+      return &((*inputs_.appendable_)[index]);
+    } else {
+      return &inputs_.static_[index];
+    }
+  }
+
+  inline void AppendUse(Use* use);
+  inline void RemoveUse(Use* use);
+
+  void* operator new(size_t, void* location) { return location; }
+
+ private:
+  inline Node(NodeId id, int input_count, int reserve_input_count);
+
+  typedef ZoneDeque<Input> InputDeque;
 
   friend class NodeProperties;
+  template <typename State>
+  friend class NodeMarker;
+
+  // Only NodeProperties should manipulate the bounds.
   Bounds bounds() { return bounds_; }
   void set_bounds(Bounds b) { bounds_ = b; }
+
+  // Only NodeMarkers should manipulate the marks on nodes.
+  Mark mark() { return mark_; }
+  void set_mark(Mark mark) { mark_ = mark; }
+
+  int input_count() const { return InputCountField::decode(bit_field_); }
+  void set_input_count(int input_count) {
+    DCHECK_LE(0, input_count);
+    bit_field_ = InputCountField::update(bit_field_, input_count);
+  }
+
+  int reserved_input_count() const {
+    return ReservedInputCountField::decode(bit_field_);
+  }
+  void set_reserved_input_count(int reserved_input_count) {
+    DCHECK_LE(0, reserved_input_count);
+    bit_field_ =
+        ReservedInputCountField::update(bit_field_, reserved_input_count);
+  }
+
+  bool has_appendable_inputs() const {
+    return HasAppendableInputsField::decode(bit_field_);
+  }
+  void set_has_appendable_inputs(bool has_appendable_inputs) {
+    bit_field_ =
+        HasAppendableInputsField::update(bit_field_, has_appendable_inputs);
+  }
+
+  typedef BitField<unsigned, 0, 29> InputCountField;
+  typedef BitField<unsigned, 29, 2> ReservedInputCountField;
+  typedef BitField<unsigned, 31, 1> HasAppendableInputsField;
+  static const int kDefaultReservedInputs = ReservedInputCountField::kMax;
+
+  const Operator* op_;
+  Bounds bounds_;
+  Mark mark_;
+  NodeId id_;
+  unsigned bit_field_;
+  union {
+    // When a node is initially allocated, it uses a static buffer to hold its
+    // inputs under the assumption that the number of inputs will not increase.
+    // When an input is appended beyond the reserved capacity, the static
+    // buffer is converted into a deque to allow for space-efficient growing.
+    Input* static_;
+    InputDeque* appendable_;
+  } inputs_;
+  Use* first_use_;
+  Use* last_use_;
+
+  DISALLOW_COPY_AND_ASSIGN(Node);
 };
 
-// A Node is the basic primitive of an IR graph. In addition to the members
-// inherited from Vector, Nodes only contain a mutable Operator that may change
-// during compilation, e.g. during lowering passes.  Other information that
-// needs to be associated with Nodes during compilation must be stored
-// out-of-line indexed by the Node's id.
-class Node FINAL : public GenericNode<NodeData, Node> {
+
+// An encapsulation of information associated with a single use of a node as
+// an input to another node, allowing access to both the defining node and
+// the node that has the input.
+class Edge {
  public:
-  Node(GenericGraphBase* graph, int input_count, int reserve_input_count)
-      : GenericNode<NodeData, Node>(graph, input_count, reserve_input_count) {}
+  Node* from() const { return input_->use->from; }
+  Node* to() const { return input_->to; }
+  int index() const {
+    int index = input_->use->input_index;
+    DCHECK(index < input_->use->from->input_count());
+    return index;
+  }
 
-  void Initialize(const Operator* op) { set_op(op); }
+  bool operator==(const Edge& other) { return input_ == other.input_; }
+  bool operator!=(const Edge& other) { return !(*this == other); }
 
-  bool IsDead() const { return InputCount() > 0 && InputAt(0) == NULL; }
-  void Kill();
+  void UpdateTo(Node* new_to) { input_->Update(new_to); }
 
-  void CollectProjections(ZoneVector<Node*>* projections);
-  Node* FindProjection(size_t projection_index);
+ private:
+  friend class Node::Uses::iterator;
+  friend class Node::Inputs::iterator;
+  friend class Node::UseEdges::iterator;
+  friend class Node::InputEdges::iterator;
+
+  explicit Edge(Node::Input* input) : input_(input) {}
+
+  Node::Input* input_;
 };
 
-std::ostream& operator<<(std::ostream& os, const Node& n);
 
-typedef GenericGraphVisit::NullNodeVisitor<NodeData, Node> NullNodeVisitor;
+// A forward iterator to visit the edges for the input dependencies of a node.
+class Node::InputEdges::iterator {
+ public:
+  typedef std::forward_iterator_tag iterator_category;
+  typedef int difference_type;
+  typedef Edge value_type;
+  typedef Edge* pointer;
+  typedef Edge& reference;
+  iterator(const Node::InputEdges::iterator& other)  // NOLINT
+      : input_(other.input_) {}
+  iterator() : input_(NULL) {}
+
+  Edge operator*() const { return Edge(input_); }
+  bool operator==(const iterator& other) const { return Equals(other); }
+  bool operator!=(const iterator& other) const { return !Equals(other); }
+  iterator& operator++() {
+    DCHECK(input_ != NULL);
+    Edge edge(input_);
+    Node* from = edge.from();
+    SetInput(from, input_->use->input_index + 1);
+    return *this;
+  }
+  iterator operator++(int) {
+    iterator result(*this);
+    ++(*this);
+    return result;
+  }
+
+ private:
+  friend class Node;
+
+  explicit iterator(Node* from, int index = 0) : input_(NULL) {
+    SetInput(from, index);
+  }
+
+  bool Equals(const iterator& other) const { return other.input_ == input_; }
+  void SetInput(Node* from, int index) {
+    DCHECK(index >= 0 && index <= from->InputCount());
+    if (index < from->InputCount()) {
+      input_ = from->GetInputRecordPtr(index);
+    } else {
+      input_ = NULL;
+    }
+  }
+
+  Input* input_;
+};
+
+
+// A forward iterator to visit the inputs of a node.
+class Node::Inputs::iterator {
+ public:
+  typedef std::forward_iterator_tag iterator_category;
+  typedef int difference_type;
+  typedef Node* value_type;
+  typedef Node** pointer;
+  typedef Node*& reference;
+
+  iterator(const Node::Inputs::iterator& other)  // NOLINT
+      : iter_(other.iter_) {}
+
+  Node* operator*() const { return (*iter_).to(); }
+  bool operator==(const iterator& other) const { return Equals(other); }
+  bool operator!=(const iterator& other) const { return !Equals(other); }
+  iterator& operator++() {
+    ++iter_;
+    return *this;
+  }
+  iterator operator++(int) {
+    iterator result(*this);
+    ++(*this);
+    return result;
+  }
+
+
+ private:
+  friend class Node::Inputs;
+
+  explicit iterator(Node* node, int index) : iter_(node, index) {}
+
+  bool Equals(const iterator& other) const { return other.iter_ == iter_; }
+
+  Node::InputEdges::iterator iter_;
+};
+
+
+// A forward iterator to visit the use edges of a node. The edges are returned
+// in the order in which they were added as inputs.
+class Node::UseEdges::iterator {
+ public:
+  iterator(const Node::UseEdges::iterator& other)  // NOLINT
+      : current_(other.current_),
+        next_(other.next_) {}
+
+  Edge operator*() const { return Edge(CurrentInput()); }
+
+  bool operator==(const iterator& other) { return Equals(other); }
+  bool operator!=(const iterator& other) { return !Equals(other); }
+  iterator& operator++() {
+    DCHECK(current_ != NULL);
+    current_ = next_;
+    next_ = (current_ == NULL) ? NULL : current_->next;
+    return *this;
+  }
+  iterator operator++(int) {
+    iterator result(*this);
+    ++(*this);
+    return result;
+  }
+
+ private:
+  friend class Node::UseEdges;
+
+  iterator() : current_(NULL), next_(NULL) {}
+  explicit iterator(Node* node)
+      : current_(node->first_use_),
+        next_(current_ == NULL ? NULL : current_->next) {}
+
+  bool Equals(const iterator& other) const {
+    return other.current_ == current_;
+  }
+
+  Input* CurrentInput() const {
+    return current_->from->GetInputRecordPtr(current_->input_index);
+  }
+
+  Node::Use* current_;
+  Node::Use* next_;
+};
+
+
+// A forward iterator to visit the uses of a node. The uses are returned in
+// the order in which they were added as inputs.
+class Node::Uses::iterator {
+ public:
+  iterator(const Node::Uses::iterator& other)  // NOLINT
+      : current_(other.current_) {}
+
+  Node* operator*() { return current_->from; }
+
+  bool operator==(const iterator& other) { return other.current_ == current_; }
+  bool operator!=(const iterator& other) { return other.current_ != current_; }
+  iterator& operator++() {
+    DCHECK(current_ != NULL);
+    current_ = current_->next;
+    return *this;
+  }
+
+ private:
+  friend class Node::Uses;
+
+  iterator() : current_(NULL) {}
+  explicit iterator(Node* node) : current_(node->first_use_) {}
+
+  Input* CurrentInput() const {
+    return current_->from->GetInputRecordPtr(current_->input_index);
+  }
+
+  Node::Use* current_;
+};
+
+
+std::ostream& operator<<(std::ostream& os, const Node& n);
 
 typedef std::set<Node*, std::less<Node*>, zone_allocator<Node*> > NodeSet;
 typedef NodeSet::iterator NodeSetIter;
 typedef NodeSet::reverse_iterator NodeSetRIter;
 
+typedef ZoneDeque<Node*> NodeDeque;
+
 typedef ZoneVector<Node*> NodeVector;
 typedef NodeVector::iterator NodeVectorIter;
 typedef NodeVector::const_iterator NodeVectorConstIter;
@@ -86,6 +465,195 @@ static inline const T& OpParameter(const Node* node) {
   return OpParameter<T>(node->op());
 }
 
+inline Node::InputEdges::iterator Node::InputEdges::begin() const {
+  return Node::InputEdges::iterator(this->node_, 0);
+}
+
+inline Node::InputEdges::iterator Node::InputEdges::end() const {
+  return Node::InputEdges::iterator(this->node_, this->node_->InputCount());
+}
+
+inline Node::Inputs::iterator Node::Inputs::begin() const {
+  return Node::Inputs::iterator(this->node_, 0);
+}
+
+inline Node::Inputs::iterator Node::Inputs::end() const {
+  return Node::Inputs::iterator(this->node_, this->node_->InputCount());
+}
+
+inline Node::UseEdges::iterator Node::UseEdges::begin() const {
+  return Node::UseEdges::iterator(this->node_);
+}
+
+inline Node::UseEdges::iterator Node::UseEdges::end() const {
+  return Node::UseEdges::iterator();
+}
+
+inline Node::Uses::iterator Node::Uses::begin() const {
+  return Node::Uses::iterator(this->node_);
+}
+
+inline Node::Uses::iterator Node::Uses::end() const {
+  return Node::Uses::iterator();
+}
+
+inline bool Node::InputEdges::empty() const { return begin() == end(); }
+inline bool Node::Uses::empty() const { return begin() == end(); }
+inline bool Node::UseEdges::empty() const { return begin() == end(); }
+inline bool Node::Inputs::empty() const { return begin() == end(); }
+
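+// Redirects every use of this node to {replace_to}, rewriting the input
+// records in place and splicing this node's whole use list onto the end of
+// {replace_to}'s use list.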
+inline void Node::ReplaceUses(Node* replace_to) {
+  for (Use* use = first_use_; use != NULL; use = use->next) {
+    use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+  }
+  if (replace_to->last_use_ == NULL) {
+    DCHECK_EQ(NULL, replace_to->first_use_);
+    replace_to->first_use_ = first_use_;
+    replace_to->last_use_ = last_use_;
+  } else if (first_use_ != NULL) {
+    DCHECK_NE(NULL, replace_to->first_use_);
+    replace_to->last_use_->next = first_use_;
+    first_use_->prev = replace_to->last_use_;
+    replace_to->last_use_ = last_use_;
+  }
+  first_use_ = NULL;
+  last_use_ = NULL;
+}
+
+template <class UnaryPredicate>
+inline void Node::ReplaceUsesIf(UnaryPredicate pred, Node* replace_to) {
+  for (Use* use = first_use_; use != NULL;) {
+    Use* next = use->next;
+    if (pred(use->from)) {
+      RemoveUse(use);
+      replace_to->AppendUse(use);
+      use->from->GetInputRecordPtr(use->input_index)->to = replace_to;
+    }
+    use = next;
+  }
+}
+
+inline void Node::RemoveAllInputs() {
+  for (Edge edge : input_edges()) {
+    edge.UpdateTo(NULL);
+  }
+}
+
+inline void Node::TrimInputCount(int new_input_count) {
+  if (new_input_count == input_count()) return;  // Nothing to do.
+
+  DCHECK(new_input_count < input_count());
+
+  // Update inline inputs.
+  for (int i = new_input_count; i < input_count(); i++) {
+    Node::Input* input = GetInputRecordPtr(i);
+    input->Update(NULL);
+  }
+  set_input_count(new_input_count);
+}
+
+inline void Node::ReplaceInput(int index, Node* new_to) {
+  Input* input = GetInputRecordPtr(index);
+  input->Update(new_to);
+}
+
+inline void Node::Input::Update(Node* new_to) {
+  Node* old_to = this->to;
+  if (new_to == old_to) return;  // Nothing to do.
+  // Snip out the use from where it used to be
+  if (old_to != NULL) {
+    old_to->RemoveUse(use);
+  }
+  to = new_to;
+  // And put it into the new node's use list.
+  if (new_to != NULL) {
+    new_to->AppendUse(use);
+  } else {
+    use->next = NULL;
+    use->prev = NULL;
+  }
+}
+
+inline void Node::EnsureAppendableInputs(Zone* zone) {
+  if (!has_appendable_inputs()) {
+    void* deque_buffer = zone->New(sizeof(InputDeque));
+    InputDeque* deque = new (deque_buffer) InputDeque(zone);
+    for (int i = 0; i < input_count(); ++i) {
+      deque->push_back(inputs_.static_[i]);
+    }
+    inputs_.appendable_ = deque;
+    set_has_appendable_inputs(true);
+  }
+}
+
+inline void Node::AppendInput(Zone* zone, Node* to_append) {
+  Use* new_use = new (zone) Use;
+  Input new_input;
+  new_input.to = to_append;
+  new_input.use = new_use;
+  if (reserved_input_count() > 0) {
+    DCHECK(!has_appendable_inputs());
+    set_reserved_input_count(reserved_input_count() - 1);
+    inputs_.static_[input_count()] = new_input;
+  } else {
+    EnsureAppendableInputs(zone);
+    inputs_.appendable_->push_back(new_input);
+  }
+  new_use->input_index = input_count();
+  new_use->from = this;
+  to_append->AppendUse(new_use);
+  set_input_count(input_count() + 1);
+}
+
+inline void Node::InsertInput(Zone* zone, int index, Node* to_insert) {
+  DCHECK(index >= 0 && index < InputCount());
+  // TODO(turbofan): Optimize this implementation!
+  AppendInput(zone, InputAt(InputCount() - 1));
+  for (int i = InputCount() - 1; i > index; --i) {
+    ReplaceInput(i, InputAt(i - 1));
+  }
+  ReplaceInput(index, to_insert);
+}
+
+inline void Node::RemoveInput(int index) {
+  DCHECK(index >= 0 && index < InputCount());
+  // TODO(turbofan): Optimize this implementation!
+  for (; index < InputCount() - 1; ++index) {
+    ReplaceInput(index, InputAt(index + 1));
+  }
+  TrimInputCount(InputCount() - 1);
+}
+
+inline void Node::AppendUse(Use* use) {
+  use->next = NULL;
+  use->prev = last_use_;
+  if (last_use_ == NULL) {
+    first_use_ = use;
+  } else {
+    last_use_->next = use;
+  }
+  last_use_ = use;
+}
+
+inline void Node::RemoveUse(Use* use) {
+  if (last_use_ == use) {
+    last_use_ = use->prev;
+  }
+  if (use->prev != NULL) {
+    use->prev->next = use->next;
+  } else {
+    first_use_ = use->next;
+  }
+  if (use->next != NULL) {
+    use->next->prev = use->prev;
+  }
+}
+
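+// Returns true when this node has exactly one use and that use comes from
+// {owner}.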
+inline bool Node::OwnedBy(Node* owner) const {
+  return first_use_ != NULL && first_use_->from == owner &&
+         first_use_->next == NULL;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/src/compiler/opcodes.cc b/deps/v8/src/compiler/opcodes.cc
new file mode 100644 (file)
index 0000000..044395c
--- /dev/null
@@ -0,0 +1,34 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/opcodes.h"
+
+#include <algorithm>
+
+#include "src/base/macros.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+
+char const* const kMnemonics[] = {
+#define DECLARE_MNEMONIC(x) #x,
+    ALL_OP_LIST(DECLARE_MNEMONIC)
+#undef DECLARE_MNEMONIC
+        "UnknownOpcode"};
+
+}  // namespace
+
+
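+// For example, Mnemonic(IrOpcode::kInt32Add) yields "Int32Add"; out-of-range
+// values are clamped to the trailing "UnknownOpcode" entry.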
+// static
+char const* IrOpcode::Mnemonic(Value value) {
+  size_t const n = std::min<size_t>(value, arraysize(kMnemonics) - 1);
+  return kMnemonics[n];
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
index 132f9cc..3f00e6a 100644 (file)
   V(JSCreateWithContext)      \
   V(JSCreateBlockContext)     \
   V(JSCreateModuleContext)    \
-  V(JSCreateGlobalContext)
+  V(JSCreateScriptContext)
 
 #define JS_OTHER_OP_LIST(V) \
   V(JSCallConstruct)        \
   V(ChangeBoolToBit)          \
   V(ChangeBitToBool)          \
   V(LoadField)                \
+  V(LoadBuffer)               \
   V(LoadElement)              \
   V(StoreField)               \
+  V(StoreBuffer)              \
   V(StoreElement)             \
   V(ObjectIsSmi)              \
   V(ObjectIsNonNegativeSmi)
   V(Float64Ceil)              \
   V(Float64RoundTruncate)     \
   V(Float64RoundTiesAway)     \
-  V(LoadStackPointer)
+  V(LoadStackPointer)         \
+  V(CheckedLoad)              \
+  V(CheckedStore)
 
 #define VALUE_OP_LIST(V) \
   COMMON_OP_LIST(V)      \
@@ -264,18 +268,7 @@ class IrOpcode {
   };
 
   // Returns the mnemonic name of an opcode.
-  static const char* Mnemonic(Value val) {
-    // TODO(turbofan): make this a table lookup.
-    switch (val) {
-#define RETURN_NAME(x) \
-  case k##x:           \
-    return #x;
-      ALL_OP_LIST(RETURN_NAME)
-#undef RETURN_NAME
-      default:
-        return "UnknownOpcode";
-    }
-  }
+  static char const* Mnemonic(Value value);
 
   static bool IsJsOpcode(Value val) {
     switch (val) {
similarity index 71%
rename from deps/v8/src/compiler/operator-properties-inl.h
rename to deps/v8/src/compiler/operator-properties.cc
index f958e74..abfc5fd 100644 (file)
@@ -1,30 +1,29 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
+// Copyright 2014 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
-#define V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
+#include "src/compiler/operator-properties.h"
 
-#include "src/compiler/common-operator.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/opcodes.h"
-#include "src/compiler/operator-properties.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-inline bool OperatorProperties::HasContextInput(const Operator* op) {
+// static
+bool OperatorProperties::HasContextInput(const Operator* op) {
   IrOpcode::Value opcode = static_cast<IrOpcode::Value>(op->opcode());
   return IrOpcode::IsJsOpcode(opcode);
 }
 
-inline bool OperatorProperties::HasFrameStateInput(const Operator* op) {
+
+// static
+bool OperatorProperties::HasFrameStateInput(const Operator* op) {
   if (!FLAG_turbo_deoptimization) {
     return false;
   }
-
   switch (op->opcode()) {
     case IrOpcode::kFrameState:
       return true;
@@ -81,25 +80,18 @@ inline bool OperatorProperties::HasFrameStateInput(const Operator* op) {
   }
 }
 
-inline int OperatorProperties::GetContextInputCount(const Operator* op) {
-  return OperatorProperties::HasContextInput(op) ? 1 : 0;
-}
-
-inline int OperatorProperties::GetFrameStateInputCount(const Operator* op) {
-  return OperatorProperties::HasFrameStateInput(op) ? 1 : 0;
-}
 
-inline int OperatorProperties::GetTotalInputCount(const Operator* op) {
+// static
+int OperatorProperties::GetTotalInputCount(const Operator* op) {
   return op->ValueInputCount() + GetContextInputCount(op) +
          GetFrameStateInputCount(op) + op->EffectInputCount() +
          op->ControlInputCount();
 }
 
-// -----------------------------------------------------------------------------
-// Output properties.
 
-inline bool OperatorProperties::IsBasicBlockBegin(const Operator* op) {
-  uint8_t opcode = op->opcode();
+// static
+bool OperatorProperties::IsBasicBlockBegin(const Operator* op) {
+  Operator::Opcode const opcode = op->opcode();
   return opcode == IrOpcode::kStart || opcode == IrOpcode::kEnd ||
          opcode == IrOpcode::kDead || opcode == IrOpcode::kLoop ||
          opcode == IrOpcode::kMerge || opcode == IrOpcode::kIfTrue ||
@@ -109,5 +101,3 @@ inline bool OperatorProperties::IsBasicBlockBegin(const Operator* op) {
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
-
-#endif  // V8_COMPILER_OPERATOR_PROPERTIES_INL_H_
index 70186c9..37c9755 100644 (file)
@@ -5,22 +5,33 @@
 #ifndef V8_COMPILER_OPERATOR_PROPERTIES_H_
 #define V8_COMPILER_OPERATOR_PROPERTIES_H_
 
+#include "src/base/macros.h"
+
 namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
 class Operator;
 
-class OperatorProperties {
+
+class OperatorProperties FINAL {
  public:
-  static inline bool HasContextInput(const Operator* op);
-  static inline bool HasFrameStateInput(const Operator* op);
+  static bool HasContextInput(const Operator* op);
+  static bool HasFrameStateInput(const Operator* op);
+
+  static int GetContextInputCount(const Operator* op) {
+    return HasContextInput(op) ? 1 : 0;
+  }
+  static int GetFrameStateInputCount(const Operator* op) {
+    return HasFrameStateInput(op) ? 1 : 0;
+  }
+  static int GetTotalInputCount(const Operator* op);
 
-  static inline int GetContextInputCount(const Operator* op);
-  static inline int GetFrameStateInputCount(const Operator* op);
-  static inline int GetTotalInputCount(const Operator* op);
+  static bool IsBasicBlockBegin(const Operator* op);
 
-  static inline bool IsBasicBlockBegin(const Operator* op);
+ private:
+  DISALLOW_COPY_AND_ASSIGN(OperatorProperties);
 };
 
 }  // namespace compiler
index 74b714b..fb144ce 100644 (file)
@@ -82,14 +82,6 @@ class Operator : public ZoneObject {
     return (properties() & property) == property;
   }
 
-  // Number of data inputs to the operator, for verifying graph structure.
-  // TODO(titzer): convert callers to ValueInputCount();
-  int InputCount() const { return ValueInputCount(); }
-
-  // Number of data outputs from the operator, for verifying graph structure.
-  // TODO(titzer): convert callers to ValueOutputCount();
-  int OutputCount() const { return ValueOutputCount(); }
-
   Properties properties() const { return properties_; }
 
   // TODO(titzer): convert return values here to size_t.
@@ -149,12 +141,12 @@ class Operator1 : public Operator {
 
   T const& parameter() const { return parameter_; }
 
-  virtual bool Equals(const Operator* other) const FINAL {
+  bool Equals(const Operator* other) const FINAL {
     if (opcode() != other->opcode()) return false;
     const Operator1<T>* that = static_cast<const Operator1<T>*>(other);
     return this->pred_(this->parameter(), that->parameter());
   }
-  virtual size_t HashCode() const FINAL {
+  size_t HashCode() const FINAL {
     return base::hash_combine(this->opcode(), this->hash_(this->parameter()));
   }
   virtual void PrintParameter(std::ostream& os) const {
@@ -162,7 +154,7 @@ class Operator1 : public Operator {
   }
 
  protected:
-  virtual void PrintTo(std::ostream& os) const FINAL {
+  void PrintTo(std::ostream& os) const FINAL {
     os << mnemonic();
     PrintParameter(os);
   }
@@ -180,6 +172,20 @@ inline T const& OpParameter(const Operator* op) {
   return static_cast<const Operator1<T>*>(op)->parameter();
 }
 
+// NOTE: We have to be careful to use the right equal/hash functions below, for
+// float/double we always use the ones operating on the bit level.
+template <>
+inline float const& OpParameter(const Operator* op) {
+  return static_cast<const Operator1<float, base::bit_equal_to<float>,
+                                     base::bit_hash<float>>*>(op)->parameter();
+}
+
+template <>
+inline double const& OpParameter(const Operator* op) {
+  return static_cast<const Operator1<double, base::bit_equal_to<double>,
+                                     base::bit_hash<double>>*>(op)->parameter();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
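
The bit-level specializations above matter because IEEE comparison is the
wrong notion of identity for operator parameters: 0.0 == -0.0 holds although
the two constants behave differently, and NaN == NaN fails even for identical
payloads. A standalone sketch of the intended semantics (assuming
base::bit_equal_to compares object representations; this is not V8's
implementation):

    #include <cstdint>
    #include <cstring>

    // Compare doubles by bit pattern, as a parameter-identity check should.
    bool BitEqual(double a, double b) {
      uint64_t ua, ub;
      std::memcpy(&ua, &a, sizeof ua);
      std::memcpy(&ub, &b, sizeof ub);
      return ua == ub;
    }
    // BitEqual(0.0, -0.0) is false, although 0.0 == -0.0;
    // BitEqual(nan, nan) is true for one NaN payload, although nan != nan.
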
diff --git a/deps/v8/src/compiler/phi-reducer.h b/deps/v8/src/compiler/phi-reducer.h
deleted file mode 100644 (file)
index 787db1c..0000000
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#ifndef V8_COMPILER_PHI_REDUCER_H_
-#define V8_COMPILER_PHI_REDUCER_H_
-
-#include "src/compiler/generic-node-inl.h"
-#include "src/compiler/graph-reducer.h"
-
-namespace v8 {
-namespace internal {
-namespace compiler {
-
-// Replaces redundant phis if all the inputs are the same or the phi itself.
-class PhiReducer FINAL : public Reducer {
- public:
-  virtual Reduction Reduce(Node* node) OVERRIDE {
-    if (node->opcode() != IrOpcode::kPhi &&
-        node->opcode() != IrOpcode::kEffectPhi)
-      return NoChange();
-
-    int n = node->op()->InputCount();
-    if (n == 1) return Replace(node->InputAt(0));
-
-    Node* replacement = NULL;
-    Node::Inputs inputs = node->inputs();
-    for (InputIter it = inputs.begin(); n > 0; --n, ++it) {
-      Node* input = *it;
-      if (input != node && input != replacement) {
-        if (replacement != NULL) return NoChange();
-        replacement = input;
-      }
-    }
-    DCHECK_NE(node, replacement);
-    return Replace(replacement);
-  }
-};
-}
-}
-}  // namespace v8::internal::compiler
-
-#endif  // V8_COMPILER_PHI_REDUCER_H_
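
For context, the deleted reducer implemented one rule: a phi whose inputs are
all either the phi itself or one unique node is redundant and collapses to
that node (presumably now covered by the control reduction phases introduced
in pipeline.cc below). A minimal standalone sketch of the rule, not tied to
V8's node types:

    #include <vector>

    // Returns the unique non-self input if the phi is redundant, else null.
    // E.g. phi(x, x, phi) -> x, but phi(x, y) -> null (not reducible).
    template <typename Node>
    Node* TryReducePhi(Node* phi, const std::vector<Node*>& inputs) {
      Node* replacement = nullptr;
      for (Node* input : inputs) {
        if (input == phi || input == replacement) continue;
        if (replacement != nullptr) return nullptr;  // two distinct inputs
        replacement = input;
      }
      return replacement;
    }
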
index f2571e3..0e80623 100644 (file)
@@ -9,21 +9,28 @@
 
 #include "src/base/platform/elapsed-timer.h"
 #include "src/compiler/ast-graph-builder.h"
+#include "src/compiler/ast-loop-assignment-analyzer.h"
 #include "src/compiler/basic-block-instrumentor.h"
 #include "src/compiler/change-lowering.h"
 #include "src/compiler/code-generator.h"
+#include "src/compiler/common-operator-reducer.h"
 #include "src/compiler/control-reducer.h"
 #include "src/compiler/graph-replay.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/instruction.h"
 #include "src/compiler/instruction-selector.h"
+#include "src/compiler/js-builtin-reducer.h"
 #include "src/compiler/js-context-specialization.h"
 #include "src/compiler/js-generic-lowering.h"
 #include "src/compiler/js-inlining.h"
 #include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/jump-threading.h"
+#include "src/compiler/load-elimination.h"
 #include "src/compiler/machine-operator-reducer.h"
+#include "src/compiler/move-optimizer.h"
 #include "src/compiler/pipeline-statistics.h"
 #include "src/compiler/register-allocator.h"
+#include "src/compiler/register-allocator-verifier.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/scheduler.h"
 #include "src/compiler/select-lowering.h"
@@ -42,54 +49,83 @@ namespace compiler {
 
 class PipelineData {
  public:
-  explicit PipelineData(CompilationInfo* info, ZonePool* zone_pool,
-                        PipelineStatistics* pipeline_statistics)
+  explicit PipelineData(ZonePool* zone_pool, CompilationInfo* info)
       : isolate_(info->zone()->isolate()),
-        outer_zone_(info->zone()),
+        info_(info),
+        outer_zone_(nullptr),
         zone_pool_(zone_pool),
-        pipeline_statistics_(pipeline_statistics),
+        pipeline_statistics_(nullptr),
+        compilation_failed_(false),
+        code_(Handle<Code>::null()),
         graph_zone_scope_(zone_pool_),
-        graph_zone_(graph_zone_scope_.zone()),
-        graph_(new (graph_zone()) Graph(graph_zone())),
-        source_positions_(new SourcePositionTable(graph())),
-        machine_(new (graph_zone()) MachineOperatorBuilder(
-            kMachPtr, InstructionSelector::SupportedMachineOperatorFlags())),
-        common_(new (graph_zone()) CommonOperatorBuilder(graph_zone())),
-        javascript_(new (graph_zone()) JSOperatorBuilder(graph_zone())),
-        jsgraph_(new (graph_zone())
-                 JSGraph(graph(), common(), javascript(), machine())),
-        typer_(new Typer(graph(), info->context())),
-        schedule_(NULL),
+        graph_zone_(nullptr),
+        graph_(nullptr),
+        loop_assignment_(nullptr),
+        machine_(nullptr),
+        common_(nullptr),
+        javascript_(nullptr),
+        jsgraph_(nullptr),
+        typer_(nullptr),
+        context_node_(nullptr),
+        schedule_(nullptr),
         instruction_zone_scope_(zone_pool_),
-        instruction_zone_(instruction_zone_scope_.zone()) {}
-
-  // For machine graph testing only.
-  PipelineData(Graph* graph, Schedule* schedule, ZonePool* zone_pool)
-      : isolate_(graph->zone()->isolate()),
-        outer_zone_(NULL),
-        zone_pool_(zone_pool),
-        pipeline_statistics_(NULL),
-        graph_zone_scope_(zone_pool_),
-        graph_zone_(NULL),
-        graph_(graph),
-        source_positions_(new SourcePositionTable(graph)),
-        machine_(NULL),
-        common_(NULL),
-        javascript_(NULL),
-        jsgraph_(NULL),
-        typer_(NULL),
-        schedule_(schedule),
-        instruction_zone_scope_(zone_pool_),
-        instruction_zone_(instruction_zone_scope_.zone()) {}
+        instruction_zone_(nullptr),
+        sequence_(nullptr),
+        frame_(nullptr),
+        register_allocator_(nullptr) {}
 
   ~PipelineData() {
     DeleteInstructionZone();
     DeleteGraphZone();
   }
 
+  // For main entry point.
+  void Initialize(PipelineStatistics* pipeline_statistics) {
+    PhaseScope scope(pipeline_statistics, "init pipeline data");
+    outer_zone_ = info()->zone();
+    pipeline_statistics_ = pipeline_statistics;
+    graph_zone_ = graph_zone_scope_.zone();
+    graph_ = new (graph_zone()) Graph(graph_zone());
+    source_positions_.Reset(new SourcePositionTable(graph()));
+    machine_ = new (graph_zone()) MachineOperatorBuilder(
+        graph_zone(), kMachPtr,
+        InstructionSelector::SupportedMachineOperatorFlags());
+    common_ = new (graph_zone()) CommonOperatorBuilder(graph_zone());
+    javascript_ = new (graph_zone()) JSOperatorBuilder(graph_zone());
+    jsgraph_ =
+        new (graph_zone()) JSGraph(graph(), common(), javascript(), machine());
+    typer_.Reset(new Typer(graph(), info()->context()));
+    instruction_zone_ = instruction_zone_scope_.zone();
+  }
+
+  // For machine graph testing entry point.
+  void InitializeForTesting(Graph* graph, Schedule* schedule) {
+    graph_ = graph;
+    source_positions_.Reset(new SourcePositionTable(graph));
+    schedule_ = schedule;
+    instruction_zone_ = instruction_zone_scope_.zone();
+  }
+
+  // For register allocation testing entry point.
+  void InitializeForTesting(InstructionSequence* sequence) {
+    instruction_zone_ = sequence->zone();
+    sequence_ = sequence;
+  }
+
   Isolate* isolate() const { return isolate_; }
+  CompilationInfo* info() const { return info_; }
   ZonePool* zone_pool() const { return zone_pool_; }
   PipelineStatistics* pipeline_statistics() { return pipeline_statistics_; }
+  bool compilation_failed() const { return compilation_failed_; }
+  void set_compilation_failed() { compilation_failed_ = true; }
+  Handle<Code> code() { return code_; }
+  void set_code(Handle<Code> code) {
+    DCHECK(code_.is_null());
+    code_ = code;
+  }
+
+  // RawMachineAssembler generally produces graphs which cannot be verified.
+  bool MayHaveUnverifiableGraph() const { return outer_zone_ == nullptr; }
 
   Zone* graph_zone() const { return graph_zone_; }
   Graph* graph() const { return graph_; }
@@ -101,55 +137,100 @@ class PipelineData {
   JSOperatorBuilder* javascript() const { return javascript_; }
   JSGraph* jsgraph() const { return jsgraph_; }
   Typer* typer() const { return typer_.get(); }
+
+  LoopAssignmentAnalysis* loop_assignment() const { return loop_assignment_; }
+  void set_loop_assignment(LoopAssignmentAnalysis* loop_assignment) {
+    DCHECK_EQ(nullptr, loop_assignment_);
+    loop_assignment_ = loop_assignment;
+  }
+
+  Node* context_node() const { return context_node_; }
+  void set_context_node(Node* context_node) {
+    DCHECK_EQ(nullptr, context_node_);
+    context_node_ = context_node;
+  }
+
   Schedule* schedule() const { return schedule_; }
   void set_schedule(Schedule* schedule) {
-    DCHECK_EQ(NULL, schedule_);
+    DCHECK_EQ(nullptr, schedule_);
     schedule_ = schedule;
   }
 
   Zone* instruction_zone() const { return instruction_zone_; }
+  InstructionSequence* sequence() const { return sequence_; }
+  Frame* frame() const { return frame_; }
+  RegisterAllocator* register_allocator() const { return register_allocator_; }
 
   void DeleteGraphZone() {
     // Destroy objects with destructors first.
-    source_positions_.Reset(NULL);
-    typer_.Reset(NULL);
-    if (graph_zone_ == NULL) return;
+    source_positions_.Reset(nullptr);
+    typer_.Reset(nullptr);
+    if (graph_zone_ == nullptr) return;
     // Destroy zone and clear pointers.
     graph_zone_scope_.Destroy();
-    graph_zone_ = NULL;
-    graph_ = NULL;
-    machine_ = NULL;
-    common_ = NULL;
-    javascript_ = NULL;
-    jsgraph_ = NULL;
-    schedule_ = NULL;
+    graph_zone_ = nullptr;
+    graph_ = nullptr;
+    loop_assignment_ = nullptr;
+    machine_ = nullptr;
+    common_ = nullptr;
+    javascript_ = nullptr;
+    jsgraph_ = nullptr;
+    context_node_ = nullptr;
+    schedule_ = nullptr;
   }
 
   void DeleteInstructionZone() {
-    if (instruction_zone_ == NULL) return;
+    if (instruction_zone_ == nullptr) return;
     instruction_zone_scope_.Destroy();
-    instruction_zone_ = NULL;
+    instruction_zone_ = nullptr;
+    sequence_ = nullptr;
+    frame_ = nullptr;
+    register_allocator_ = nullptr;
+  }
+
+  void InitializeInstructionSequence() {
+    DCHECK_EQ(nullptr, sequence_);
+    InstructionBlocks* instruction_blocks =
+        InstructionSequence::InstructionBlocksFor(instruction_zone(),
+                                                  schedule());
+    sequence_ = new (instruction_zone())
+        InstructionSequence(instruction_zone(), instruction_blocks);
+  }
+
+  void InitializeRegisterAllocator(Zone* local_zone,
+                                   const RegisterConfiguration* config,
+                                   const char* debug_name) {
+    DCHECK_EQ(nullptr, register_allocator_);
+    DCHECK_EQ(nullptr, frame_);
+    frame_ = new (instruction_zone()) Frame();
+    register_allocator_ = new (instruction_zone())
+        RegisterAllocator(config, local_zone, frame(), sequence(), debug_name);
   }
 
  private:
   Isolate* isolate_;
+  CompilationInfo* info_;
   Zone* outer_zone_;
-  ZonePool* zone_pool_;
+  ZonePool* const zone_pool_;
   PipelineStatistics* pipeline_statistics_;
+  bool compilation_failed_;
+  Handle<Code> code_;
 
-  ZonePool::Scope graph_zone_scope_;
-  Zone* graph_zone_;
   // All objects in the following group of fields are allocated in graph_zone_.
   // They are all set to NULL when the graph_zone_ is destroyed.
+  ZonePool::Scope graph_zone_scope_;
+  Zone* graph_zone_;
   Graph* graph_;
   // TODO(dcarney): make this into a ZoneObject.
   SmartPointer<SourcePositionTable> source_positions_;
+  LoopAssignmentAnalysis* loop_assignment_;
   MachineOperatorBuilder* machine_;
   CommonOperatorBuilder* common_;
   JSOperatorBuilder* javascript_;
   JSGraph* jsgraph_;
   // TODO(dcarney): make this into a ZoneObject.
   SmartPointer<Typer> typer_;
+  Node* context_node_;
   Schedule* schedule_;
 
   // All objects in the following group of fields are allocated in
@@ -157,6 +238,9 @@ class PipelineData {
   // destroyed.
   ZonePool::Scope instruction_zone_scope_;
   Zone* instruction_zone_;
+  InstructionSequence* sequence_;
+  Frame* frame_;
+  RegisterAllocator* register_allocator_;
 
   DISALLOW_COPY_AND_ASSIGN(PipelineData);
 };
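
Note the two-zone split this class preserves: graph-lifetime objects live in
graph_zone_ and instruction-lifetime objects in instruction_zone_, and
GenerateCode() below calls DeleteGraphZone() right after instruction
selection, so the graph, operator builders and schedule are freed while
register allocation and code generation keep running out of the instruction
zone.
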
@@ -178,58 +262,38 @@ struct TurboCfgFile : public std::ofstream {
 };
 
 
-void Pipeline::VerifyAndPrintGraph(
-    Graph* graph, const char* phase, bool untyped) {
-  if (FLAG_trace_turbo) {
-    char buffer[256];
-    Vector<char> filename(buffer, sizeof(buffer));
-    SmartArrayPointer<char> functionname;
-    if (!info_->shared_info().is_null()) {
-      functionname = info_->shared_info()->DebugName()->ToCString();
-      if (strlen(functionname.get()) > 0) {
-        SNPrintF(filename, "turbo-%s-%s", functionname.get(), phase);
-      } else {
-        SNPrintF(filename, "turbo-%p-%s", static_cast<void*>(info_), phase);
-      }
-    } else {
-      SNPrintF(filename, "turbo-none-%s", phase);
-    }
-    std::replace(filename.start(), filename.start() + filename.length(), ' ',
-                 '_');
+static void TraceSchedule(Schedule* schedule) {
+  if (!FLAG_trace_turbo_graph && !FLAG_trace_turbo_scheduler) return;
+  OFStream os(stdout);
+  os << "-- Schedule --------------------------------------\n" << *schedule;
+}
 
-    char dot_buffer[256];
-    Vector<char> dot_filename(dot_buffer, sizeof(dot_buffer));
-    SNPrintF(dot_filename, "%s.dot", filename.start());
-    FILE* dot_file = base::OS::FOpen(dot_filename.start(), "w+");
-    OFStream dot_of(dot_file);
-    dot_of << AsDOT(*graph);
-    fclose(dot_file);
-
-    char json_buffer[256];
-    Vector<char> json_filename(json_buffer, sizeof(json_buffer));
-    SNPrintF(json_filename, "%s.json", filename.start());
-    FILE* json_file = base::OS::FOpen(json_filename.start(), "w+");
-    OFStream json_of(json_file);
-    json_of << AsJSON(*graph);
-    fclose(json_file);
 
-    OFStream os(stdout);
-    os << "-- " << phase << " graph printed to file " << filename.start()
-       << "\n";
-  }
-  if (VerifyGraphs()) {
-    Verifier::Run(graph,
-        FLAG_turbo_types && !untyped ? Verifier::TYPED : Verifier::UNTYPED);
+static SmartArrayPointer<char> GetDebugName(CompilationInfo* info) {
+  SmartArrayPointer<char> name;
+  if (info->IsStub()) {
+    if (info->code_stub() != NULL) {
+      CodeStub::Major major_key = info->code_stub()->MajorKey();
+      const char* major_name = CodeStub::MajorName(major_key, false);
+      size_t len = strlen(major_name) + 1;  // Include the NUL terminator.
+      name.Reset(new char[len]);
+      memcpy(name.get(), major_name, len);
+    }
+  } else {
+    AllowHandleDereference allow_deref;
+    name = info->function()->debug_name()->ToCString();
   }
+  return name;
 }
 
 
 class AstGraphBuilderWithPositions : public AstGraphBuilder {
  public:
-  explicit AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
-                                        JSGraph* jsgraph,
-                                        SourcePositionTable* source_positions)
-      : AstGraphBuilder(local_zone, info, jsgraph),
+  AstGraphBuilderWithPositions(Zone* local_zone, CompilationInfo* info,
+                               JSGraph* jsgraph,
+                               LoopAssignmentAnalysis* loop_assignment,
+                               SourcePositionTable* source_positions)
+      : AstGraphBuilder(local_zone, info, jsgraph, loop_assignment),
         source_positions_(source_positions) {}
 
   bool CreateGraph() {
@@ -239,7 +303,7 @@ class AstGraphBuilderWithPositions : public AstGraphBuilder {
   }
 
 #define DEF_VISIT(type)                                               \
-  virtual void Visit##type(type* node) OVERRIDE {                     \
+  void Visit##type(type* node) OVERRIDE {                             \
     SourcePositionTable::Scope pos(source_positions_,                 \
                                    SourcePosition(node->position())); \
     AstGraphBuilder::Visit##type(node);                               \
@@ -247,33 +311,438 @@ class AstGraphBuilderWithPositions : public AstGraphBuilder {
   AST_NODE_LIST(DEF_VISIT)
 #undef DEF_VISIT
 
+  Node* GetFunctionContext() { return AstGraphBuilder::GetFunctionContext(); }
+
  private:
   SourcePositionTable* source_positions_;
 };
 
 
-static void TraceSchedule(Schedule* schedule) {
-  if (!FLAG_trace_turbo) return;
-  OFStream os(stdout);
-  os << "-- Schedule --------------------------------------\n" << *schedule;
+class PipelineRunScope {
+ public:
+  PipelineRunScope(PipelineData* data, const char* phase_name)
+      : phase_scope_(
+            phase_name == nullptr ? nullptr : data->pipeline_statistics(),
+            phase_name),
+        zone_scope_(data->zone_pool()) {}
+
+  Zone* zone() { return zone_scope_.zone(); }
+
+ private:
+  PhaseScope phase_scope_;
+  ZonePool::Scope zone_scope_;
+};
+
+
+template <typename Phase>
+void Pipeline::Run() {
+  PipelineRunScope scope(this->data_, Phase::phase_name());
+  Phase phase;
+  phase.Run(this->data_, scope.zone());
 }
 
 
-static SmartArrayPointer<char> GetDebugName(CompilationInfo* info) {
-  SmartArrayPointer<char> name;
-  if (info->IsStub()) {
-    if (info->code_stub() != NULL) {
-      CodeStub::Major major_key = info->code_stub()->MajorKey();
-      const char* major_name = CodeStub::MajorName(major_key, false);
-      size_t len = strlen(major_name);
-      name.Reset(new char[len]);
-      memcpy(name.get(), major_name, len);
+template <typename Phase, typename Arg0>
+void Pipeline::Run(Arg0 arg_0) {
+  PipelineRunScope scope(this->data_, Phase::phase_name());
+  Phase phase;
+  phase.Run(this->data_, scope.zone(), arg_0);
+}
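
Every phase below follows the same two-member contract these helpers rely on.
A hedged template of the shape (illustrative only, mirroring the structs that
follow):

    struct MyPhase {
      // Returning nullptr suppresses the statistics scope, as the print and
      // verify phases below do.
      static const char* phase_name() { return "my phase"; }
      void Run(PipelineData* data, Zone* temp_zone) {
        // temp_zone is per-phase scratch memory, destroyed when Run returns.
      }
    };
    // Invoked as Run<MyPhase>(); extra arguments flow via Run<Phase, Arg0>.
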
+
+
+struct LoopAssignmentAnalysisPhase {
+  static const char* phase_name() { return "loop assignment analysis"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    AstLoopAssignmentAnalyzer analyzer(data->graph_zone(), data->info());
+    LoopAssignmentAnalysis* loop_assignment = analyzer.Analyze();
+    data->set_loop_assignment(loop_assignment);
+  }
+};
+
+
+struct GraphBuilderPhase {
+  static const char* phase_name() { return "graph builder"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    AstGraphBuilderWithPositions graph_builder(
+        temp_zone, data->info(), data->jsgraph(), data->loop_assignment(),
+        data->source_positions());
+    if (graph_builder.CreateGraph()) {
+      data->set_context_node(graph_builder.GetFunctionContext());
+    } else {
+      data->set_compilation_failed();
     }
-  } else {
-    AllowHandleDereference allow_deref;
-    name = info->function()->debug_name()->ToCString();
   }
-  return name;
+};
+
+
+struct ContextSpecializerPhase {
+  static const char* phase_name() { return "context specializing"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    JSContextSpecializer spec(data->info(), data->jsgraph(),
+                              data->context_node());
+    GraphReducer graph_reducer(data->graph(), temp_zone);
+    graph_reducer.AddReducer(&spec);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+
+struct InliningPhase {
+  static const char* phase_name() { return "inlining"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    JSInliner inliner(temp_zone, data->info(), data->jsgraph());
+    inliner.Inline();
+  }
+};
+
+
+struct TyperPhase {
+  static const char* phase_name() { return "typer"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) { data->typer()->Run(); }
+};
+
+
+struct TypedLoweringPhase {
+  static const char* phase_name() { return "typed lowering"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    ValueNumberingReducer vn_reducer(temp_zone);
+    LoadElimination load_elimination;
+    JSBuiltinReducer builtin_reducer(data->jsgraph());
+    JSTypedLowering typed_lowering(data->jsgraph(), temp_zone);
+    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+    CommonOperatorReducer common_reducer;
+    GraphReducer graph_reducer(data->graph(), temp_zone);
+    graph_reducer.AddReducer(&vn_reducer);
+    graph_reducer.AddReducer(&builtin_reducer);
+    graph_reducer.AddReducer(&typed_lowering);
+    graph_reducer.AddReducer(&load_elimination);
+    graph_reducer.AddReducer(&simple_reducer);
+    graph_reducer.AddReducer(&common_reducer);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+
+struct SimplifiedLoweringPhase {
+  static const char* phase_name() { return "simplified lowering"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    SimplifiedLowering lowering(data->jsgraph());
+    lowering.LowerAllNodes();
+    ValueNumberingReducer vn_reducer(temp_zone);
+    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+    MachineOperatorReducer machine_reducer(data->jsgraph());
+    CommonOperatorReducer common_reducer;
+    GraphReducer graph_reducer(data->graph(), temp_zone);
+    graph_reducer.AddReducer(&vn_reducer);
+    graph_reducer.AddReducer(&simple_reducer);
+    graph_reducer.AddReducer(&machine_reducer);
+    graph_reducer.AddReducer(&common_reducer);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+
+struct ChangeLoweringPhase {
+  static const char* phase_name() { return "change lowering"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    Linkage linkage(data->graph_zone(), data->info());
+    ValueNumberingReducer vn_reducer(temp_zone);
+    SimplifiedOperatorReducer simple_reducer(data->jsgraph());
+    ChangeLowering lowering(data->jsgraph(), &linkage);
+    MachineOperatorReducer machine_reducer(data->jsgraph());
+    CommonOperatorReducer common_reducer;
+    GraphReducer graph_reducer(data->graph(), temp_zone);
+    graph_reducer.AddReducer(&vn_reducer);
+    graph_reducer.AddReducer(&simple_reducer);
+    graph_reducer.AddReducer(&lowering);
+    graph_reducer.AddReducer(&machine_reducer);
+    graph_reducer.AddReducer(&common_reducer);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+
+struct ControlReductionPhase {
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    ControlReducer::ReduceGraph(temp_zone, data->jsgraph(), data->common());
+  }
+};
+
+
+struct EarlyControlReductionPhase : ControlReductionPhase {
+  static const char* phase_name() { return "early control reduction"; }
+};
+
+
+struct LateControlReductionPhase : ControlReductionPhase {
+  static const char* phase_name() { return "late control reduction"; }
+};
+
+
+struct GenericLoweringPhase {
+  static const char* phase_name() { return "generic lowering"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    SourcePositionTable::Scope pos(data->source_positions(),
+                                   SourcePosition::Unknown());
+    JSGenericLowering generic(data->info(), data->jsgraph());
+    SelectLowering select(data->jsgraph()->graph(), data->jsgraph()->common());
+    GraphReducer graph_reducer(data->graph(), temp_zone);
+    graph_reducer.AddReducer(&generic);
+    graph_reducer.AddReducer(&select);
+    graph_reducer.ReduceGraph();
+  }
+};
+
+
+struct ComputeSchedulePhase {
+  static const char* phase_name() { return "scheduling"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    Schedule* schedule = Scheduler::ComputeSchedule(temp_zone, data->graph());
+    TraceSchedule(schedule);
+    if (VerifyGraphs()) ScheduleVerifier::Run(schedule);
+    data->set_schedule(schedule);
+  }
+};
+
+
+struct InstructionSelectionPhase {
+  static const char* phase_name() { return "select instructions"; }
+
+  void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
+    InstructionSelector selector(temp_zone, data->graph(), linkage,
+                                 data->sequence(), data->schedule(),
+                                 data->source_positions());
+    selector.SelectInstructions();
+  }
+};
+
+
+struct MeetRegisterConstraintsPhase {
+  static const char* phase_name() { return "meet register constraints"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->MeetRegisterConstraints();
+  }
+};
+
+
+struct ResolvePhisPhase {
+  static const char* phase_name() { return "resolve phis"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->ResolvePhis();
+  }
+};
+
+
+struct BuildLiveRangesPhase {
+  static const char* phase_name() { return "build live ranges"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->BuildLiveRanges();
+  }
+};
+
+
+struct AllocateGeneralRegistersPhase {
+  static const char* phase_name() { return "allocate general registers"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->AllocateGeneralRegisters();
+  }
+};
+
+
+struct AllocateDoubleRegistersPhase {
+  static const char* phase_name() { return "allocate double registers"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->AllocateDoubleRegisters();
+  }
+};
+
+
+struct ReuseSpillSlotsPhase {
+  static const char* phase_name() { return "reuse spill slots"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->ReuseSpillSlots();
+  }
+};
+
+
+struct CommitAssignmentPhase {
+  static const char* phase_name() { return "commit assignment"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->CommitAssignment();
+  }
+};
+
+
+struct PopulatePointerMapsPhase {
+  static const char* phase_name() { return "populate pointer maps"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->PopulatePointerMaps();
+  }
+};
+
+
+struct ConnectRangesPhase {
+  static const char* phase_name() { return "connect ranges"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->ConnectRanges();
+  }
+};
+
+
+struct ResolveControlFlowPhase {
+  static const char* phase_name() { return "resolve control flow"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    data->register_allocator()->ResolveControlFlow();
+  }
+};
+
+
+struct OptimizeMovesPhase {
+  static const char* phase_name() { return "optimize moves"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    MoveOptimizer move_optimizer(temp_zone, data->sequence());
+    move_optimizer.Run();
+  }
+};
+
+
+struct JumpThreadingPhase {
+  static const char* phase_name() { return "jump threading"; }
+
+  void Run(PipelineData* data, Zone* temp_zone) {
+    ZoneVector<BasicBlock::RpoNumber> result(temp_zone);
+    if (JumpThreading::ComputeForwarding(temp_zone, result, data->sequence())) {
+      JumpThreading::ApplyForwarding(result, data->sequence());
+    }
+  }
+};
+
+
+struct GenerateCodePhase {
+  static const char* phase_name() { return "generate code"; }
+
+  void Run(PipelineData* data, Zone* temp_zone, Linkage* linkage) {
+    CodeGenerator generator(data->frame(), linkage, data->sequence(),
+                            data->info());
+    data->set_code(generator.GenerateCode());
+  }
+};
+
+
+struct PrintGraphPhase {
+  static const char* phase_name() { return nullptr; }
+
+  void Run(PipelineData* data, Zone* temp_zone, const char* phase) {
+    CompilationInfo* info = data->info();
+    Graph* graph = data->graph();
+    char buffer[256];
+    Vector<char> filename(buffer, sizeof(buffer));
+    SmartArrayPointer<char> functionname;
+    if (!info->shared_info().is_null()) {
+      functionname = info->shared_info()->DebugName()->ToCString();
+      if (strlen(functionname.get()) > 0) {
+        SNPrintF(filename, "turbo-%s-%s", functionname.get(), phase);
+      } else {
+        SNPrintF(filename, "turbo-%p-%s", static_cast<void*>(info), phase);
+      }
+    } else {
+      SNPrintF(filename, "turbo-none-%s", phase);
+    }
+    std::replace(filename.start(), filename.start() + filename.length(), ' ',
+                 '_');
+
+    {  // Print dot.
+      char dot_buffer[256];
+      Vector<char> dot_filename(dot_buffer, sizeof(dot_buffer));
+      SNPrintF(dot_filename, "%s.dot", filename.start());
+      FILE* dot_file = base::OS::FOpen(dot_filename.start(), "w+");
+      if (dot_file == nullptr) return;
+      OFStream dot_of(dot_file);
+      dot_of << AsDOT(*graph);
+      fclose(dot_file);
+    }
+
+    {  // Print JSON.
+      char json_buffer[256];
+      Vector<char> json_filename(json_buffer, sizeof(json_buffer));
+      SNPrintF(json_filename, "%s.json", filename.start());
+      FILE* json_file = base::OS::FOpen(json_filename.start(), "w+");
+      if (json_file == nullptr) return;
+      OFStream json_of(json_file);
+      json_of << AsJSON(*graph);
+      fclose(json_file);
+    }
+
+    OFStream os(stdout);
+    if (FLAG_trace_turbo_graph) {  // Simple textual RPO.
+      os << "-- Graph after " << phase << " -- " << std::endl;
+      os << AsRPO(*graph);
+    }
+
+    os << "-- " << phase << " graph printed to file " << filename.start()
+       << std::endl;
+  }
+};
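
As a usage note, the emitted turbo-<name>-<phase>.dot files are ordinary
Graphviz input, so e.g. "dot -Tpng turbo-foo-typer.dot -o graph.png" renders
them (assuming Graphviz is installed; the file name is illustrative). The
.json variant is the same graph serialized via AsJSON for external tooling.
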
+
+
+struct VerifyGraphPhase {
+  static const char* phase_name() { return nullptr; }
+
+  void Run(PipelineData* data, Zone* temp_zone, const bool untyped) {
+    Verifier::Run(data->graph(), FLAG_turbo_types && !untyped
+                                     ? Verifier::TYPED
+                                     : Verifier::UNTYPED);
+  }
+};
+
+
+void Pipeline::BeginPhaseKind(const char* phase_kind_name) {
+  if (data_->pipeline_statistics() != NULL) {
+    data_->pipeline_statistics()->BeginPhaseKind(phase_kind_name);
+  }
+}
+
+
+void Pipeline::RunPrintAndVerify(const char* phase, bool untyped) {
+  if (FLAG_trace_turbo) {
+    Run<PrintGraphPhase>(phase);
+  }
+  if (VerifyGraphs()) {
+    Run<VerifyGraphPhase>(untyped);
+  }
 }
 
 
@@ -293,13 +762,19 @@ Handle<Code> Pipeline::GenerateCode() {
   }
 
   ZonePool zone_pool(isolate());
-
   SmartPointer<PipelineStatistics> pipeline_statistics;
+
   if (FLAG_turbo_stats) {
     pipeline_statistics.Reset(new PipelineStatistics(info(), &zone_pool));
-    pipeline_statistics->BeginPhaseKind("graph creation");
+    pipeline_statistics->BeginPhaseKind("initializing");
   }
 
+  PipelineData data(&zone_pool, info());
+  this->data_ = &data;
+  data.Initialize(pipeline_statistics.get());
+
+  BeginPhaseKind("graph creation");
+
   if (FLAG_trace_turbo) {
     OFStream os(stdout);
     os << "---------------------------------------------------\n"
@@ -309,56 +784,32 @@ Handle<Code> Pipeline::GenerateCode() {
     tcf << AsC1VCompilation(info());
   }
 
-  // Initialize the graph and builders.
-  PipelineData data(info(), &zone_pool, pipeline_statistics.get());
-
   data.source_positions()->AddDecorator();
 
-  Node* context_node;
-  {
-    PhaseScope phase_scope(pipeline_statistics.get(), "graph builder");
-    ZonePool::Scope zone_scope(data.zone_pool());
-    AstGraphBuilderWithPositions graph_builder(
-        zone_scope.zone(), info(), data.jsgraph(), data.source_positions());
-    if (!graph_builder.CreateGraph()) return Handle<Code>::null();
-    context_node = graph_builder.GetFunctionContext();
+  if (FLAG_loop_assignment_analysis) {
+    Run<LoopAssignmentAnalysisPhase>();
   }
 
-  VerifyAndPrintGraph(data.graph(), "Initial untyped", true);
-
-  {
-    PhaseScope phase_scope(pipeline_statistics.get(),
-                           "early control reduction");
-    SourcePositionTable::Scope pos(data.source_positions(),
-                                   SourcePosition::Unknown());
-    ZonePool::Scope zone_scope(data.zone_pool());
-    ControlReducer::ReduceGraph(zone_scope.zone(), data.jsgraph(),
-                                data.common());
+  Run<GraphBuilderPhase>();
+  if (data.compilation_failed()) return Handle<Code>::null();
+  RunPrintAndVerify("Initial untyped", true);
 
-    VerifyAndPrintGraph(data.graph(), "Early Control reduced", true);
-  }
+  Run<EarlyControlReductionPhase>();
+  RunPrintAndVerify("Early Control reduced", true);
 
   if (info()->is_context_specializing()) {
-    SourcePositionTable::Scope pos(data.source_positions(),
-                                   SourcePosition::Unknown());
     // Specialize the code to the context as aggressively as possible.
-    JSContextSpecializer spec(info(), data.jsgraph(), context_node);
-    spec.SpecializeToContext();
-    VerifyAndPrintGraph(data.graph(), "Context specialized", true);
+    Run<ContextSpecializerPhase>();
+    RunPrintAndVerify("Context specialized", true);
   }
 
   if (info()->is_inlining_enabled()) {
-    PhaseScope phase_scope(pipeline_statistics.get(), "inlining");
-    SourcePositionTable::Scope pos(data.source_positions(),
-                                   SourcePosition::Unknown());
-    ZonePool::Scope zone_scope(data.zone_pool());
-    JSInliner inliner(zone_scope.zone(), info(), data.jsgraph());
-    inliner.Inline();
-    VerifyAndPrintGraph(data.graph(), "Inlined", true);
+    Run<InliningPhase>();
+    RunPrintAndVerify("Inlined", true);
   }
 
-  // Print a replay of the initial graph.
   if (FLAG_print_turbo_replay) {
+    // Print a replay of the initial graph.
     GraphReplayPrinter::PrintReplay(data.graph());
   }
 
@@ -366,125 +817,57 @@ Handle<Code> Pipeline::GenerateCode() {
   if (!SupportedTarget()) return Handle<Code>::null();
 
   if (info()->is_typing_enabled()) {
-    {
-      // Type the graph.
-      PhaseScope phase_scope(pipeline_statistics.get(), "typer");
-      data.typer()->Run();
-      VerifyAndPrintGraph(data.graph(), "Typed");
-    }
+    // Type the graph.
+    Run<TyperPhase>();
+    RunPrintAndVerify("Typed");
   }
 
-  if (!pipeline_statistics.is_empty()) {
-    pipeline_statistics->BeginPhaseKind("lowering");
-  }
+  BeginPhaseKind("lowering");
 
   if (info()->is_typing_enabled()) {
-    {
-      // Lower JSOperators where we can determine types.
-      PhaseScope phase_scope(pipeline_statistics.get(), "typed lowering");
-      SourcePositionTable::Scope pos(data.source_positions(),
-                                     SourcePosition::Unknown());
-      ValueNumberingReducer vn_reducer(data.graph_zone());
-      JSTypedLowering lowering(data.jsgraph());
-      SimplifiedOperatorReducer simple_reducer(data.jsgraph());
-      GraphReducer graph_reducer(data.graph());
-      graph_reducer.AddReducer(&vn_reducer);
-      graph_reducer.AddReducer(&lowering);
-      graph_reducer.AddReducer(&simple_reducer);
-      graph_reducer.ReduceGraph();
-
-      VerifyAndPrintGraph(data.graph(), "Lowered typed");
-    }
-    {
-      // Lower simplified operators and insert changes.
-      PhaseScope phase_scope(pipeline_statistics.get(), "simplified lowering");
-      SourcePositionTable::Scope pos(data.source_positions(),
-                                     SourcePosition::Unknown());
-      SimplifiedLowering lowering(data.jsgraph());
-      lowering.LowerAllNodes();
-      ValueNumberingReducer vn_reducer(data.graph_zone());
-      SimplifiedOperatorReducer simple_reducer(data.jsgraph());
-      GraphReducer graph_reducer(data.graph());
-      graph_reducer.AddReducer(&vn_reducer);
-      graph_reducer.AddReducer(&simple_reducer);
-      graph_reducer.ReduceGraph();
-
-      VerifyAndPrintGraph(data.graph(), "Lowered simplified");
-    }
-    {
-      // Lower changes that have been inserted before.
-      PhaseScope phase_scope(pipeline_statistics.get(), "change lowering");
-      SourcePositionTable::Scope pos(data.source_positions(),
-                                     SourcePosition::Unknown());
-      Linkage linkage(data.graph_zone(), info());
-      ValueNumberingReducer vn_reducer(data.graph_zone());
-      SimplifiedOperatorReducer simple_reducer(data.jsgraph());
-      ChangeLowering lowering(data.jsgraph(), &linkage);
-      MachineOperatorReducer mach_reducer(data.jsgraph());
-      GraphReducer graph_reducer(data.graph());
-      // TODO(titzer): Figure out if we should run all reducers at once here.
-      graph_reducer.AddReducer(&vn_reducer);
-      graph_reducer.AddReducer(&simple_reducer);
-      graph_reducer.AddReducer(&lowering);
-      graph_reducer.AddReducer(&mach_reducer);
-      graph_reducer.ReduceGraph();
-
-      // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
-      VerifyAndPrintGraph(data.graph(), "Lowered changes", true);
-    }
-
-    {
-      PhaseScope phase_scope(pipeline_statistics.get(),
-                             "late control reduction");
-      SourcePositionTable::Scope pos(data.source_positions(),
-                                     SourcePosition::Unknown());
-      ZonePool::Scope zone_scope(data.zone_pool());
-      ControlReducer::ReduceGraph(zone_scope.zone(), data.jsgraph(),
-                                  data.common());
+    // Lower JSOperators where we can determine types.
+    Run<TypedLoweringPhase>();
+    RunPrintAndVerify("Lowered typed");
 
-      VerifyAndPrintGraph(data.graph(), "Late Control reduced");
-    }
-  }
+    // Lower simplified operators and insert changes.
+    Run<SimplifiedLoweringPhase>();
+    RunPrintAndVerify("Lowered simplified");
 
-  {
-    // Lower any remaining generic JSOperators.
-    PhaseScope phase_scope(pipeline_statistics.get(), "generic lowering");
-    SourcePositionTable::Scope pos(data.source_positions(),
-                                   SourcePosition::Unknown());
-    JSGenericLowering generic(info(), data.jsgraph());
-    SelectLowering select(data.jsgraph()->graph(), data.jsgraph()->common());
-    GraphReducer graph_reducer(data.graph());
-    graph_reducer.AddReducer(&generic);
-    graph_reducer.AddReducer(&select);
-    graph_reducer.ReduceGraph();
+    // Lower changes that have been inserted before.
+    Run<ChangeLoweringPhase>();
+    // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+    RunPrintAndVerify("Lowered changes", true);
 
-    // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
-    VerifyAndPrintGraph(data.graph(), "Lowered generic", true);
+    Run<LateControlReductionPhase>();
+    RunPrintAndVerify("Late Control reduced");
   }
 
-  if (!pipeline_statistics.is_empty()) {
-    pipeline_statistics->BeginPhaseKind("block building");
-  }
+  // Lower any remaining generic JSOperators.
+  Run<GenericLoweringPhase>();
+  // TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
+  RunPrintAndVerify("Lowered generic", true);
+
+  BeginPhaseKind("block building");
 
   data.source_positions()->RemoveDecorator();
 
   // Compute a schedule.
-  ComputeSchedule(&data);
+  Run<ComputeSchedulePhase>();
 
-  Handle<Code> code = Handle<Code>::null();
   {
     // Generate optimized code.
     Linkage linkage(data.instruction_zone(), info());
-    code = GenerateCode(&linkage, &data);
-    info()->SetCode(code);
+    GenerateCode(&linkage);
   }
+  Handle<Code> code = data.code();
+  info()->SetCode(code);
 
   // Print optimized code.
   v8::internal::CodeGenerator::PrintCode(code, info());
 
   if (FLAG_trace_turbo) {
     OFStream os(stdout);
-    os << "--------------------------------------------------\n"
+    os << "---------------------------------------------------\n"
        << "Finished compiling method " << GetDebugName(info()).get()
        << " using Turbofan" << std::endl;
   }
@@ -493,34 +876,48 @@ Handle<Code> Pipeline::GenerateCode() {
 }
 
 
-void Pipeline::ComputeSchedule(PipelineData* data) {
-  PhaseScope phase_scope(data->pipeline_statistics(), "scheduling");
-  Schedule* schedule =
-      Scheduler::ComputeSchedule(data->zone_pool(), data->graph());
-  TraceSchedule(schedule);
-  if (VerifyGraphs()) ScheduleVerifier::Run(schedule);
-  data->set_schedule(schedule);
+Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
+                                              Graph* graph,
+                                              Schedule* schedule) {
+  CallDescriptor* call_descriptor =
+      Linkage::ComputeIncoming(info->zone(), info);
+  return GenerateCodeForTesting(info, call_descriptor, graph, schedule);
 }
 
 
-Handle<Code> Pipeline::GenerateCodeForMachineGraph(Linkage* linkage,
-                                                   Graph* graph,
-                                                   Schedule* schedule) {
-  ZonePool zone_pool(isolate());
+Handle<Code> Pipeline::GenerateCodeForTesting(CallDescriptor* call_descriptor,
+                                              Graph* graph,
+                                              Schedule* schedule) {
+  CompilationInfo info(graph->zone()->isolate(), graph->zone());
+  return GenerateCodeForTesting(&info, call_descriptor, graph, schedule);
+}
+
+
+Handle<Code> Pipeline::GenerateCodeForTesting(CompilationInfo* info,
+                                              CallDescriptor* call_descriptor,
+                                              Graph* graph,
+                                              Schedule* schedule) {
   CHECK(SupportedBackend());
-  PipelineData data(graph, schedule, &zone_pool);
+  ZonePool zone_pool(info->isolate());
+  Pipeline pipeline(info);
+  PipelineData data(&zone_pool, info);
+  pipeline.data_ = &data;
+  data.InitializeForTesting(graph, schedule);
   if (schedule == NULL) {
     // TODO(rossberg): Should this really be untyped?
-    VerifyAndPrintGraph(graph, "Machine", true);
-    ComputeSchedule(&data);
+    pipeline.RunPrintAndVerify("Machine", true);
+    pipeline.Run<ComputeSchedulePhase>();
   } else {
     TraceSchedule(schedule);
   }
 
-  Handle<Code> code = GenerateCode(linkage, &data);
+  Linkage linkage(info->zone(), call_descriptor);
+  pipeline.GenerateCode(&linkage);
+  Handle<Code> code = data.code();
+
 #if ENABLE_DISASSEMBLER
   if (!code.is_null() && FLAG_print_opt_code) {
-    CodeTracer::Scope tracing_scope(isolate()->GetCodeTracer());
+    CodeTracer::Scope tracing_scope(info->isolate()->GetCodeTracer());
     OFStream os(tracing_scope.file());
     code->Disassemble("test code", os);
   }
@@ -529,7 +926,23 @@ Handle<Code> Pipeline::GenerateCodeForMachineGraph(Linkage* linkage,
 }
 
 
-Handle<Code> Pipeline::GenerateCode(Linkage* linkage, PipelineData* data) {
+bool Pipeline::AllocateRegistersForTesting(const RegisterConfiguration* config,
+                                           InstructionSequence* sequence,
+                                           bool run_verifier) {
+  CompilationInfo info(sequence->zone()->isolate(), sequence->zone());
+  ZonePool zone_pool(sequence->zone()->isolate());
+  PipelineData data(&zone_pool, &info);
+  data.InitializeForTesting(sequence);
+  Pipeline pipeline(&info);
+  pipeline.data_ = &data;
+  pipeline.AllocateRegisters(config, run_verifier);
+  return !data.compilation_failed();
+}
+
+
+void Pipeline::GenerateCode(Linkage* linkage) {
+  PipelineData* data = this->data_;
+
   DCHECK_NOT_NULL(linkage);
   DCHECK_NOT_NULL(data->graph());
   DCHECK_NOT_NULL(data->schedule());
@@ -541,94 +954,129 @@ Handle<Code> Pipeline::GenerateCode(Linkage* linkage, PipelineData* data) {
                                                        data->schedule());
   }
 
-  InstructionBlocks* instruction_blocks =
-      InstructionSequence::InstructionBlocksFor(data->instruction_zone(),
-                                                data->schedule());
-  InstructionSequence sequence(data->instruction_zone(), instruction_blocks);
+  data->InitializeInstructionSequence();
 
   // Select and schedule instructions covering the scheduled graph.
-  {
-    PhaseScope phase_scope(data->pipeline_statistics(), "select instructions");
-    ZonePool::Scope zone_scope(data->zone_pool());
-    InstructionSelector selector(zone_scope.zone(), data->graph(), linkage,
-                                 &sequence, data->schedule(),
-                                 data->source_positions());
-    selector.SelectInstructions();
-  }
+  Run<InstructionSelectionPhase>(linkage);
 
-  if (FLAG_trace_turbo) {
-    OFStream os(stdout);
-    PrintableInstructionSequence printable = {
-        RegisterConfiguration::ArchDefault(), &sequence};
-    os << "----- Instruction sequence before register allocation -----\n"
-       << printable;
+  if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
     TurboCfgFile tcf(isolate());
     tcf << AsC1V("CodeGen", data->schedule(), data->source_positions(),
-                 &sequence);
+                 data->sequence());
   }
 
   data->DeleteGraphZone();
 
-  if (data->pipeline_statistics() != NULL) {
-    data->pipeline_statistics()->BeginPhaseKind("register allocation");
-  }
+  BeginPhaseKind("register allocation");
 
+  bool run_verifier = false;
+#ifdef DEBUG
+  run_verifier = true;
+#endif
   // Allocate registers.
-  Frame frame;
-  {
-    int node_count = sequence.VirtualRegisterCount();
-    if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
-      info()->AbortOptimization(kNotEnoughVirtualRegistersForValues);
-      return Handle<Code>::null();
-    }
-    ZonePool::Scope zone_scope(data->zone_pool());
+  AllocateRegisters(RegisterConfiguration::ArchDefault(), run_verifier);
+  if (data->compilation_failed()) {
+    info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
+    return;
+  }
+
+  BeginPhaseKind("code generation");
+
+  // Optimize jumps.
+  if (FLAG_turbo_jt) {
+    Run<JumpThreadingPhase>();
+  }
+
+  // Generate final machine code.
+  Run<GenerateCodePhase>(linkage);
 
-    SmartArrayPointer<char> debug_name;
+  if (profiler_data != NULL) {
+#if ENABLE_DISASSEMBLER
+    std::ostringstream os;
+    data->code()->Disassemble(NULL, os);
+    profiler_data->SetCode(&os);
+#endif
+  }
+}
+
+
+void Pipeline::AllocateRegisters(const RegisterConfiguration* config,
+                                 bool run_verifier) {
+  PipelineData* data = this->data_;
+
+  int node_count = data->sequence()->VirtualRegisterCount();
+  if (node_count > UnallocatedOperand::kMaxVirtualRegisters) {
+    data->set_compilation_failed();
+    return;
+  }
+
+  // Don't track usage for this zone in compiler stats.
+  SmartPointer<Zone> verifier_zone;
+  RegisterAllocatorVerifier* verifier = nullptr;
+  if (run_verifier) {
+    verifier_zone.Reset(new Zone(info()->isolate()));
+    verifier = new (verifier_zone.get()) RegisterAllocatorVerifier(
+        verifier_zone.get(), config, data->sequence());
+  }
+
+  SmartArrayPointer<char> debug_name;
 #ifdef DEBUG
-    debug_name = GetDebugName(info());
+  debug_name = GetDebugName(data->info());
 #endif
 
+  ZonePool::Scope zone_scope(data->zone_pool());
+  data->InitializeRegisterAllocator(zone_scope.zone(), config,
+                                    debug_name.get());
 
-    RegisterAllocator allocator(RegisterConfiguration::ArchDefault(),
-                                zone_scope.zone(), &frame, &sequence,
-                                debug_name.get());
-    if (!allocator.Allocate(data->pipeline_statistics())) {
-      info()->AbortOptimization(kNotEnoughVirtualRegistersRegalloc);
-      return Handle<Code>::null();
-    }
-    if (FLAG_trace_turbo) {
-      TurboCfgFile tcf(isolate());
-      tcf << AsC1VAllocator("CodeGen", &allocator);
-    }
+  Run<MeetRegisterConstraintsPhase>();
+  Run<ResolvePhisPhase>();
+  Run<BuildLiveRangesPhase>();
+  if (FLAG_trace_turbo_graph) {
+    OFStream os(stdout);
+    PrintableInstructionSequence printable = {config, data->sequence()};
+    os << "----- Instruction sequence before register allocation -----\n"
+       << printable;
+  }
+  if (verifier != nullptr) {
+    CHECK(!data->register_allocator()->ExistsUseWithoutDefinition());
+  }
+  Run<AllocateGeneralRegistersPhase>();
+  if (!data->register_allocator()->AllocationOk()) {
+    data->set_compilation_failed();
+    return;
+  }
+  Run<AllocateDoubleRegistersPhase>();
+  if (!data->register_allocator()->AllocationOk()) {
+    data->set_compilation_failed();
+    return;
+  }
+  if (FLAG_turbo_reuse_spill_slots) {
+    Run<ReuseSpillSlotsPhase>();
+  }
+  Run<CommitAssignmentPhase>();
+  Run<PopulatePointerMapsPhase>();
+  Run<ConnectRangesPhase>();
+  Run<ResolveControlFlowPhase>();
+  if (FLAG_turbo_move_optimization) {
+    Run<OptimizeMovesPhase>();
   }
 
-  if (FLAG_trace_turbo) {
+  if (FLAG_trace_turbo_graph) {
     OFStream os(stdout);
-    PrintableInstructionSequence printable = {
-        RegisterConfiguration::ArchDefault(), &sequence};
+    PrintableInstructionSequence printable = {config, data->sequence()};
     os << "----- Instruction sequence after register allocation -----\n"
        << printable;
   }
 
-  if (data->pipeline_statistics() != NULL) {
-    data->pipeline_statistics()->BeginPhaseKind("code generation");
+  if (verifier != nullptr) {
+    verifier->VerifyAssignment();
+    verifier->VerifyGapMoves();
   }
 
-  // Generate native sequence.
-  Handle<Code> code;
-  {
-    PhaseScope phase_scope(data->pipeline_statistics(), "generate code");
-    CodeGenerator generator(&frame, linkage, &sequence, info());
-    code = generator.GenerateCode();
+  if (FLAG_trace_turbo && !data->MayHaveUnverifiableGraph()) {
+    TurboCfgFile tcf(data->isolate());
+    tcf << AsC1VAllocator("CodeGen", data->register_allocator());
   }
-  if (profiler_data != NULL) {
-#if ENABLE_DISASSEMBLER
-    std::ostringstream os;
-    code->Disassemble(NULL, os);
-    profiler_data->SetCode(&os);
-#endif
-  }
-  return code;
 }
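
The new testing entry point above makes it possible to drive just the
allocator phases over a hand-built InstructionSequence. A hedged sketch of a
call site (BuildTestSequence is a hypothetical helper, not part of this
patch):

    // Run only register allocation plus verification over a test sequence.
    InstructionSequence* sequence = BuildTestSequence();  // hypothetical
    bool ok = Pipeline::AllocateRegistersForTesting(
        RegisterConfiguration::ArchDefault(), sequence, /*run_verifier=*/true);
    CHECK(ok);  // false if e.g. the virtual register budget was exceeded
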
 
 
index 7811300..73053dc 100644 (file)
@@ -17,9 +17,12 @@ namespace internal {
 namespace compiler {
 
 // Clients of this interface shouldn't depend on lots of compiler internals.
+class CallDescriptor;
 class Graph;
+class InstructionSequence;
 class Linkage;
 class PipelineData;
+class RegisterConfiguration;
 class Schedule;
 
 class Pipeline {
@@ -29,10 +32,22 @@ class Pipeline {
   // Run the entire pipeline and generate a handle to a code object.
   Handle<Code> GenerateCode();
 
-  // Run the pipeline on a machine graph and generate code. If {schedule}
-  // is {NULL}, then compute a new schedule for code generation.
-  Handle<Code> GenerateCodeForMachineGraph(Linkage* linkage, Graph* graph,
-                                           Schedule* schedule = NULL);
+  // Run the pipeline on a machine graph and generate code. If {schedule} is
+  // {nullptr}, then compute a new schedule for code generation.
+  static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
+                                             Graph* graph,
+                                             Schedule* schedule = nullptr);
+
+  // Run the pipeline on a machine graph and generate code. If {schedule} is
+  // {nullptr}, then compute a new schedule for code generation.
+  static Handle<Code> GenerateCodeForTesting(CallDescriptor* call_descriptor,
+                                             Graph* graph,
+                                             Schedule* schedule = nullptr);
+
+  // Run just the register allocator phases.
+  static bool AllocateRegistersForTesting(const RegisterConfiguration* config,
+                                          InstructionSequence* sequence,
+                                          bool run_verifier);
 
   static inline bool SupportedBackend() { return V8_TURBOFAN_BACKEND != 0; }
   static inline bool SupportedTarget() { return V8_TURBOFAN_TARGET != 0; }
@@ -41,18 +56,31 @@ class Pipeline {
   static void TearDown();
 
  private:
+  static Handle<Code> GenerateCodeForTesting(CompilationInfo* info,
+                                             CallDescriptor* call_descriptor,
+                                             Graph* graph, Schedule* schedule);
+
   CompilationInfo* info_;
+  PipelineData* data_;
+
+  // Helpers for executing pipeline phases.
+  template <typename Phase>
+  void Run();
+  template <typename Phase, typename Arg0>
+  void Run(Arg0 arg_0);
 
   CompilationInfo* info() const { return info_; }
   Isolate* isolate() { return info_->isolate(); }
 
-  void ComputeSchedule(PipelineData* data);
-  void VerifyAndPrintGraph(Graph* graph, const char* phase,
-                           bool untyped = false);
-  Handle<Code> GenerateCode(Linkage* linkage, PipelineData* data);
+  void BeginPhaseKind(const char* phase_kind);
+  void RunPrintAndVerify(const char* phase, bool untyped = false);
+  void GenerateCode(Linkage* linkage);
+  void AllocateRegisters(const RegisterConfiguration* config,
+                         bool run_verifier);
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_PIPELINE_H_
index a8b658f..b93ec66 100644 (file)
@@ -17,7 +17,7 @@ RawMachineAssembler::RawMachineAssembler(Graph* graph,
                                          MachineOperatorBuilder::Flags flags)
     : GraphBuilder(graph),
       schedule_(new (zone()) Schedule(zone())),
-      machine_(word, flags),
+      machine_(zone(), word, flags),
       common_(zone()),
       machine_sig_(machine_sig),
       call_descriptor_(
@@ -40,8 +40,7 @@ RawMachineAssembler::RawMachineAssembler(Graph* graph,
 Schedule* RawMachineAssembler::Export() {
   // Compute the correct codegen order.
   DCHECK(schedule_->rpo_order()->empty());
-  ZonePool zone_pool(isolate());
-  Scheduler::ComputeSpecialRPO(&zone_pool, schedule_);
+  Scheduler::ComputeSpecialRPO(zone(), schedule_);
   // Invalidate MachineAssembler.
   Schedule* schedule = schedule_;
   schedule_ = NULL;
@@ -88,7 +87,8 @@ Node* RawMachineAssembler::CallFunctionStub0(Node* function, Node* receiver,
                                              CallFunctionFlags flags) {
   Callable callable = CodeFactory::CallFunction(isolate(), 0, flags);
   CallDescriptor* desc = Linkage::GetStubCallDescriptor(
-      callable.descriptor(), 1, CallDescriptor::kNeedsFrameState, zone());
+      callable.descriptor(), 1, CallDescriptor::kNeedsFrameState,
+      Operator::kNoProperties, zone());
   Node* stub_code = HeapConstant(callable.code());
   Node* call = graph()->NewNode(common()->Call(desc), stub_code, function,
                                 receiver, context, frame_state);
index b7b82b1..5455814 100644 (file)
@@ -48,7 +48,7 @@ class RawMachineAssembler : public GraphBuilder {
                       MachineType word = kMachPtr,
                       MachineOperatorBuilder::Flags flags =
                           MachineOperatorBuilder::Flag::kNoFlags);
-  virtual ~RawMachineAssembler() {}
+  ~RawMachineAssembler() OVERRIDE {}
 
   Isolate* isolate() const { return zone()->isolate(); }
   Zone* zone() const { return graph()->zone(); }
@@ -430,8 +430,8 @@ class RawMachineAssembler : public GraphBuilder {
   Schedule* Export();
 
  protected:
-  virtual Node* MakeNode(const Operator* op, int input_count, Node** inputs,
-                         bool incomplete) FINAL;
+  Node* MakeNode(const Operator* op, int input_count, Node** inputs,
+                 bool incomplete) FINAL;
 
   bool ScheduleValid() { return schedule_ != NULL; }
 
diff --git a/deps/v8/src/compiler/register-allocator-verifier.cc b/deps/v8/src/compiler/register-allocator-verifier.cc
new file mode 100644 (file)
index 0000000..dabfd59
--- /dev/null
@@ -0,0 +1,460 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/register-allocator-verifier.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static size_t OperandCount(const Instruction* instr) {
+  return instr->InputCount() + instr->OutputCount() + instr->TempCount();
+}
+
+
+static void VerifyGapEmpty(const GapInstruction* gap) {
+  for (int i = GapInstruction::FIRST_INNER_POSITION;
+       i <= GapInstruction::LAST_INNER_POSITION; i++) {
+    GapInstruction::InnerPosition inner_pos =
+        static_cast<GapInstruction::InnerPosition>(i);
+    CHECK_EQ(NULL, gap->GetParallelMove(inner_pos));
+  }
+}
+
+
+void RegisterAllocatorVerifier::VerifyInput(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kSameAsFirst, constraint.type_);
+  if (constraint.type_ != kImmediate) {
+    CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+             constraint.virtual_register_);
+  }
+}
+
+
+void RegisterAllocatorVerifier::VerifyTemp(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kSameAsFirst, constraint.type_);
+  CHECK_NE(kImmediate, constraint.type_);
+  CHECK_NE(kConstant, constraint.type_);
+  CHECK_EQ(UnallocatedOperand::kInvalidVirtualRegister,
+           constraint.virtual_register_);
+}
+
+
+void RegisterAllocatorVerifier::VerifyOutput(
+    const OperandConstraint& constraint) {
+  CHECK_NE(kImmediate, constraint.type_);
+  CHECK_NE(UnallocatedOperand::kInvalidVirtualRegister,
+           constraint.virtual_register_);
+}
+
+
+RegisterAllocatorVerifier::RegisterAllocatorVerifier(
+    Zone* zone, const RegisterConfiguration* config,
+    const InstructionSequence* sequence)
+    : zone_(zone), config_(config), sequence_(sequence), constraints_(zone) {
+  constraints_.reserve(sequence->instructions().size());
+  // TODO(dcarney): model unique constraints.
+  // Construct OperandConstraints for all InstructionOperands, eliminating
+  // kSameAsFirst along the way.
+  for (const auto* instr : sequence->instructions()) {
+    const size_t operand_count = OperandCount(instr);
+    auto* op_constraints =
+        zone->NewArray<OperandConstraint>(static_cast<int>(operand_count));
+    size_t count = 0;
+    for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
+      BuildConstraint(instr->InputAt(i), &op_constraints[count]);
+      VerifyInput(op_constraints[count]);
+    }
+    for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+      BuildConstraint(instr->TempAt(i), &op_constraints[count]);
+      VerifyTemp(op_constraints[count]);
+    }
+    for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+      BuildConstraint(instr->OutputAt(i), &op_constraints[count]);
+      if (op_constraints[count].type_ == kSameAsFirst) {
+        CHECK(instr->InputCount() > 0);
+        op_constraints[count].type_ = op_constraints[0].type_;
+        op_constraints[count].value_ = op_constraints[0].value_;
+      }
+      VerifyOutput(op_constraints[count]);
+    }
+    // All gaps should be totally unallocated at this point.
+    if (instr->IsGapMoves()) {
+      CHECK(operand_count == 0);
+      VerifyGapEmpty(GapInstruction::cast(instr));
+    }
+    InstructionConstraint instr_constraint = {instr, operand_count,
+                                              op_constraints};
+    constraints()->push_back(instr_constraint);
+  }
+}
+
+
+void RegisterAllocatorVerifier::VerifyAssignment() {
+  CHECK(sequence()->instructions().size() == constraints()->size());
+  auto instr_it = sequence()->begin();
+  for (const auto& instr_constraint : *constraints()) {
+    const auto* instr = instr_constraint.instruction_;
+    const size_t operand_count = instr_constraint.operand_constraints_size_;
+    const auto* op_constraints = instr_constraint.operand_constraints_;
+    CHECK_EQ(instr, *instr_it);
+    CHECK(operand_count == OperandCount(instr));
+    size_t count = 0;
+    for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
+      CheckConstraint(instr->InputAt(i), &op_constraints[count]);
+    }
+    for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+      CheckConstraint(instr->TempAt(i), &op_constraints[count]);
+    }
+    for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+      CheckConstraint(instr->OutputAt(i), &op_constraints[count]);
+    }
+    ++instr_it;
+  }
+}
+
+
+void RegisterAllocatorVerifier::BuildConstraint(const InstructionOperand* op,
+                                                OperandConstraint* constraint) {
+  constraint->value_ = kMinInt;
+  constraint->virtual_register_ = UnallocatedOperand::kInvalidVirtualRegister;
+  if (op->IsConstant()) {
+    constraint->type_ = kConstant;
+    constraint->value_ = ConstantOperand::cast(op)->index();
+    constraint->virtual_register_ = constraint->value_;
+  } else if (op->IsImmediate()) {
+    constraint->type_ = kImmediate;
+    constraint->value_ = ImmediateOperand::cast(op)->index();
+  } else {
+    CHECK(op->IsUnallocated());
+    const auto* unallocated = UnallocatedOperand::cast(op);
+    int vreg = unallocated->virtual_register();
+    constraint->virtual_register_ = vreg;
+    if (unallocated->basic_policy() == UnallocatedOperand::FIXED_SLOT) {
+      constraint->type_ = kFixedSlot;
+      constraint->value_ = unallocated->fixed_slot_index();
+    } else {
+      switch (unallocated->extended_policy()) {
+        case UnallocatedOperand::ANY:
+          CHECK(false);
+          break;
+        case UnallocatedOperand::NONE:
+          if (sequence()->IsDouble(vreg)) {
+            constraint->type_ = kNoneDouble;
+          } else {
+            constraint->type_ = kNone;
+          }
+          break;
+        case UnallocatedOperand::FIXED_REGISTER:
+          constraint->type_ = kFixedRegister;
+          constraint->value_ = unallocated->fixed_register_index();
+          break;
+        case UnallocatedOperand::FIXED_DOUBLE_REGISTER:
+          constraint->type_ = kFixedDoubleRegister;
+          constraint->value_ = unallocated->fixed_register_index();
+          break;
+        case UnallocatedOperand::MUST_HAVE_REGISTER:
+          if (sequence()->IsDouble(vreg)) {
+            constraint->type_ = kDoubleRegister;
+          } else {
+            constraint->type_ = kRegister;
+          }
+          break;
+        case UnallocatedOperand::SAME_AS_FIRST_INPUT:
+          constraint->type_ = kSameAsFirst;
+          break;
+      }
+    }
+  }
+}
+
+
+void RegisterAllocatorVerifier::CheckConstraint(
+    const InstructionOperand* op, const OperandConstraint* constraint) {
+  switch (constraint->type_) {
+    case kConstant:
+      CHECK(op->IsConstant());
+      CHECK_EQ(op->index(), constraint->value_);
+      return;
+    case kImmediate:
+      CHECK(op->IsImmediate());
+      CHECK_EQ(op->index(), constraint->value_);
+      return;
+    case kRegister:
+      CHECK(op->IsRegister());
+      return;
+    case kFixedRegister:
+      CHECK(op->IsRegister());
+      CHECK_EQ(op->index(), constraint->value_);
+      return;
+    case kDoubleRegister:
+      CHECK(op->IsDoubleRegister());
+      return;
+    case kFixedDoubleRegister:
+      CHECK(op->IsDoubleRegister());
+      CHECK_EQ(op->index(), constraint->value_);
+      return;
+    case kFixedSlot:
+      CHECK(op->IsStackSlot());
+      CHECK_EQ(op->index(), constraint->value_);
+      return;
+    case kNone:
+      CHECK(op->IsRegister() || op->IsStackSlot());
+      return;
+    case kNoneDouble:
+      CHECK(op->IsDoubleRegister() || op->IsDoubleStackSlot());
+      return;
+    case kSameAsFirst:
+      CHECK(false);
+      return;
+  }
+}
+
+
+class RegisterAllocatorVerifier::OutgoingMapping : public ZoneObject {
+ public:
+  struct OperandLess {
+    bool operator()(const InstructionOperand* a,
+                    const InstructionOperand* b) const {
+      if (a->kind() == b->kind()) return a->index() < b->index();
+      return a->kind() < b->kind();
+    }
+  };
+
+  typedef std::map<
+      const InstructionOperand*, int, OperandLess,
+      zone_allocator<std::pair<const InstructionOperand*, const int>>>
+      LocationMap;
+
+  explicit OutgoingMapping(Zone* zone)
+      : locations_(LocationMap::key_compare(),
+                   LocationMap::allocator_type(zone)),
+        predecessor_intersection_(LocationMap::key_compare(),
+                                  LocationMap::allocator_type(zone)) {}
+
+  LocationMap* locations() { return &locations_; }
+
+  void RunPhis(const InstructionSequence* sequence,
+               const InstructionBlock* block, size_t phi_index) {
+    // This operation is only valid in edge split form.
+    size_t predecessor_index = block->predecessors()[phi_index].ToSize();
+    CHECK(sequence->instruction_blocks()[predecessor_index]->SuccessorCount() ==
+          1);
+    for (const auto* phi : block->phis()) {
+      auto input = phi->inputs()[phi_index];
+      CHECK(locations()->find(input) != locations()->end());
+      auto it = locations()->find(phi->output());
+      CHECK(it != locations()->end());
+      if (input->IsConstant()) {
+        CHECK_EQ(it->second, input->index());
+      } else {
+        CHECK_EQ(it->second, phi->operands()[phi_index]);
+      }
+      it->second = phi->virtual_register();
+    }
+  }
+
+  void RunGapInstruction(Zone* zone, const GapInstruction* gap) {
+    for (int i = GapInstruction::FIRST_INNER_POSITION;
+         i <= GapInstruction::LAST_INNER_POSITION; i++) {
+      GapInstruction::InnerPosition inner_pos =
+          static_cast<GapInstruction::InnerPosition>(i);
+      const ParallelMove* move = gap->GetParallelMove(inner_pos);
+      if (move == nullptr) continue;
+      RunParallelMoves(zone, move);
+    }
+  }
+
+  void RunParallelMoves(Zone* zone, const ParallelMove* move) {
+    // Compute outgoing mappings.
+    LocationMap to_insert((LocationMap::key_compare()),
+                          LocationMap::allocator_type(zone));
+    auto* moves = move->move_operands();
+    for (auto i = moves->begin(); i != moves->end(); ++i) {
+      if (i->IsEliminated()) continue;
+      auto cur = locations()->find(i->source());
+      CHECK(cur != locations()->end());
+      to_insert.insert(std::make_pair(i->destination(), cur->second));
+    }
+    // Drop current mappings.
+    for (auto i = moves->begin(); i != moves->end(); ++i) {
+      if (i->IsEliminated()) continue;
+      auto cur = locations()->find(i->destination());
+      if (cur != locations()->end()) locations()->erase(cur);
+    }
+    // Insert new values.
+    locations()->insert(to_insert.begin(), to_insert.end());
+  }
+
+  void Map(const InstructionOperand* op, int virtual_register) {
+    locations()->insert(std::make_pair(op, virtual_register));
+  }
+
+  void Drop(const InstructionOperand* op) {
+    auto it = locations()->find(op);
+    if (it != locations()->end()) locations()->erase(it);
+  }
+
+  void DropRegisters(const RegisterConfiguration* config) {
+    for (int i = 0; i < config->num_general_registers(); ++i) {
+      InstructionOperand op(InstructionOperand::REGISTER, i);
+      Drop(&op);
+    }
+    for (int i = 0; i < config->num_double_registers(); ++i) {
+      InstructionOperand op(InstructionOperand::DOUBLE_REGISTER, i);
+      Drop(&op);
+    }
+  }
+
+  void InitializeFromFirstPredecessor(const InstructionSequence* sequence,
+                                      const OutgoingMappings* outgoing_mappings,
+                                      const InstructionBlock* block) {
+    if (block->predecessors().empty()) return;
+    size_t predecessor_index = block->predecessors()[0].ToSize();
+    CHECK(predecessor_index < block->rpo_number().ToSize());
+    auto* incoming = outgoing_mappings->at(predecessor_index);
+    if (block->PredecessorCount() > 1) {
+      // Update incoming map with phis. The remaining phis will be checked later
+      // as their mappings are not guaranteed to exist yet.
+      incoming->RunPhis(sequence, block, 0);
+    }
+    // Now initialize outgoing mapping for this block with incoming mapping.
+    CHECK(locations_.empty());
+    locations_ = incoming->locations_;
+  }
+
+  void InitializeFromIntersection() { locations_ = predecessor_intersection_; }
+
+  void InitializeIntersection(const OutgoingMapping* incoming) {
+    CHECK(predecessor_intersection_.empty());
+    predecessor_intersection_ = incoming->locations_;
+  }
+
+  void Intersect(const OutgoingMapping* other) {
+    if (predecessor_intersection_.empty()) return;
+    auto it = predecessor_intersection_.begin();
+    OperandLess less;
+    for (const auto& o : other->locations_) {
+      while (less(it->first, o.first)) {
+        ++it;
+        if (it == predecessor_intersection_.end()) return;
+      }
+      if (it->first->Equals(o.first)) {
+        if (o.second != it->second) {
+          predecessor_intersection_.erase(it++);
+        } else {
+          ++it;
+        }
+        if (it == predecessor_intersection_.end()) return;
+      }
+    }
+  }
+
+ private:
+  LocationMap locations_;
+  LocationMap predecessor_intersection_;
+
+  DISALLOW_COPY_AND_ASSIGN(OutgoingMapping);
+};
+
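+
Note the asymmetry in Intersect above: an entry of predecessor_intersection_ is erased only when the other mapping assigns the same operand a different virtual register; operands the other mapping never mentions survive. A stand-alone sketch of that rule over plain ordered maps, with ints standing in for operands and virtual registers:

```cpp
#include <map>

// Drop an entry only on an explicit disagreement; absent keys are kept.
// Mirrors OutgoingMapping::Intersect, minus the ordered two-pointer walk.
void Intersect(std::map<int, int>* intersection,
               const std::map<int, int>& other) {
  for (auto it = intersection->begin(); it != intersection->end();) {
    auto found = other.find(it->first);
    if (found != other.end() && found->second != it->second) {
      it = intersection->erase(it);
    } else {
      ++it;
    }
  }
}
```

The production version exploits that both maps share OperandLess ordering to do this in a single linear walk instead of per-key lookups.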
+
+// Verify that all gap moves move the operands for a virtual register into the
+// correct location for every instruction.
+void RegisterAllocatorVerifier::VerifyGapMoves() {
+  typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
+  OutgoingMappings outgoing_mappings(
+      static_cast<int>(sequence()->instruction_blocks().size()), nullptr,
+      zone());
+  // Construct all mappings, ignoring back edges and multiple entries.
+  ConstructOutgoingMappings(&outgoing_mappings, true);
+  // Run all remaining phis and compute the intersection of all predecessor
+  // mappings.
+  for (const auto* block : sequence()->instruction_blocks()) {
+    if (block->PredecessorCount() == 0) continue;
+    const size_t block_index = block->rpo_number().ToSize();
+    auto* mapping = outgoing_mappings[block_index];
+    bool initialized = false;
+    // Walk predecessors in reverse to ensure Intersect works correctly:
+    // if it did nothing, the second pass would do exactly what the first
+    // pass did.
+    for (size_t phi_input = block->PredecessorCount() - 1; true; --phi_input) {
+      const size_t pred_block_index = block->predecessors()[phi_input].ToSize();
+      auto* incoming = outgoing_mappings[pred_block_index];
+      if (phi_input != 0) incoming->RunPhis(sequence(), block, phi_input);
+      if (!initialized) {
+        mapping->InitializeIntersection(incoming);
+        initialized = true;
+      } else {
+        mapping->Intersect(incoming);
+      }
+      if (phi_input == 0) break;
+    }
+  }
+  // Construct all mappings again, this time using the intersection mapping
+  // above as the incoming mapping instead of the result from the first
+  // predecessor.
+  ConstructOutgoingMappings(&outgoing_mappings, false);
+}
+
+
+void RegisterAllocatorVerifier::ConstructOutgoingMappings(
+    OutgoingMappings* outgoing_mappings, bool initial_pass) {
+  // Compute the locations of all virtual registers leaving every block, using
+  // only the first predecessor as source for the input mapping.
+  for (const auto* block : sequence()->instruction_blocks()) {
+    const size_t block_index = block->rpo_number().ToSize();
+    auto* current = outgoing_mappings->at(block_index);
+    CHECK(initial_pass == (current == nullptr));
+    // Initialize current.
+    if (!initial_pass) {
+      // Skip check second time around for blocks without multiple predecessors
+      // as we have already executed this in the initial run.
+      if (block->PredecessorCount() <= 1) continue;
+      current->InitializeFromIntersection();
+    } else {
+      current = new (zone()) OutgoingMapping(zone());
+      outgoing_mappings->at(block_index) = current;
+      // Copy outgoing values from predecessor block.
+      current->InitializeFromFirstPredecessor(sequence(), outgoing_mappings,
+                                              block);
+    }
+    // Update current with gaps and operands for all instructions in block.
+    for (int instr_index = block->code_start(); instr_index < block->code_end();
+         ++instr_index) {
+      const auto& instr_constraint = constraints_[instr_index];
+      const auto* instr = instr_constraint.instruction_;
+      const auto* op_constraints = instr_constraint.operand_constraints_;
+      size_t count = 0;
+      for (size_t i = 0; i < instr->InputCount(); ++i, ++count) {
+        if (op_constraints[count].type_ == kImmediate) continue;
+        auto it = current->locations()->find(instr->InputAt(i));
+        int virtual_register = op_constraints[count].virtual_register_;
+        CHECK(it != current->locations()->end());
+        CHECK_EQ(it->second, virtual_register);
+      }
+      for (size_t i = 0; i < instr->TempCount(); ++i, ++count) {
+        current->Drop(instr->TempAt(i));
+      }
+      if (instr->IsCall()) {
+        current->DropRegisters(config());
+      }
+      for (size_t i = 0; i < instr->OutputCount(); ++i, ++count) {
+        current->Drop(instr->OutputAt(i));
+        int virtual_register = op_constraints[count].virtual_register_;
+        current->Map(instr->OutputAt(i), virtual_register);
+      }
+      if (instr->IsGapMoves()) {
+        const auto* gap = GapInstruction::cast(instr);
+        current->RunGapInstruction(zone(), gap);
+      }
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/src/compiler/register-allocator-verifier.h b/deps/v8/src/compiler/register-allocator-verifier.h
new file mode 100644
index 0000000..4e35dc2
--- /dev/null
@@ -0,0 +1,86 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_REGISTER_ALLOCATOR_VERIFIER_H_
+#define V8_REGISTER_ALLOCATOR_VERIFIER_H_
+
+#include "src/v8.h"
+#include "src/zone-containers.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionOperand;
+class InstructionSequence;
+
+class RegisterAllocatorVerifier FINAL : public ZoneObject {
+ public:
+  RegisterAllocatorVerifier(Zone* zone, const RegisterConfiguration* config,
+                            const InstructionSequence* sequence);
+
+  void VerifyAssignment();
+  void VerifyGapMoves();
+
+ private:
+  enum ConstraintType {
+    kConstant,
+    kImmediate,
+    kRegister,
+    kFixedRegister,
+    kDoubleRegister,
+    kFixedDoubleRegister,
+    kFixedSlot,
+    kNone,
+    kNoneDouble,
+    kSameAsFirst
+  };
+
+  struct OperandConstraint {
+    ConstraintType type_;
+    int value_;  // subkind index when relevant
+    int virtual_register_;
+  };
+
+  struct InstructionConstraint {
+    const Instruction* instruction_;
+    size_t operand_constraints_size_;
+    OperandConstraint* operand_constraints_;
+  };
+
+  class OutgoingMapping;
+
+  typedef ZoneVector<InstructionConstraint> Constraints;
+  typedef ZoneVector<OutgoingMapping*> OutgoingMappings;
+
+  Zone* zone() const { return zone_; }
+  const RegisterConfiguration* config() { return config_; }
+  const InstructionSequence* sequence() const { return sequence_; }
+  Constraints* constraints() { return &constraints_; }
+
+  static void VerifyInput(const OperandConstraint& constraint);
+  static void VerifyTemp(const OperandConstraint& constraint);
+  static void VerifyOutput(const OperandConstraint& constraint);
+
+  void BuildConstraint(const InstructionOperand* op,
+                       OperandConstraint* constraint);
+  void CheckConstraint(const InstructionOperand* op,
+                       const OperandConstraint* constraint);
+
+  void ConstructOutgoingMappings(OutgoingMappings* outgoing_mappings,
+                                 bool initial_pass);
+
+  Zone* const zone_;
+  const RegisterConfiguration* config_;
+  const InstructionSequence* const sequence_;
+  Constraints constraints_;
+
+  DISALLOW_COPY_AND_ASSIGN(RegisterAllocatorVerifier);
+};
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_REGISTER_ALLOCATOR_VERIFIER_H_
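
A sketch of the intended call pattern (hypothetical wiring; in the patch it lives in the pipeline's register-allocation step, guarded by a run_verifier flag): the verifier must be constructed before allocation runs, since its constructor snapshots operand constraints while operands are still unallocated.

```cpp
// Assumes the declarations from register-allocator-verifier.h above.
void AllocateAndVerify(Zone* zone, const RegisterConfiguration* config,
                       InstructionSequence* sequence, bool run_verifier) {
  RegisterAllocatorVerifier* verifier = nullptr;
  if (run_verifier) {
    // Construct before allocation: constraints are recorded now.
    verifier = new (zone) RegisterAllocatorVerifier(zone, config, sequence);
  }
  // ... run the register allocator over 'sequence' here ...
  if (verifier != nullptr) {
    verifier->VerifyAssignment();  // each operand satisfies its constraint
    verifier->VerifyGapMoves();    // gap moves preserve vreg locations
  }
}
```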
index 23a7df6..9eb4a47 100644
@@ -3,7 +3,6 @@
 // found in the LICENSE file.
 
 #include "src/compiler/linkage.h"
-#include "src/compiler/pipeline-statistics.h"
 #include "src/compiler/register-allocator.h"
 #include "src/string-stream.h"
 
@@ -31,15 +30,22 @@ static void TraceAlloc(const char* msg, ...) {
 }
 
 
+static void RemoveElement(ZoneVector<LiveRange*>* v, LiveRange* range) {
+  auto it = std::find(v->begin(), v->end(), range);
+  DCHECK(it != v->end());
+  v->erase(it);
+}
+
+
 UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
                          InstructionOperand* hint)
     : operand_(operand),
       hint_(hint),
       pos_(pos),
-      next_(NULL),
+      next_(nullptr),
       requires_reg_(false),
       register_beneficial_(true) {
-  if (operand_ != NULL && operand_->IsUnallocated()) {
+  if (operand_ != nullptr && operand_->IsUnallocated()) {
     const UnallocatedOperand* unalloc = UnallocatedOperand::cast(operand_);
     requires_reg_ = unalloc->HasRegisterPolicy();
     register_beneficial_ = !unalloc->HasAnyPolicy();
@@ -49,7 +55,7 @@ UsePosition::UsePosition(LifetimePosition pos, InstructionOperand* operand,
 
 
 bool UsePosition::HasHint() const {
-  return hint_ != NULL && !hint_->IsUnallocated();
+  return hint_ != nullptr && !hint_->IsUnallocated();
 }
 
 
@@ -61,19 +67,29 @@ bool UsePosition::RegisterIsBeneficial() const { return register_beneficial_; }
 
 void UseInterval::SplitAt(LifetimePosition pos, Zone* zone) {
   DCHECK(Contains(pos) && pos.Value() != start().Value());
-  UseInterval* after = new (zone) UseInterval(pos, end_);
+  auto after = new (zone) UseInterval(pos, end_);
   after->next_ = next_;
   next_ = after;
   end_ = pos;
 }
 
 
+struct LiveRange::SpillAtDefinitionList : ZoneObject {
+  SpillAtDefinitionList(int gap_index, InstructionOperand* operand,
+                        SpillAtDefinitionList* next)
+      : gap_index(gap_index), operand(operand), next(next) {}
+  const int gap_index;
+  InstructionOperand* const operand;
+  SpillAtDefinitionList* const next;
+};
+
+
 #ifdef DEBUG
 
 
 void LiveRange::Verify() const {
   UsePosition* cur = first_pos_;
-  while (cur != NULL) {
+  while (cur != nullptr) {
     DCHECK(Start().Value() <= cur->pos().Value() &&
            cur->pos().Value() <= End().Value());
     cur = cur->next();
@@ -83,7 +99,7 @@ void LiveRange::Verify() const {
 
 bool LiveRange::HasOverlap(UseInterval* target) const {
   UseInterval* current_interval = first_interval_;
-  while (current_interval != NULL) {
+  while (current_interval != nullptr) {
     // Intervals overlap if the start of one is contained in the other.
     if (current_interval->Contains(target->start()) ||
         target->Contains(current_interval->start())) {
@@ -105,52 +121,87 @@ LiveRange::LiveRange(int id, Zone* zone)
       is_non_loop_phi_(false),
       kind_(UNALLOCATED_REGISTERS),
       assigned_register_(kInvalidAssignment),
-      last_interval_(NULL),
-      first_interval_(NULL),
-      first_pos_(NULL),
-      parent_(NULL),
-      next_(NULL),
-      current_interval_(NULL),
-      last_processed_use_(NULL),
-      current_hint_operand_(NULL),
-      spill_operand_(new (zone) InstructionOperand()),
-      spill_start_index_(kMaxInt) {}
+      last_interval_(nullptr),
+      first_interval_(nullptr),
+      first_pos_(nullptr),
+      parent_(nullptr),
+      next_(nullptr),
+      current_interval_(nullptr),
+      last_processed_use_(nullptr),
+      current_hint_operand_(nullptr),
+      spill_start_index_(kMaxInt),
+      spill_type_(SpillType::kNoSpillType),
+      spill_operand_(nullptr),
+      spills_at_definition_(nullptr) {}
 
 
 void LiveRange::set_assigned_register(int reg, Zone* zone) {
   DCHECK(!HasRegisterAssigned() && !IsSpilled());
   assigned_register_ = reg;
-  ConvertOperands(zone);
+  // TODO(dcarney): stop aliasing hint operands.
+  ConvertUsesToOperand(CreateAssignedOperand(zone));
 }
 
 
-void LiveRange::MakeSpilled(Zone* zone) {
+void LiveRange::MakeSpilled() {
   DCHECK(!IsSpilled());
-  DCHECK(TopLevel()->HasAllocatedSpillOperand());
+  DCHECK(!TopLevel()->HasNoSpillType());
   spilled_ = true;
   assigned_register_ = kInvalidAssignment;
-  ConvertOperands(zone);
 }
 
 
-bool LiveRange::HasAllocatedSpillOperand() const {
-  DCHECK(spill_operand_ != NULL);
-  return !spill_operand_->IsIgnored();
+void LiveRange::SpillAtDefinition(Zone* zone, int gap_index,
+                                  InstructionOperand* operand) {
+  DCHECK(HasNoSpillType());
+  spills_at_definition_ = new (zone)
+      SpillAtDefinitionList(gap_index, operand, spills_at_definition_);
+}
+
+
+void LiveRange::CommitSpillsAtDefinition(InstructionSequence* sequence,
+                                         InstructionOperand* op) {
+  auto to_spill = TopLevel()->spills_at_definition_;
+  if (to_spill == nullptr) return;
+  auto zone = sequence->zone();
+  for (; to_spill != nullptr; to_spill = to_spill->next) {
+    auto gap = sequence->GapAt(to_spill->gap_index);
+    auto move = gap->GetOrCreateParallelMove(GapInstruction::START, zone);
+    move->AddMove(to_spill->operand, op, zone);
+  }
+  TopLevel()->spills_at_definition_ = nullptr;
 }
 
 
 void LiveRange::SetSpillOperand(InstructionOperand* operand) {
+  DCHECK(HasNoSpillType());
+  DCHECK(!operand->IsUnallocated());
+  spill_type_ = SpillType::kSpillOperand;
+  spill_operand_ = operand;
+}
+
+
+void LiveRange::SetSpillRange(SpillRange* spill_range) {
+  DCHECK(HasNoSpillType() || HasSpillRange());
+  DCHECK_NE(spill_range, nullptr);
+  spill_type_ = SpillType::kSpillRange;
+  spill_range_ = spill_range;
+}
+
+
+void LiveRange::CommitSpillOperand(InstructionOperand* operand) {
+  DCHECK(HasSpillRange());
   DCHECK(!operand->IsUnallocated());
-  DCHECK(spill_operand_ != NULL);
-  DCHECK(spill_operand_->IsIgnored());
-  spill_operand_->ConvertTo(operand->kind(), operand->index());
+  DCHECK(!IsChild());
+  spill_type_ = SpillType::kSpillOperand;
+  spill_operand_ = operand;
 }
 
 
 UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
   UsePosition* use_pos = last_processed_use_;
-  if (use_pos == NULL) use_pos = first_pos();
-  while (use_pos != NULL && use_pos->pos().Value() < start.Value()) {
+  if (use_pos == nullptr) use_pos = first_pos();
+  while (use_pos != nullptr && use_pos->pos().Value() < start.Value()) {
     use_pos = use_pos->next();
   }
   last_processed_use_ = use_pos;
@@ -161,7 +212,7 @@ UsePosition* LiveRange::NextUsePosition(LifetimePosition start) {
 UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
     LifetimePosition start) {
   UsePosition* pos = NextUsePosition(start);
-  while (pos != NULL && !pos->RegisterIsBeneficial()) {
+  while (pos != nullptr && !pos->RegisterIsBeneficial()) {
     pos = pos->next();
   }
   return pos;
@@ -170,9 +221,9 @@ UsePosition* LiveRange::NextUsePositionRegisterIsBeneficial(
 
 UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
     LifetimePosition start) {
-  UsePosition* pos = first_pos();
-  UsePosition* prev = NULL;
-  while (pos != NULL && pos->pos().Value() < start.Value()) {
+  auto pos = first_pos();
+  UsePosition* prev = nullptr;
+  while (pos != nullptr && pos->pos().Value() < start.Value()) {
     if (pos->RegisterIsBeneficial()) prev = pos;
     pos = pos->next();
   }
@@ -182,7 +233,7 @@ UsePosition* LiveRange::PreviousUsePositionRegisterIsBeneficial(
 
 UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
   UsePosition* pos = NextUsePosition(start);
-  while (pos != NULL && !pos->RequiresRegister()) {
+  while (pos != nullptr && !pos->RequiresRegister()) {
     pos = pos->next();
   }
   return pos;
@@ -192,15 +243,15 @@ UsePosition* LiveRange::NextRegisterPosition(LifetimePosition start) {
 bool LiveRange::CanBeSpilled(LifetimePosition pos) {
   // We cannot spill a live range that has a use requiring a register
   // at the current or the immediate next position.
-  UsePosition* use_pos = NextRegisterPosition(pos);
-  if (use_pos == NULL) return true;
+  auto use_pos = NextRegisterPosition(pos);
+  if (use_pos == nullptr) return true;
   return use_pos->pos().Value() >
          pos.NextInstruction().InstructionEnd().Value();
 }
 
 
 InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) const {
-  InstructionOperand* op = NULL;
+  InstructionOperand* op = nullptr;
   if (HasRegisterAssigned()) {
     DCHECK(!IsSpilled());
     switch (Kind()) {
@@ -213,15 +264,11 @@ InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) const {
       default:
         UNREACHABLE();
     }
-  } else if (IsSpilled()) {
+  } else {
+    DCHECK(IsSpilled());
     DCHECK(!HasRegisterAssigned());
     op = TopLevel()->GetSpillOperand();
     DCHECK(!op->IsUnallocated());
-  } else {
-    UnallocatedOperand* unalloc =
-        new (zone) UnallocatedOperand(UnallocatedOperand::NONE);
-    unalloc->set_virtual_register(id_);
-    op = unalloc;
   }
   return op;
 }
@@ -229,9 +276,9 @@ InstructionOperand* LiveRange::CreateAssignedOperand(Zone* zone) const {
 
 UseInterval* LiveRange::FirstSearchIntervalForPosition(
     LifetimePosition position) const {
-  if (current_interval_ == NULL) return first_interval_;
+  if (current_interval_ == nullptr) return first_interval_;
   if (current_interval_->start().Value() > position.Value()) {
-    current_interval_ = NULL;
+    current_interval_ = nullptr;
     return first_interval_;
   }
   return current_interval_;
@@ -240,11 +287,10 @@ UseInterval* LiveRange::FirstSearchIntervalForPosition(
 
 void LiveRange::AdvanceLastProcessedMarker(
     UseInterval* to_start_of, LifetimePosition but_not_past) const {
-  if (to_start_of == NULL) return;
+  if (to_start_of == nullptr) return;
   if (to_start_of->start().Value() > but_not_past.Value()) return;
-  LifetimePosition start = current_interval_ == NULL
-                               ? LifetimePosition::Invalid()
-                               : current_interval_->start();
+  auto start = current_interval_ == nullptr ? LifetimePosition::Invalid()
+                                            : current_interval_->start();
   if (to_start_of->start().Value() > start.Value()) {
     current_interval_ = to_start_of;
   }
@@ -258,7 +304,7 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
   // Find the last interval that ends before the position. If the
   // position is contained in one of the intervals in the chain, we
   // split that interval and use the first part.
-  UseInterval* current = FirstSearchIntervalForPosition(position);
+  auto current = FirstSearchIntervalForPosition(position);
 
   // If the split position coincides with the beginning of a use interval
   // we need to split use positions in a special way.
@@ -269,12 +315,12 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
     current = first_interval_;
   }
 
-  while (current != NULL) {
+  while (current != nullptr) {
     if (current->Contains(position)) {
       current->SplitAt(position, zone);
       break;
     }
-    UseInterval* next = current->next();
+    auto next = current->next();
     if (next->start().Value() >= position.Value()) {
       split_at_start = (next->start().Value() == position.Value());
       break;
@@ -283,8 +329,8 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
   }
 
   // Partition original use intervals to the two live ranges.
-  UseInterval* before = current;
-  UseInterval* after = before->next();
+  auto before = current;
+  auto after = before->next();
   result->last_interval_ =
       (last_interval_ == before)
           ? after            // Only interval in the range after split.
@@ -294,39 +340,41 @@ void LiveRange::SplitAt(LifetimePosition position, LiveRange* result,
 
   // Find the last use position before the split and the first use
   // position after it.
-  UsePosition* use_after = first_pos_;
-  UsePosition* use_before = NULL;
+  auto use_after = first_pos_;
+  UsePosition* use_before = nullptr;
   if (split_at_start) {
     // The split position coincides with the beginning of a use interval (the
     // end of a lifetime hole). Use at this position should be attributed to
     // the split child because split child owns use interval covering it.
-    while (use_after != NULL && use_after->pos().Value() < position.Value()) {
+    while (use_after != nullptr &&
+           use_after->pos().Value() < position.Value()) {
       use_before = use_after;
       use_after = use_after->next();
     }
   } else {
-    while (use_after != NULL && use_after->pos().Value() <= position.Value()) {
+    while (use_after != nullptr &&
+           use_after->pos().Value() <= position.Value()) {
       use_before = use_after;
       use_after = use_after->next();
     }
   }
 
   // Partition original use positions to the two live ranges.
-  if (use_before != NULL) {
-    use_before->next_ = NULL;
+  if (use_before != nullptr) {
+    use_before->next_ = nullptr;
   } else {
-    first_pos_ = NULL;
+    first_pos_ = nullptr;
   }
   result->first_pos_ = use_after;
 
   // Discard cached iteration state. It might be pointing
   // to the use that no longer belongs to this live range.
-  last_processed_use_ = NULL;
-  current_interval_ = NULL;
+  last_processed_use_ = nullptr;
+  current_interval_ = nullptr;
 
   // Link the new live range in the chain before any of the other
   // ranges linked from the range before the split.
-  result->parent_ = (parent_ == NULL) ? this : parent_;
+  result->parent_ = (parent_ == nullptr) ? this : parent_;
   result->kind_ = result->parent_->kind_;
   result->next_ = next_;
   next_ = result;
@@ -348,9 +396,9 @@ bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
   LifetimePosition other_start = other->Start();
   if (start.Value() == other_start.Value()) {
     UsePosition* pos = first_pos();
-    if (pos == NULL) return false;
+    if (pos == nullptr) return false;
     UsePosition* other_pos = other->first_pos();
-    if (other_pos == NULL) return true;
+    if (other_pos == nullptr) return true;
     return pos->pos().Value() < other_pos->pos().Value();
   }
   return start.Value() < other_start.Value();
@@ -359,7 +407,7 @@ bool LiveRange::ShouldBeAllocatedBefore(const LiveRange* other) const {
 
 void LiveRange::ShortenTo(LifetimePosition start) {
   TraceAlloc("Shorten live range %d to [%d\n", id_, start.Value());
-  DCHECK(first_interval_ != NULL);
+  DCHECK(first_interval_ != nullptr);
   DCHECK(first_interval_->start().Value() <= start.Value());
   DCHECK(start.Value() < first_interval_->end().Value());
   first_interval_->set_start(start);
@@ -370,8 +418,8 @@ void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
                                Zone* zone) {
   TraceAlloc("Ensure live range %d in interval [%d %d[\n", id_, start.Value(),
              end.Value());
-  LifetimePosition new_end = end;
-  while (first_interval_ != NULL &&
+  auto new_end = end;
+  while (first_interval_ != nullptr &&
          first_interval_->start().Value() <= end.Value()) {
     if (first_interval_->end().Value() > end.Value()) {
       new_end = first_interval_->end();
@@ -379,10 +427,10 @@ void LiveRange::EnsureInterval(LifetimePosition start, LifetimePosition end,
     first_interval_ = first_interval_->next();
   }
 
-  UseInterval* new_interval = new (zone) UseInterval(start, new_end);
+  auto new_interval = new (zone) UseInterval(start, new_end);
   new_interval->next_ = first_interval_;
   first_interval_ = new_interval;
-  if (new_interval->next() == NULL) {
+  if (new_interval->next() == nullptr) {
     last_interval_ = new_interval;
   }
 }
@@ -392,15 +440,15 @@ void LiveRange::AddUseInterval(LifetimePosition start, LifetimePosition end,
                                Zone* zone) {
   TraceAlloc("Add to live range %d interval [%d %d[\n", id_, start.Value(),
              end.Value());
-  if (first_interval_ == NULL) {
-    UseInterval* interval = new (zone) UseInterval(start, end);
+  if (first_interval_ == nullptr) {
+    auto interval = new (zone) UseInterval(start, end);
     first_interval_ = interval;
     last_interval_ = interval;
   } else {
     if (end.Value() == first_interval_->start().Value()) {
       first_interval_->set_start(start);
     } else if (end.Value() < first_interval_->start().Value()) {
-      UseInterval* interval = new (zone) UseInterval(start, end);
+      auto interval = new (zone) UseInterval(start, end);
       interval->set_next(first_interval_);
       first_interval_ = interval;
     } else {
@@ -419,17 +467,17 @@ void LiveRange::AddUsePosition(LifetimePosition pos,
                                InstructionOperand* operand,
                                InstructionOperand* hint, Zone* zone) {
   TraceAlloc("Add to live range %d use position %d\n", id_, pos.Value());
-  UsePosition* use_pos = new (zone) UsePosition(pos, operand, hint);
-  UsePosition* prev_hint = NULL;
-  UsePosition* prev = NULL;
-  UsePosition* current = first_pos_;
-  while (current != NULL && current->pos().Value() < pos.Value()) {
+  auto use_pos = new (zone) UsePosition(pos, operand, hint);
+  UsePosition* prev_hint = nullptr;
+  UsePosition* prev = nullptr;
+  auto current = first_pos_;
+  while (current != nullptr && current->pos().Value() < pos.Value()) {
     prev_hint = current->HasHint() ? current : prev_hint;
     prev = current;
     current = current->next();
   }
 
-  if (prev == NULL) {
+  if (prev == nullptr) {
     use_pos->set_next(first_pos_);
     first_pos_ = use_pos;
   } else {
@@ -437,16 +485,15 @@ void LiveRange::AddUsePosition(LifetimePosition pos,
     prev->next_ = use_pos;
   }
 
-  if (prev_hint == NULL && use_pos->HasHint()) {
+  if (prev_hint == nullptr && use_pos->HasHint()) {
     current_hint_operand_ = hint;
   }
 }
 
 
-void LiveRange::ConvertOperands(Zone* zone) {
-  InstructionOperand* op = CreateAssignedOperand(zone);
-  UsePosition* use_pos = first_pos();
-  while (use_pos != NULL) {
+void LiveRange::ConvertUsesToOperand(InstructionOperand* op) {
+  auto use_pos = first_pos();
+  while (use_pos != nullptr) {
     DCHECK(Start().Value() <= use_pos->pos().Value() &&
            use_pos->pos().Value() <= End().Value());
 
@@ -469,10 +516,10 @@ bool LiveRange::CanCover(LifetimePosition position) const {
 
 bool LiveRange::Covers(LifetimePosition position) {
   if (!CanCover(position)) return false;
-  UseInterval* start_search = FirstSearchIntervalForPosition(position);
-  for (UseInterval* interval = start_search; interval != NULL;
+  auto start_search = FirstSearchIntervalForPosition(position);
+  for (auto interval = start_search; interval != nullptr;
        interval = interval->next()) {
-    DCHECK(interval->next() == NULL ||
+    DCHECK(interval->next() == nullptr ||
            interval->next()->start().Value() >= interval->start().Value());
     AdvanceLastProcessedMarker(interval, position);
     if (interval->Contains(position)) return true;
@@ -483,20 +530,20 @@ bool LiveRange::Covers(LifetimePosition position) {
 
 
 LifetimePosition LiveRange::FirstIntersection(LiveRange* other) {
-  UseInterval* b = other->first_interval();
-  if (b == NULL) return LifetimePosition::Invalid();
-  LifetimePosition advance_last_processed_up_to = b->start();
-  UseInterval* a = FirstSearchIntervalForPosition(b->start());
-  while (a != NULL && b != NULL) {
+  auto b = other->first_interval();
+  if (b == nullptr) return LifetimePosition::Invalid();
+  auto advance_last_processed_up_to = b->start();
+  auto a = FirstSearchIntervalForPosition(b->start());
+  while (a != nullptr && b != nullptr) {
     if (a->start().Value() > other->End().Value()) break;
     if (b->start().Value() > End().Value()) break;
-    LifetimePosition cur_intersection = a->Intersect(b);
+    auto cur_intersection = a->Intersect(b);
     if (cur_intersection.IsValid()) {
       return cur_intersection;
     }
     if (a->start().Value() < b->start().Value()) {
       a = a->next();
-      if (a == NULL || a->start().Value() > other->End().Value()) break;
+      if (a == nullptr || a->start().Value() > other->End().Value()) break;
       AdvanceLastProcessedMarker(a, advance_last_processed_up_to);
     } else {
       b = b->next();
@@ -515,16 +562,18 @@ RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
       code_(code),
       debug_name_(debug_name),
       config_(config),
-      live_in_sets_(code->InstructionBlockCount(), local_zone()),
-      live_ranges_(code->VirtualRegisterCount() * 2, local_zone()),
-      fixed_live_ranges_(this->config()->num_general_registers(), NULL,
+      phi_map_(PhiMap::key_compare(), PhiMap::allocator_type(local_zone())),
+      live_in_sets_(code->InstructionBlockCount(), nullptr, local_zone()),
+      live_ranges_(code->VirtualRegisterCount() * 2, nullptr, local_zone()),
+      fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
                          local_zone()),
-      fixed_double_live_ranges_(this->config()->num_double_registers(), NULL,
+      fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
                                 local_zone()),
-      unhandled_live_ranges_(code->VirtualRegisterCount() * 2, local_zone()),
-      active_live_ranges_(8, local_zone()),
-      inactive_live_ranges_(8, local_zone()),
-      reusable_slots_(8, local_zone()),
+      unhandled_live_ranges_(local_zone()),
+      active_live_ranges_(local_zone()),
+      inactive_live_ranges_(local_zone()),
+      reusable_slots_(local_zone()),
+      spill_ranges_(local_zone()),
       mode_(UNALLOCATED_REGISTERS),
       num_registers_(-1),
       allocation_ok_(true) {
@@ -534,35 +583,39 @@ RegisterAllocator::RegisterAllocator(const RegisterConfiguration* config,
          RegisterConfiguration::kMaxDoubleRegisters);
   // TryAllocateFreeReg and AllocateBlockedReg assume this
   // when allocating local arrays.
-  DCHECK(this->config()->num_double_registers() >=
+  DCHECK(RegisterConfiguration::kMaxDoubleRegisters >=
          this->config()->num_general_registers());
-}
-
-
-void RegisterAllocator::InitializeLivenessAnalysis() {
-  // Initialize the live_in sets for each block to NULL.
-  int block_count = code()->InstructionBlockCount();
-  live_in_sets_.Initialize(block_count, local_zone());
-  live_in_sets_.AddBlock(NULL, block_count, local_zone());
+  unhandled_live_ranges().reserve(
+      static_cast<size_t>(code->VirtualRegisterCount() * 2));
+  active_live_ranges().reserve(8);
+  inactive_live_ranges().reserve(8);
+  reusable_slots().reserve(8);
+  spill_ranges().reserve(8);
+  assigned_registers_ =
+      new (code_zone()) BitVector(config->num_general_registers(), code_zone());
+  assigned_double_registers_ = new (code_zone())
+      BitVector(config->num_aliased_double_registers(), code_zone());
+  frame->SetAllocatedRegisters(assigned_registers_);
+  frame->SetAllocatedDoubleRegisters(assigned_double_registers_);
 }
 
 
 BitVector* RegisterAllocator::ComputeLiveOut(const InstructionBlock* block) {
   // Compute live out for the given block, except not including backward
   // successor edges.
-  BitVector* live_out = new (local_zone())
+  auto live_out = new (local_zone())
       BitVector(code()->VirtualRegisterCount(), local_zone());
 
   // Process all successor blocks.
   for (auto succ : block->successors()) {
     // Add values live on entry to the successor. Note the successor's
     // live_in will not be computed yet for backwards edges.
-    BitVector* live_in = live_in_sets_[static_cast<int>(succ.ToSize())];
-    if (live_in != NULL) live_out->Union(*live_in);
+    auto live_in = live_in_sets_[succ.ToSize()];
+    if (live_in != nullptr) live_out->Union(*live_in);
 
     // All phi input operands corresponding to this successor edge are live
     // out from this block.
-    const InstructionBlock* successor = code()->InstructionBlockAt(succ);
+    auto successor = code()->InstructionBlockAt(succ);
     size_t index = successor->PredecessorIndexOf(block->rpo_number());
     DCHECK(index < successor->PredecessorCount());
     for (auto phi : successor->phis()) {
@@ -577,14 +630,14 @@ void RegisterAllocator::AddInitialIntervals(const InstructionBlock* block,
                                             BitVector* live_out) {
   // Add an interval that includes the entire block to the live range for
   // each live_out value.
-  LifetimePosition start =
+  auto start =
       LifetimePosition::FromInstructionIndex(block->first_instruction_index());
-  LifetimePosition end = LifetimePosition::FromInstructionIndex(
-                             block->last_instruction_index()).NextInstruction();
+  auto end = LifetimePosition::FromInstructionIndex(
+                 block->last_instruction_index()).NextInstruction();
   BitVector::Iterator iterator(live_out);
   while (!iterator.Done()) {
     int operand_index = iterator.Current();
-    LiveRange* range = LiveRangeFor(operand_index);
+    auto range = LiveRangeFor(operand_index);
     range->AddUseInterval(start, end, local_zone());
     iterator.Advance();
   }
@@ -614,7 +667,7 @@ InstructionOperand* RegisterAllocator::AllocateFixed(
   }
   if (is_tagged) {
     TraceAlloc("Fixed reg is tagged at %d\n", pos);
-    Instruction* instr = InstructionAt(pos);
+    auto instr = InstructionAt(pos);
     if (instr->HasPointerMap()) {
       instr->pointer_map()->RecordPointer(operand, code_zone());
     }
@@ -625,8 +678,8 @@ InstructionOperand* RegisterAllocator::AllocateFixed(
 
 LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
   DCHECK(index < config()->num_general_registers());
-  LiveRange* result = fixed_live_ranges_[index];
-  if (result == NULL) {
+  auto result = fixed_live_ranges()[index];
+  if (result == nullptr) {
     // TODO(titzer): add a utility method to allocate a new LiveRange:
     // The LiveRange object itself can go in this zone, but the
     // InstructionOperand needs
@@ -635,7 +688,7 @@ LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
     DCHECK(result->IsFixed());
     result->kind_ = GENERAL_REGISTERS;
     SetLiveRangeAssignedRegister(result, index);
-    fixed_live_ranges_[index] = result;
+    fixed_live_ranges()[index] = result;
   }
   return result;
 }
@@ -643,28 +696,27 @@ LiveRange* RegisterAllocator::FixedLiveRangeFor(int index) {
 
 LiveRange* RegisterAllocator::FixedDoubleLiveRangeFor(int index) {
   DCHECK(index < config()->num_aliased_double_registers());
-  LiveRange* result = fixed_double_live_ranges_[index];
-  if (result == NULL) {
+  auto result = fixed_double_live_ranges()[index];
+  if (result == nullptr) {
     result = new (local_zone())
         LiveRange(FixedDoubleLiveRangeID(index), code_zone());
     DCHECK(result->IsFixed());
     result->kind_ = DOUBLE_REGISTERS;
     SetLiveRangeAssignedRegister(result, index);
-    fixed_double_live_ranges_[index] = result;
+    fixed_double_live_ranges()[index] = result;
   }
   return result;
 }
 
 
 LiveRange* RegisterAllocator::LiveRangeFor(int index) {
-  if (index >= live_ranges_.length()) {
-    live_ranges_.AddBlock(NULL, index - live_ranges_.length() + 1,
-                          local_zone());
+  if (index >= static_cast<int>(live_ranges().size())) {
+    live_ranges().resize(index + 1, nullptr);
   }
-  LiveRange* result = live_ranges_[index];
-  if (result == NULL) {
+  auto result = live_ranges()[index];
+  if (result == nullptr) {
     result = new (local_zone()) LiveRange(index, code_zone());
-    live_ranges_[index] = result;
+    live_ranges()[index] = result;
   }
   return result;
 }
@@ -684,7 +736,7 @@ LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
   } else if (operand->IsDoubleRegister()) {
     return FixedDoubleLiveRangeFor(operand->index());
   } else {
-    return NULL;
+    return nullptr;
   }
 }
 
@@ -692,19 +744,20 @@ LiveRange* RegisterAllocator::LiveRangeFor(InstructionOperand* operand) {
 void RegisterAllocator::Define(LifetimePosition position,
                                InstructionOperand* operand,
                                InstructionOperand* hint) {
-  LiveRange* range = LiveRangeFor(operand);
-  if (range == NULL) return;
+  auto range = LiveRangeFor(operand);
+  if (range == nullptr) return;
 
   if (range->IsEmpty() || range->Start().Value() > position.Value()) {
     // Can happen if there is a definition without use.
     range->AddUseInterval(position, position.NextInstruction(), local_zone());
-    range->AddUsePosition(position.NextInstruction(), NULL, NULL, local_zone());
+    range->AddUsePosition(position.NextInstruction(), nullptr, nullptr,
+                          local_zone());
   } else {
     range->ShortenTo(position);
   }
 
   if (operand->IsUnallocated()) {
-    UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
+    auto unalloc_operand = UnallocatedOperand::cast(operand);
     range->AddUsePosition(position, unalloc_operand, hint, local_zone());
   }
 }
@@ -714,8 +767,8 @@ void RegisterAllocator::Use(LifetimePosition block_start,
                             LifetimePosition position,
                             InstructionOperand* operand,
                             InstructionOperand* hint) {
-  LiveRange* range = LiveRangeFor(operand);
-  if (range == NULL) return;
+  auto range = LiveRangeFor(operand);
+  if (range == nullptr) return;
   if (operand->IsUnallocated()) {
     UnallocatedOperand* unalloc_operand = UnallocatedOperand::cast(operand);
     range->AddUsePosition(position, unalloc_operand, hint, local_zone());
@@ -724,27 +777,258 @@ void RegisterAllocator::Use(LifetimePosition block_start,
 }
 
 
-void RegisterAllocator::AddConstraintsGapMove(int index,
-                                              InstructionOperand* from,
-                                              InstructionOperand* to) {
-  GapInstruction* gap = code()->GapAt(index);
-  ParallelMove* move =
-      gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
-  if (from->IsUnallocated()) {
-    const ZoneList<MoveOperands>* move_operands = move->move_operands();
-    for (int i = 0; i < move_operands->length(); ++i) {
-      MoveOperands cur = move_operands->at(i);
-      InstructionOperand* cur_to = cur.destination();
-      if (cur_to->IsUnallocated()) {
-        if (UnallocatedOperand::cast(cur_to)->virtual_register() ==
-            UnallocatedOperand::cast(from)->virtual_register()) {
-          move->AddMove(cur.source(), to, code_zone());
-          return;
-        }
+void RegisterAllocator::AddGapMove(int index,
+                                   GapInstruction::InnerPosition position,
+                                   InstructionOperand* from,
+                                   InstructionOperand* to) {
+  auto gap = code()->GapAt(index);
+  auto move = gap->GetOrCreateParallelMove(position, code_zone());
+  move->AddMove(from, to, code_zone());
+}
+
+
+static bool AreUseIntervalsIntersecting(UseInterval* interval1,
+                                        UseInterval* interval2) {
+  while (interval1 != nullptr && interval2 != nullptr) {
+    if (interval1->start().Value() < interval2->start().Value()) {
+      if (interval1->end().Value() > interval2->start().Value()) {
+        return true;
+      }
+      interval1 = interval1->next();
+    } else {
+      if (interval2->end().Value() > interval1->start().Value()) {
+        return true;
       }
+      interval2 = interval2->next();
     }
   }
-  move->AddMove(from, to, code_zone());
+  return false;
+}
+
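+
AreUseIntervalsIntersecting is a classic two-pointer sweep over two sorted, internally disjoint interval lists: always test against, then advance, the list whose current interval starts first. A self-contained miniature with half-open int ranges in place of UseInterval chains:

```cpp
#include <cassert>
#include <utility>
#include <vector>

using Ranges = std::vector<std::pair<int, int>>;  // sorted, disjoint [start, end)

bool Intersecting(const Ranges& a, const Ranges& b) {
  size_t i = 0, j = 0;
  while (i < a.size() && j < b.size()) {
    if (a[i].first < b[j].first) {
      if (a[i].second > b[j].first) return true;  // a's interval reaches into b's
      ++i;
    } else {
      if (b[j].second > a[i].first) return true;
      ++j;
    }
  }
  return false;
}

int main() {
  assert(Intersecting({{0, 4}}, {{2, 6}}));   // overlap on [2, 4)
  assert(!Intersecting({{0, 2}}, {{2, 4}}));  // touching end points do not count
}
```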
+
+SpillRange::SpillRange(LiveRange* range, Zone* zone) : live_ranges_(zone) {
+  auto src = range->first_interval();
+  UseInterval* result = nullptr;
+  UseInterval* node = nullptr;
+  // Copy the nodes.
+  while (src != nullptr) {
+    auto new_node = new (zone) UseInterval(src->start(), src->end());
+    if (result == nullptr) {
+      result = new_node;
+    } else {
+      node->set_next(new_node);
+    }
+    node = new_node;
+    src = src->next();
+  }
+  use_interval_ = result;
+  live_ranges().push_back(range);
+  end_position_ = node->end();
+  DCHECK(!range->HasSpillRange());
+  range->SetSpillRange(this);
+}
+
+
+bool SpillRange::IsIntersectingWith(SpillRange* other) const {
+  if (this->use_interval_ == nullptr || other->use_interval_ == nullptr ||
+      this->End().Value() <= other->use_interval_->start().Value() ||
+      other->End().Value() <= this->use_interval_->start().Value()) {
+    return false;
+  }
+  return AreUseIntervalsIntersecting(use_interval_, other->use_interval_);
+}
+
+
+bool SpillRange::TryMerge(SpillRange* other) {
+  if (Kind() != other->Kind() || IsIntersectingWith(other)) return false;
+
+  auto max = LifetimePosition::MaxPosition();
+  if (End().Value() < other->End().Value() &&
+      other->End().Value() != max.Value()) {
+    end_position_ = other->End();
+  }
+  other->end_position_ = max;
+
+  MergeDisjointIntervals(other->use_interval_);
+  other->use_interval_ = nullptr;
+
+  for (auto range : other->live_ranges()) {
+    DCHECK(range->GetSpillRange() == other);
+    range->SetSpillRange(this);
+  }
+
+  live_ranges().insert(live_ranges().end(), other->live_ranges().begin(),
+                       other->live_ranges().end());
+  other->live_ranges().clear();
+
+  return true;
+}
+
+
+void SpillRange::SetOperand(InstructionOperand* op) {
+  for (auto range : live_ranges()) {
+    DCHECK(range->GetSpillRange() == this);
+    range->CommitSpillOperand(op);
+  }
+}
+
+
+void SpillRange::MergeDisjointIntervals(UseInterval* other) {
+  UseInterval* tail = nullptr;
+  auto current = use_interval_;
+  while (other != nullptr) {
+    // Make sure the 'current' list starts first.
+    if (current == nullptr ||
+        current->start().Value() > other->start().Value()) {
+      std::swap(current, other);
+    }
+    // Check disjointness.
+    DCHECK(other == nullptr ||
+           current->end().Value() <= other->start().Value());
+    // Append the 'current' node to the result accumulator and move forward.
+    if (tail == nullptr) {
+      use_interval_ = current;
+    } else {
+      tail->set_next(current);
+    }
+    tail = current;
+    current = current->next();
+  }
+  // Other list is empty => we are done.
+}
+
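+
MergeDisjointIntervals above is a destructive merge of two sorted, mutually disjoint linked lists: on each step the list whose head starts first is appended, and because nodes keep their own next links, whatever remains of the longer list is already chained on when the other runs out. The same shape as a stand-alone sketch over a hypothetical int-interval node type:

```cpp
#include <utility>

struct Interval {
  int start, end;  // half-open [start, end)
  Interval* next;
};

Interval* MergeDisjoint(Interval* a, Interval* b) {
  Interval* head = nullptr;
  Interval* tail = nullptr;
  while (b != nullptr) {
    // Make sure 'a' is the list whose head starts first.
    if (a == nullptr || a->start > b->start) std::swap(a, b);
    if (tail == nullptr) head = a; else tail->next = a;
    tail = a;
    a = a->next;  // tail->next still points at the rest of a's chain
  }
  return head != nullptr ? head : a;
}
```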
+
+void RegisterAllocator::ReuseSpillSlots() {
+  DCHECK(FLAG_turbo_reuse_spill_slots);
+
+  // Merge disjoint spill ranges.
+  for (size_t i = 0; i < spill_ranges().size(); i++) {
+    auto range = spill_ranges()[i];
+    if (range->IsEmpty()) continue;
+    for (size_t j = i + 1; j < spill_ranges().size(); j++) {
+      auto other = spill_ranges()[j];
+      if (!other->IsEmpty()) {
+        range->TryMerge(other);
+      }
+    }
+  }
+
+  // Allocate slots for the merged spill ranges.
+  for (auto range : spill_ranges()) {
+    if (range->IsEmpty()) continue;
+    // Allocate a new operand referring to the spill slot.
+    auto kind = range->Kind();
+    int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
+    auto op_kind = kind == DOUBLE_REGISTERS
+                       ? InstructionOperand::DOUBLE_STACK_SLOT
+                       : InstructionOperand::STACK_SLOT;
+    auto op = new (code_zone()) InstructionOperand(op_kind, index);
+    range->SetOperand(op);
+  }
+}
+
+
+void RegisterAllocator::CommitAssignment() {
+  for (auto range : live_ranges()) {
+    if (range == nullptr || range->IsEmpty()) continue;
+    // Register assignments were committed in set_assigned_register.
+    if (range->HasRegisterAssigned()) continue;
+    auto assigned = range->CreateAssignedOperand(code_zone());
+    range->ConvertUsesToOperand(assigned);
+    if (range->IsSpilled()) {
+      range->CommitSpillsAtDefinition(code(), assigned);
+    }
+  }
+}
+
+
+SpillRange* RegisterAllocator::AssignSpillRangeToLiveRange(LiveRange* range) {
+  DCHECK(FLAG_turbo_reuse_spill_slots);
+  auto spill_range = new (local_zone()) SpillRange(range, local_zone());
+  spill_ranges().push_back(spill_range);
+  return spill_range;
+}
+
+
+bool RegisterAllocator::TryReuseSpillForPhi(LiveRange* range) {
+  DCHECK(FLAG_turbo_reuse_spill_slots);
+  if (range->IsChild() || !range->is_phi()) return false;
+  DCHECK(range->HasNoSpillType());
+
+  auto lookup = phi_map_.find(range->id());
+  DCHECK(lookup != phi_map_.end());
+  auto phi = lookup->second.phi;
+  auto block = lookup->second.block;
+  // Count the number of spilled operands.
+  size_t spilled_count = 0;
+  LiveRange* first_op = nullptr;
+  for (size_t i = 0; i < phi->operands().size(); i++) {
+    int op = phi->operands()[i];
+    LiveRange* op_range = LiveRangeFor(op);
+    if (op_range->GetSpillRange() == nullptr) continue;
+    auto pred = code()->InstructionBlockAt(block->predecessors()[i]);
+    auto pred_end =
+        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+    while (op_range != nullptr && !op_range->CanCover(pred_end)) {
+      op_range = op_range->next();
+    }
+    if (op_range != nullptr && op_range->IsSpilled()) {
+      spilled_count++;
+      if (first_op == nullptr) {
+        first_op = op_range->TopLevel();
+      }
+    }
+  }
+
+  // Only continue if more than half of the operands are spilled.
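+  // For example, with four phi operands at least three must already be
+  // spilled, since 3 * 2 > 4.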
+  if (spilled_count * 2 <= phi->operands().size()) {
+    return false;
+  }
+
+  // Try to merge the spilled operands and count the number of merged spilled
+  // operands.
+  DCHECK(first_op != nullptr);
+  auto first_op_spill = first_op->GetSpillRange();
+  size_t num_merged = 1;
+  for (size_t i = 1; i < phi->operands().size(); i++) {
+    int op = phi->operands()[i];
+    auto op_range = LiveRangeFor(op);
+    auto op_spill = op_range->GetSpillRange();
+    if (op_spill != nullptr &&
+        (op_spill == first_op_spill || first_op_spill->TryMerge(op_spill))) {
+      num_merged++;
+    }
+  }
+
+  // Only continue if enough operands could be merged into the same spill
+  // slot and the phi's own live range does not intersect that spill range.
+  if (num_merged * 2 <= phi->operands().size() ||
+      AreUseIntervalsIntersecting(first_op_spill->interval(),
+                                  range->first_interval())) {
+    return false;
+  }
+
+  // If the range does not need a register soon, spill it to the merged
+  // spill range.
+  auto next_pos = range->Start();
+  if (code()->IsGapAt(next_pos.InstructionIndex())) {
+    next_pos = next_pos.NextInstruction();
+  }
+  auto pos = range->NextUsePositionRegisterIsBeneficial(next_pos);
+  if (pos == nullptr) {
+    auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
+    CHECK(first_op_spill->TryMerge(spill_range));
+    Spill(range);
+    return true;
+  } else if (pos->pos().Value() > range->Start().NextInstruction().Value()) {
+    auto spill_range = AssignSpillRangeToLiveRange(range->TopLevel());
+    CHECK(first_op_spill->TryMerge(spill_range));
+    SpillBetween(range, range->Start(), pos->pos());
+    if (!AllocationOk()) return false;
+    DCHECK(UnhandledIsSorted());
+    return true;
+  }
+  return false;
 }
 
 
@@ -754,8 +1038,8 @@ void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
   DCHECK_NE(-1, start);
   for (int i = start; i <= end; ++i) {
     if (code()->IsGapAt(i)) {
-      Instruction* instr = NULL;
-      Instruction* prev_instr = NULL;
+      Instruction* instr = nullptr;
+      Instruction* prev_instr = nullptr;
       if (i < end) instr = InstructionAt(i + 1);
       if (i > start) prev_instr = InstructionAt(i - 1);
       MeetConstraintsBetween(prev_instr, instr, i);
@@ -773,18 +1057,19 @@ void RegisterAllocator::MeetRegisterConstraints(const InstructionBlock* block) {
 void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
     const InstructionBlock* block) {
   int end = block->last_instruction_index();
-  Instruction* last_instruction = InstructionAt(end);
+  auto last_instruction = InstructionAt(end);
   for (size_t i = 0; i < last_instruction->OutputCount(); i++) {
-    InstructionOperand* output_operand = last_instruction->OutputAt(i);
+    auto output_operand = last_instruction->OutputAt(i);
     DCHECK(!output_operand->IsConstant());
-    UnallocatedOperand* output = UnallocatedOperand::cast(output_operand);
+    auto output = UnallocatedOperand::cast(output_operand);
     int output_vreg = output->virtual_register();
-    LiveRange* range = LiveRangeFor(output_vreg);
+    auto range = LiveRangeFor(output_vreg);
     bool assigned = false;
     if (output->HasFixedPolicy()) {
       AllocateFixed(output, -1, false);
       // This value is produced on the stack; we never need to spill it.
       if (output->IsStackSlot()) {
+        DCHECK(output->index() < 0);
         range->SetSpillOperand(output);
         range->SetSpillStartIndex(end);
         assigned = true;
@@ -802,7 +1087,7 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
             new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
         output_copy->set_virtual_register(output_vreg);
 
-        code()->AddGapMove(gap_index, output, output_copy);
+        AddGapMove(gap_index, GapInstruction::START, output, output_copy);
       }
     }
 
@@ -811,16 +1096,8 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
         const InstructionBlock* successor = code()->InstructionBlockAt(succ);
         DCHECK(successor->PredecessorCount() == 1);
         int gap_index = successor->first_instruction_index() + 1;
+        range->SpillAtDefinition(local_zone(), gap_index, output);
         range->SetSpillStartIndex(gap_index);
-
-        // This move to spill operand is not a real use. Liveness analysis
-        // and splitting of live ranges do not account for it.
-        // Thus it should be inserted to a lifetime position corresponding to
-        // the instruction end.
-        GapInstruction* gap = code()->GapAt(gap_index);
-        ParallelMove* move =
-            gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
-        move->AddMove(output, range->GetSpillOperand(), code_zone());
       }
     }
   }
@@ -830,10 +1107,10 @@ void RegisterAllocator::MeetRegisterConstraintsForLastInstructionInBlock(
 void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
                                                Instruction* second,
                                                int gap_index) {
-  if (first != NULL) {
+  if (first != nullptr) {
     // Handle fixed temporaries.
     for (size_t i = 0; i < first->TempCount(); i++) {
-      UnallocatedOperand* temp = UnallocatedOperand::cast(first->TempAt(i));
+      auto temp = UnallocatedOperand::cast(first->TempAt(i));
       if (temp->HasFixedPolicy()) {
         AllocateFixed(temp, gap_index - 1, false);
       }
@@ -844,66 +1121,58 @@ void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
       InstructionOperand* output = first->OutputAt(i);
       if (output->IsConstant()) {
         int output_vreg = output->index();
-        LiveRange* range = LiveRangeFor(output_vreg);
+        auto range = LiveRangeFor(output_vreg);
         range->SetSpillStartIndex(gap_index - 1);
         range->SetSpillOperand(output);
       } else {
-        UnallocatedOperand* first_output = UnallocatedOperand::cast(output);
-        LiveRange* range = LiveRangeFor(first_output->virtual_register());
+        auto first_output = UnallocatedOperand::cast(output);
+        auto range = LiveRangeFor(first_output->virtual_register());
         bool assigned = false;
         if (first_output->HasFixedPolicy()) {
-          UnallocatedOperand* output_copy =
-              first_output->CopyUnconstrained(code_zone());
+          auto output_copy = first_output->CopyUnconstrained(code_zone());
           bool is_tagged = HasTaggedValue(first_output->virtual_register());
           AllocateFixed(first_output, gap_index, is_tagged);
 
           // This value is produced on the stack; we never need to spill it.
           if (first_output->IsStackSlot()) {
+            DCHECK(first_output->index() < 0);
             range->SetSpillOperand(first_output);
             range->SetSpillStartIndex(gap_index - 1);
             assigned = true;
           }
-          code()->AddGapMove(gap_index, first_output, output_copy);
+          AddGapMove(gap_index, GapInstruction::START, first_output,
+                     output_copy);
         }
 
         // Make sure we add a gap move for spilling (if we have not done
         // so already).
         if (!assigned) {
+          range->SpillAtDefinition(local_zone(), gap_index, first_output);
           range->SetSpillStartIndex(gap_index);
-
-          // This move to spill operand is not a real use. Liveness analysis
-          // and splitting of live ranges do not account for it.
-          // Thus it should be inserted to a lifetime position corresponding to
-          // the instruction end.
-          GapInstruction* gap = code()->GapAt(gap_index);
-          ParallelMove* move =
-              gap->GetOrCreateParallelMove(GapInstruction::BEFORE, code_zone());
-          move->AddMove(first_output, range->GetSpillOperand(), code_zone());
         }
       }
     }
   }
 
-  if (second != NULL) {
+  if (second != nullptr) {
     // Handle fixed input operands of second instruction.
     for (size_t i = 0; i < second->InputCount(); i++) {
-      InstructionOperand* input = second->InputAt(i);
+      auto input = second->InputAt(i);
       if (input->IsImmediate()) continue;  // Ignore immediates.
-      UnallocatedOperand* cur_input = UnallocatedOperand::cast(input);
+      auto cur_input = UnallocatedOperand::cast(input);
       if (cur_input->HasFixedPolicy()) {
-        UnallocatedOperand* input_copy =
-            cur_input->CopyUnconstrained(code_zone());
+        auto input_copy = cur_input->CopyUnconstrained(code_zone());
         bool is_tagged = HasTaggedValue(cur_input->virtual_register());
         AllocateFixed(cur_input, gap_index + 1, is_tagged);
-        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+        AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
       }
     }
 
     // Handle "output same as input" for second instruction.
     for (size_t i = 0; i < second->OutputCount(); i++) {
-      InstructionOperand* output = second->OutputAt(i);
+      auto output = second->OutputAt(i);
       if (!output->IsUnallocated()) continue;
-      UnallocatedOperand* second_output = UnallocatedOperand::cast(output);
+      auto second_output = UnallocatedOperand::cast(output);
       if (second_output->HasSameAsInputPolicy()) {
         DCHECK(i == 0);  // Only valid for first output.
         UnallocatedOperand* cur_input =
@@ -911,10 +1180,9 @@ void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
         int output_vreg = second_output->virtual_register();
         int input_vreg = cur_input->virtual_register();
 
-        UnallocatedOperand* input_copy =
-            cur_input->CopyUnconstrained(code_zone());
+        auto input_copy = cur_input->CopyUnconstrained(code_zone());
         cur_input->set_virtual_register(second_output->virtual_register());
-        AddConstraintsGapMove(gap_index, input_copy, cur_input);
+        AddGapMove(gap_index, GapInstruction::END, input_copy, cur_input);
 
         if (HasTaggedValue(input_vreg) && !HasTaggedValue(output_vreg)) {
           int index = gap_index + 1;
@@ -938,7 +1206,7 @@ void RegisterAllocator::MeetConstraintsBetween(Instruction* first,
 
 bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
   for (size_t i = 0; i < instr->OutputCount(); i++) {
-    InstructionOperand* output = instr->OutputAt(i);
+    auto output = instr->OutputAt(i);
     if (output->IsRegister() && output->index() == index) return true;
   }
   return false;
@@ -948,7 +1216,7 @@ bool RegisterAllocator::IsOutputRegisterOf(Instruction* instr, int index) {
 bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
                                                  int index) {
   for (size_t i = 0; i < instr->OutputCount(); i++) {
-    InstructionOperand* output = instr->OutputAt(i);
+    auto output = instr->OutputAt(i);
     if (output->IsDoubleRegister() && output->index() == index) return true;
   }
   return false;
@@ -958,59 +1226,62 @@ bool RegisterAllocator::IsOutputDoubleRegisterOf(Instruction* instr,
 void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
                                             BitVector* live) {
   int block_start = block->first_instruction_index();
-
-  LifetimePosition block_start_position =
+  auto block_start_position =
       LifetimePosition::FromInstructionIndex(block_start);
 
   for (int index = block->last_instruction_index(); index >= block_start;
        index--) {
-    LifetimePosition curr_position =
-        LifetimePosition::FromInstructionIndex(index);
-
-    Instruction* instr = InstructionAt(index);
-    DCHECK(instr != NULL);
+    auto curr_position = LifetimePosition::FromInstructionIndex(index);
+    auto instr = InstructionAt(index);
+    DCHECK(instr != nullptr);
     if (instr->IsGapMoves()) {
       // Process the moves of the gap instruction, making their sources live.
-      GapInstruction* gap = code()->GapAt(index);
-
-      // TODO(titzer): no need to create the parallel move if it doesn't exist.
-      ParallelMove* move =
-          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
-      const ZoneList<MoveOperands>* move_operands = move->move_operands();
-      for (int i = 0; i < move_operands->length(); ++i) {
-        MoveOperands* cur = &move_operands->at(i);
-        if (cur->IsIgnored()) continue;
-        InstructionOperand* from = cur->source();
-        InstructionOperand* to = cur->destination();
-        InstructionOperand* hint = to;
-        if (to->IsUnallocated()) {
-          int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
-          LiveRange* to_range = LiveRangeFor(to_vreg);
-          if (to_range->is_phi()) {
-            if (to_range->is_non_loop_phi()) {
-              hint = to_range->current_hint_operand();
-            }
-          } else {
-            if (live->Contains(to_vreg)) {
-              Define(curr_position, to, from);
-              live->Remove(to_vreg);
+      auto gap = code()->GapAt(index);
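+      // Both halves of the gap are scanned: END moves at the instruction's
+      // end position and START moves at its start, so liveness is attributed
+      // to the correct half.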
+      const GapInstruction::InnerPosition kPositions[] = {
+          GapInstruction::END, GapInstruction::START};
+      for (auto position : kPositions) {
+        auto move = gap->GetParallelMove(position);
+        if (move == nullptr) continue;
+        if (position == GapInstruction::END) {
+          curr_position = curr_position.InstructionEnd();
+        } else {
+          curr_position = curr_position.InstructionStart();
+        }
+        auto move_ops = move->move_operands();
+        for (auto cur = move_ops->begin(); cur != move_ops->end(); ++cur) {
+          auto from = cur->source();
+          auto to = cur->destination();
+          auto hint = to;
+          if (to->IsUnallocated()) {
+            int to_vreg = UnallocatedOperand::cast(to)->virtual_register();
+            auto to_range = LiveRangeFor(to_vreg);
+            if (to_range->is_phi()) {
+              DCHECK(!FLAG_turbo_delay_ssa_decon);
+              if (to_range->is_non_loop_phi()) {
+                hint = to_range->current_hint_operand();
+              }
             } else {
-              cur->Eliminate();
-              continue;
+              if (live->Contains(to_vreg)) {
+                Define(curr_position, to, from);
+                live->Remove(to_vreg);
+              } else {
+                cur->Eliminate();
+                continue;
+              }
             }
+          } else {
+            Define(curr_position, to, from);
+          }
+          Use(block_start_position, curr_position, from, hint);
+          if (from->IsUnallocated()) {
+            live->Add(UnallocatedOperand::cast(from)->virtual_register());
           }
-        } else {
-          Define(curr_position, to, from);
-        }
-        Use(block_start_position, curr_position, from, hint);
-        if (from->IsUnallocated()) {
-          live->Add(UnallocatedOperand::cast(from)->virtual_register());
         }
       }
     } else {
       // Process output, inputs, and temps of this non-gap instruction.
       for (size_t i = 0; i < instr->OutputCount(); i++) {
-        InstructionOperand* output = instr->OutputAt(i);
+        auto output = instr->OutputAt(i);
         if (output->IsUnallocated()) {
           int out_vreg = UnallocatedOperand::cast(output)->virtual_register();
           live->Remove(out_vreg);
@@ -1018,13 +1289,13 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
           int out_vreg = output->index();
           live->Remove(out_vreg);
         }
-        Define(curr_position, output, NULL);
+        Define(curr_position, output, nullptr);
       }
 
       if (instr->ClobbersRegisters()) {
         for (int i = 0; i < config()->num_general_registers(); ++i) {
           if (!IsOutputRegisterOf(instr, i)) {
-            LiveRange* range = FixedLiveRangeFor(i);
+            auto range = FixedLiveRangeFor(i);
             range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
                                   local_zone());
           }
@@ -1034,7 +1305,7 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
       if (instr->ClobbersDoubleRegisters()) {
         for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
           if (!IsOutputDoubleRegisterOf(instr, i)) {
-            LiveRange* range = FixedDoubleLiveRangeFor(i);
+            auto range = FixedDoubleLiveRangeFor(i);
             range->AddUseInterval(curr_position, curr_position.InstructionEnd(),
                                   local_zone());
           }
@@ -1042,7 +1313,7 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
       }
 
       for (size_t i = 0; i < instr->InputCount(); i++) {
-        InstructionOperand* input = instr->InputAt(i);
+        auto input = instr->InputAt(i);
         if (input->IsImmediate()) continue;  // Ignore immediates.
         LifetimePosition use_pos;
         if (input->IsUnallocated() &&
@@ -1052,14 +1323,14 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
           use_pos = curr_position.InstructionEnd();
         }
 
-        Use(block_start_position, use_pos, input, NULL);
+        Use(block_start_position, use_pos, input, nullptr);
         if (input->IsUnallocated()) {
           live->Add(UnallocatedOperand::cast(input)->virtual_register());
         }
       }
 
       for (size_t i = 0; i < instr->TempCount(); i++) {
-        InstructionOperand* temp = instr->TempAt(i);
+        auto temp = instr->TempAt(i);
         if (instr->ClobbersTemps()) {
           if (temp->IsRegister()) continue;
           if (temp->IsUnallocated()) {
@@ -1069,8 +1340,9 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
             }
           }
         }
-        Use(block_start_position, curr_position.InstructionEnd(), temp, NULL);
-        Define(curr_position, temp, NULL);
+        Use(block_start_position, curr_position.InstructionEnd(), temp,
+            nullptr);
+        Define(curr_position, temp, nullptr);
       }
     }
   }
@@ -1079,93 +1351,38 @@ void RegisterAllocator::ProcessInstructions(const InstructionBlock* block,
 
 void RegisterAllocator::ResolvePhis(const InstructionBlock* block) {
   for (auto phi : block->phis()) {
-    UnallocatedOperand* phi_operand =
-        new (code_zone()) UnallocatedOperand(UnallocatedOperand::NONE);
+    if (FLAG_turbo_reuse_spill_slots) {
+      auto res = phi_map_.insert(
+          std::make_pair(phi->virtual_register(), PhiMapValue(phi, block)));
+      DCHECK(res.second);
+      USE(res);
+    }
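+    // phi_map_ lets TryReuseSpillForPhi recover the phi and its block from
+    // a live range id later during allocation.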
+    auto output = phi->output();
     int phi_vreg = phi->virtual_register();
-    phi_operand->set_virtual_register(phi_vreg);
-
-    for (size_t i = 0; i < phi->operands().size(); ++i) {
-      UnallocatedOperand* operand =
-          new (code_zone()) UnallocatedOperand(UnallocatedOperand::ANY);
-      operand->set_virtual_register(phi->operands()[i]);
-      InstructionBlock* cur_block =
-          code()->InstructionBlockAt(block->predecessors()[i]);
-      // The gap move must be added without any special processing as in
-      // the AddConstraintsGapMove.
-      code()->AddGapMove(cur_block->last_instruction_index() - 1, operand,
-                         phi_operand);
-
-      Instruction* branch = InstructionAt(cur_block->last_instruction_index());
-      DCHECK(!branch->HasPointerMap());
-      USE(branch);
-    }
-
-    LiveRange* live_range = LiveRangeFor(phi_vreg);
-    BlockStartInstruction* block_start =
-        code()->GetBlockStart(block->rpo_number());
-    block_start->GetOrCreateParallelMove(GapInstruction::START, code_zone())
-        ->AddMove(phi_operand, live_range->GetSpillOperand(), code_zone());
-    live_range->SetSpillStartIndex(block->first_instruction_index());
-
+    if (!FLAG_turbo_delay_ssa_decon) {
+      for (size_t i = 0; i < phi->operands().size(); ++i) {
+        InstructionBlock* cur_block =
+            code()->InstructionBlockAt(block->predecessors()[i]);
+        AddGapMove(cur_block->last_instruction_index() - 1, GapInstruction::END,
+                   phi->inputs()[i], output);
+        DCHECK(!InstructionAt(cur_block->last_instruction_index())
+                    ->HasPointerMap());
+      }
+    }
+    auto live_range = LiveRangeFor(phi_vreg);
+    int gap_index = block->first_instruction_index();
+    live_range->SpillAtDefinition(local_zone(), gap_index, output);
+    live_range->SetSpillStartIndex(gap_index);
     // We use the phi-ness of some nodes in some later heuristics.
     live_range->set_is_phi(true);
-    if (!block->IsLoopHeader()) {
-      live_range->set_is_non_loop_phi(true);
-    }
+    live_range->set_is_non_loop_phi(!block->IsLoopHeader());
   }
 }
 
 
-bool RegisterAllocator::Allocate(PipelineStatistics* stats) {
-  assigned_registers_ = new (code_zone())
-      BitVector(config()->num_general_registers(), code_zone());
-  assigned_double_registers_ = new (code_zone())
-      BitVector(config()->num_aliased_double_registers(), code_zone());
-  {
-    PhaseScope phase_scope(stats, "meet register constraints");
-    MeetRegisterConstraints();
-  }
-  if (!AllocationOk()) return false;
-  {
-    PhaseScope phase_scope(stats, "resolve phis");
-    ResolvePhis();
-  }
-  {
-    PhaseScope phase_scope(stats, "build live ranges");
-    BuildLiveRanges();
-  }
-  {
-    PhaseScope phase_scope(stats, "allocate general registers");
-    AllocateGeneralRegisters();
-  }
-  if (!AllocationOk()) return false;
-  {
-    PhaseScope phase_scope(stats, "allocate double registers");
-    AllocateDoubleRegisters();
-  }
-  if (!AllocationOk()) return false;
-  {
-    PhaseScope phase_scope(stats, "populate pointer maps");
-    PopulatePointerMaps();
-  }
-  {
-    PhaseScope phase_scope(stats, "connect ranges");
-    ConnectRanges();
-  }
-  {
-    PhaseScope phase_scope(stats, "resolve control flow");
-    ResolveControlFlow();
-  }
-  frame()->SetAllocatedRegisters(assigned_registers_);
-  frame()->SetAllocatedDoubleRegisters(assigned_double_registers_);
-  return true;
-}
-
-
 void RegisterAllocator::MeetRegisterConstraints() {
   for (auto block : code()->instruction_blocks()) {
     MeetRegisterConstraints(block);
-    if (!AllocationOk()) return;
   }
 }
 
@@ -1183,7 +1400,7 @@ ParallelMove* RegisterAllocator::GetConnectingParallelMove(
     LifetimePosition pos) {
   int index = pos.InstructionIndex();
   if (code()->IsGapAt(index)) {
-    GapInstruction* gap = code()->GapAt(index);
+    auto gap = code()->GapAt(index);
     return gap->GetOrCreateParallelMove(
         pos.IsInstructionStart() ? GapInstruction::START : GapInstruction::END,
         code_zone());
@@ -1202,14 +1419,11 @@ const InstructionBlock* RegisterAllocator::GetInstructionBlock(
 
 
 void RegisterAllocator::ConnectRanges() {
-  for (int i = 0; i < live_ranges().length(); ++i) {
-    LiveRange* first_range = live_ranges().at(i);
-    if (first_range == NULL || first_range->parent() != NULL) continue;
-
-    LiveRange* second_range = first_range->next();
-    while (second_range != NULL) {
-      LifetimePosition pos = second_range->Start();
-
+  for (auto first_range : live_ranges()) {
+    if (first_range == nullptr || first_range->IsChild()) continue;
+    auto second_range = first_range->next();
+    while (second_range != nullptr) {
+      auto pos = second_range->Start();
       if (!second_range->IsSpilled()) {
         // Add gap move if the two live ranges touch and there is no block
         // boundary.
@@ -1220,16 +1434,13 @@ void RegisterAllocator::ConnectRanges() {
                 CanEagerlyResolveControlFlow(GetInstructionBlock(pos));
           }
           if (should_insert) {
-            ParallelMove* move = GetConnectingParallelMove(pos);
-            InstructionOperand* prev_operand =
-                first_range->CreateAssignedOperand(code_zone());
-            InstructionOperand* cur_operand =
-                second_range->CreateAssignedOperand(code_zone());
+            auto move = GetConnectingParallelMove(pos);
+            auto prev_operand = first_range->CreateAssignedOperand(code_zone());
+            auto cur_operand = second_range->CreateAssignedOperand(code_zone());
             move->AddMove(prev_operand, cur_operand, code_zone());
           }
         }
       }
-
       first_range = second_range;
       second_range = second_range->next();
     }
@@ -1277,15 +1488,15 @@ class LiveRangeBoundArray {
  public:
   LiveRangeBoundArray() : length_(0), start_(nullptr) {}
 
-  bool ShouldInitialize() { return start_ == NULL; }
+  bool ShouldInitialize() { return start_ == nullptr; }
 
   void Initialize(Zone* zone, const LiveRange* const range) {
     size_t length = 0;
-    for (const LiveRange* i = range; i != NULL; i = i->next()) length++;
+    for (auto i = range; i != nullptr; i = i->next()) length++;
     start_ = zone->NewArray<LiveRangeBound>(static_cast<int>(length));
     length_ = length;
-    LiveRangeBound* curr = start_;
-    for (const LiveRange* i = range; i != NULL; i = i->next(), ++curr) {
+    auto curr = start_;
+    for (auto i = range; i != nullptr; i = i->next(), ++curr) {
       new (curr) LiveRangeBound(i);
     }
   }
@@ -1296,7 +1507,7 @@ class LiveRangeBoundArray {
     while (true) {
       size_t current_index = left_index + (right_index - left_index) / 2;
       DCHECK(right_index > current_index);
-      LiveRangeBound* bound = &start_[current_index];
+      auto bound = &start_[current_index];
       if (bound->start_.Value() <= position.Value()) {
         if (position.Value() < bound->end_.Value()) return bound;
         DCHECK(left_index < current_index);
@@ -1307,13 +1518,25 @@ class LiveRangeBoundArray {
     }
   }
 
+  LiveRangeBound* FindPred(const InstructionBlock* pred) {
+    auto pred_end =
+        LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
+    return Find(pred_end);
+  }
+
+  LiveRangeBound* FindSucc(const InstructionBlock* succ) {
+    auto succ_start =
+        LifetimePosition::FromInstructionIndex(succ->first_instruction_index());
+    return Find(succ_start);
+  }
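+  // FindPred/FindSucc locate the bound covering the end of a predecessor
+  // block or the start of a successor block; control-flow resolution uses
+  // them when rewriting phis under --turbo-delay-ssa-decon.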
+
   void Find(const InstructionBlock* block, const InstructionBlock* pred,
             FindResult* result) const {
-    const LifetimePosition pred_end =
+    auto pred_end =
         LifetimePosition::FromInstructionIndex(pred->last_instruction_index());
-    LiveRangeBound* bound = Find(pred_end);
+    auto bound = Find(pred_end);
     result->pred_cover_ = bound->range_;
-    const LifetimePosition cur_start = LifetimePosition::FromInstructionIndex(
+    auto cur_start = LifetimePosition::FromInstructionIndex(
         block->first_instruction_index());
     // Common case.
     if (bound->CanCover(cur_start)) {
@@ -1321,7 +1544,7 @@ class LiveRangeBoundArray {
       return;
     }
     result->cur_cover_ = Find(cur_start)->range_;
-    DCHECK(result->pred_cover_ != NULL && result->cur_cover_ != NULL);
+    DCHECK(result->pred_cover_ != nullptr && result->cur_cover_ != nullptr);
   }
 
  private:
@@ -1336,7 +1559,7 @@ class LiveRangeFinder {
  public:
   explicit LiveRangeFinder(const RegisterAllocator& allocator)
       : allocator_(allocator),
-        bounds_length_(allocator.live_ranges().length()),
+        bounds_length_(static_cast<int>(allocator.live_ranges().size())),
         bounds_(allocator.local_zone()->NewArray<LiveRangeBoundArray>(
             bounds_length_)) {
     for (int i = 0; i < bounds_length_; ++i) {
@@ -1346,9 +1569,9 @@ class LiveRangeFinder {
 
   LiveRangeBoundArray* ArrayFor(int operand_index) {
     DCHECK(operand_index < bounds_length_);
-    const LiveRange* range = allocator_.live_ranges()[operand_index];
+    auto range = allocator_.live_ranges()[operand_index];
     DCHECK(range != nullptr && !range->IsEmpty());
-    LiveRangeBoundArray* array = &bounds_[operand_index];
+    auto array = &bounds_[operand_index];
     if (array->ShouldInitialize()) {
       array->Initialize(allocator_.local_zone(), range);
     }
@@ -1371,19 +1594,40 @@ void RegisterAllocator::ResolveControlFlow() {
   LiveRangeFinder finder(*this);
   for (auto block : code()->instruction_blocks()) {
     if (CanEagerlyResolveControlFlow(block)) continue;
-    BitVector* live = live_in_sets_[block->rpo_number().ToInt()];
+    if (FLAG_turbo_delay_ssa_decon) {
+      // Resolve phis.
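+      // With --turbo-delay-ssa-decon, phis were not lowered to gap moves in
+      // ResolvePhis(), so their inputs and outputs are rewritten here to the
+      // operands assigned during allocation.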
+      for (auto phi : block->phis()) {
+        auto* block_bound =
+            finder.ArrayFor(phi->virtual_register())->FindSucc(block);
+        auto phi_output =
+            block_bound->range_->CreateAssignedOperand(code_zone());
+        phi->output()->ConvertTo(phi_output->kind(), phi_output->index());
+        size_t pred_index = 0;
+        for (auto pred : block->predecessors()) {
+          const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
+          auto* pred_bound = finder.ArrayFor(phi->operands()[pred_index])
+                                 ->FindPred(pred_block);
+          auto pred_op = pred_bound->range_->CreateAssignedOperand(code_zone());
+          phi->inputs()[pred_index] = pred_op;
+          ResolveControlFlow(block, phi_output, pred_block, pred_op);
+          pred_index++;
+        }
+      }
+    }
+    auto live = live_in_sets_[block->rpo_number().ToInt()];
     BitVector::Iterator iterator(live);
     while (!iterator.Done()) {
-      LiveRangeBoundArray* array = finder.ArrayFor(iterator.Current());
+      auto* array = finder.ArrayFor(iterator.Current());
       for (auto pred : block->predecessors()) {
         FindResult result;
-        const InstructionBlock* pred_block = code()->InstructionBlockAt(pred);
+        const auto* pred_block = code()->InstructionBlockAt(pred);
         array->Find(block, pred_block, &result);
         if (result.cur_cover_ == result.pred_cover_ ||
             result.cur_cover_->IsSpilled())
           continue;
-        ResolveControlFlow(block, result.cur_cover_, pred_block,
-                           result.pred_cover_);
+        auto pred_op = result.pred_cover_->CreateAssignedOperand(code_zone());
+        auto cur_op = result.cur_cover_->CreateAssignedOperand(code_zone());
+        ResolveControlFlow(block, cur_op, pred_block, pred_op);
       }
       iterator.Advance();
     }
@@ -1392,37 +1636,32 @@ void RegisterAllocator::ResolveControlFlow() {
 
 
 void RegisterAllocator::ResolveControlFlow(const InstructionBlock* block,
-                                           const LiveRange* cur_cover,
+                                           InstructionOperand* cur_op,
                                            const InstructionBlock* pred,
-                                           const LiveRange* pred_cover) {
-  InstructionOperand* pred_op = pred_cover->CreateAssignedOperand(code_zone());
-  InstructionOperand* cur_op = cur_cover->CreateAssignedOperand(code_zone());
-  if (!pred_op->Equals(cur_op)) {
-    GapInstruction* gap = NULL;
-    if (block->PredecessorCount() == 1) {
-      gap = code()->GapAt(block->first_instruction_index());
-    } else {
-      DCHECK(pred->SuccessorCount() == 1);
-      gap = GetLastGap(pred);
-
-      Instruction* branch = InstructionAt(pred->last_instruction_index());
-      DCHECK(!branch->HasPointerMap());
-      USE(branch);
-    }
-    gap->GetOrCreateParallelMove(GapInstruction::START, code_zone())
-        ->AddMove(pred_op, cur_op, code_zone());
+                                           InstructionOperand* pred_op) {
+  if (pred_op->Equals(cur_op)) return;
+  int gap_index;
+  GapInstruction::InnerPosition position;
+  if (block->PredecessorCount() == 1) {
+    gap_index = block->first_instruction_index();
+    position = GapInstruction::START;
+  } else {
+    DCHECK(pred->SuccessorCount() == 1);
+    DCHECK(!InstructionAt(pred->last_instruction_index())->HasPointerMap());
+    gap_index = pred->last_instruction_index() - 1;
+    position = GapInstruction::END;
   }
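+  // The move lands either at the START of this block's first gap (single
+  // predecessor) or at the END of the predecessor's last gap (single
+  // successor, per the DCHECK above).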
+  AddGapMove(gap_index, position, pred_op, cur_op);
 }
 
 
 void RegisterAllocator::BuildLiveRanges() {
-  InitializeLivenessAnalysis();
   // Process the blocks in reverse order.
   for (int block_id = code()->InstructionBlockCount() - 1; block_id >= 0;
        --block_id) {
-    const InstructionBlock* block =
+    auto block =
         code()->InstructionBlockAt(BasicBlock::RpoNumber::FromInt(block_id));
-    BitVector* live = ComputeLiveOut(block);
+    auto live = ComputeLiveOut(block);
     // Initially consider all live_out values live for the entire block. We
     // will shorten these intervals if necessary.
     AddInitialIntervals(block, live);
@@ -1436,29 +1675,27 @@ void RegisterAllocator::BuildLiveRanges() {
       // block.
       int phi_vreg = phi->virtual_register();
       live->Remove(phi_vreg);
-
-      InstructionOperand* hint = NULL;
-      InstructionOperand* phi_operand = NULL;
-      GapInstruction* gap =
-          GetLastGap(code()->InstructionBlockAt(block->predecessors()[0]));
-
-      // TODO(titzer): no need to create the parallel move if it doesn't exit.
-      ParallelMove* move =
-          gap->GetOrCreateParallelMove(GapInstruction::START, code_zone());
-      for (int j = 0; j < move->move_operands()->length(); ++j) {
-        InstructionOperand* to = move->move_operands()->at(j).destination();
-        if (to->IsUnallocated() &&
-            UnallocatedOperand::cast(to)->virtual_register() == phi_vreg) {
-          hint = move->move_operands()->at(j).source();
-          phi_operand = to;
-          break;
+      if (!FLAG_turbo_delay_ssa_decon) {
+        InstructionOperand* hint = nullptr;
+        InstructionOperand* phi_operand = nullptr;
+        auto gap =
+            GetLastGap(code()->InstructionBlockAt(block->predecessors()[0]));
+        auto move =
+            gap->GetOrCreateParallelMove(GapInstruction::END, code_zone());
+        for (int j = 0; j < move->move_operands()->length(); ++j) {
+          auto to = move->move_operands()->at(j).destination();
+          if (to->IsUnallocated() &&
+              UnallocatedOperand::cast(to)->virtual_register() == phi_vreg) {
+            hint = move->move_operands()->at(j).source();
+            phi_operand = to;
+            break;
+          }
         }
+        DCHECK(hint != nullptr);
+        auto block_start = LifetimePosition::FromInstructionIndex(
+            block->first_instruction_index());
+        Define(block_start, phi_operand, hint);
       }
-      DCHECK(hint != NULL);
-
-      LifetimePosition block_start = LifetimePosition::FromInstructionIndex(
-          block->first_instruction_index());
-      Define(block_start, phi_operand, hint);
     }
 
     // Now live is live_in for this block except not including values live
@@ -1469,82 +1706,71 @@ void RegisterAllocator::BuildLiveRanges() {
       // Add a live range stretching from the first loop instruction to the last
       // for each value live on entry to the header.
       BitVector::Iterator iterator(live);
-      LifetimePosition start = LifetimePosition::FromInstructionIndex(
+      auto start = LifetimePosition::FromInstructionIndex(
           block->first_instruction_index());
-      LifetimePosition end =
-          LifetimePosition::FromInstructionIndex(
-              code()->LastLoopInstructionIndex(block)).NextInstruction();
+      auto end = LifetimePosition::FromInstructionIndex(
+                     code()->LastLoopInstructionIndex(block)).NextInstruction();
       while (!iterator.Done()) {
         int operand_index = iterator.Current();
-        LiveRange* range = LiveRangeFor(operand_index);
+        auto range = LiveRangeFor(operand_index);
         range->EnsureInterval(start, end, local_zone());
         iterator.Advance();
       }
-
       // Insert all values into the live in sets of all blocks in the loop.
       for (int i = block->rpo_number().ToInt() + 1;
            i < block->loop_end().ToInt(); ++i) {
         live_in_sets_[i]->Union(*live);
       }
     }
+  }
 
-#ifdef DEBUG
-    if (block_id == 0) {
-      BitVector::Iterator iterator(live);
-      bool found = false;
-      while (!iterator.Done()) {
-        found = true;
-        int operand_index = iterator.Current();
-        PrintF("Register allocator error: live v%d reached first block.\n",
-               operand_index);
-        LiveRange* range = LiveRangeFor(operand_index);
-        PrintF("  (first use is at %d)\n", range->first_pos()->pos().Value());
-        if (debug_name() == nullptr) {
-          PrintF("\n");
-        } else {
-          PrintF("  (function: %s)\n", debug_name());
+  for (auto range : live_ranges()) {
+    if (range == nullptr) continue;
+    range->kind_ = RequiredRegisterKind(range->id());
+    // TODO(bmeurer): This is a horrible hack to make sure that for constant
+    // live ranges, every use requires the constant to be in a register.
+    // Without this hack, all uses with "any" policy would get the constant
+    // operand assigned.
+    if (range->HasSpillOperand() && range->GetSpillOperand()->IsConstant()) {
+      for (auto pos = range->first_pos(); pos != nullptr; pos = pos->next_) {
+        pos->register_beneficial_ = true;
+        // TODO(dcarney): should the else case assert requires_reg_ == false?
+        // Can't mark phis as needing a register.
+        if (!code()
+                 ->InstructionAt(pos->pos().InstructionIndex())
+                 ->IsGapMoves()) {
+          pos->requires_reg_ = true;
         }
-        iterator.Advance();
       }
-      DCHECK(!found);
     }
-#endif
   }
+}
 
-  for (int i = 0; i < live_ranges_.length(); ++i) {
-    if (live_ranges_[i] != NULL) {
-      live_ranges_[i]->kind_ = RequiredRegisterKind(live_ranges_[i]->id());
-
-      // TODO(bmeurer): This is a horrible hack to make sure that for constant
-      // live ranges, every use requires the constant to be in a register.
-      // Without this hack, all uses with "any" policy would get the constant
-      // operand assigned.
-      LiveRange* range = live_ranges_[i];
-      if (range->HasAllocatedSpillOperand() &&
-          range->GetSpillOperand()->IsConstant()) {
-        for (UsePosition* pos = range->first_pos(); pos != NULL;
-             pos = pos->next_) {
-          pos->register_beneficial_ = true;
-          // TODO(dcarney): should the else case assert requires_reg_ == false?
-          // Can't mark phis as needing a register.
-          if (!code()
-                   ->InstructionAt(pos->pos().InstructionIndex())
-                   ->IsGapMoves()) {
-            pos->requires_reg_ = true;
-          }
-        }
-      }
+
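+// Diagnostic extracted from the old #ifdef DEBUG block in BuildLiveRanges():
+// any value still live on entry to the first block was used without ever
+// being defined, presumably so callers can assert on the result.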
+bool RegisterAllocator::ExistsUseWithoutDefinition() {
+  bool found = false;
+  BitVector::Iterator iterator(live_in_sets_[0]);
+  while (!iterator.Done()) {
+    found = true;
+    int operand_index = iterator.Current();
+    PrintF("Register allocator error: live v%d reached first block.\n",
+           operand_index);
+    LiveRange* range = LiveRangeFor(operand_index);
+    PrintF("  (first use is at %d)\n", range->first_pos()->pos().Value());
+    if (debug_name() == nullptr) {
+      PrintF("\n");
+    } else {
+      PrintF("  (function: %s)\n", debug_name());
     }
+    iterator.Advance();
   }
+  return found;
 }
 
 
 bool RegisterAllocator::SafePointsAreInOrder() const {
   int safe_point = 0;
-  const PointerMapDeque* pointer_maps = code()->pointer_maps();
-  for (PointerMapDeque::const_iterator it = pointer_maps->begin();
-       it != pointer_maps->end(); ++it) {
-    PointerMap* map = *it;
+  for (auto map : *code()->pointer_maps()) {
     if (safe_point > map->instruction_position()) return false;
     safe_point = map->instruction_position();
   }
@@ -1558,13 +1784,12 @@ void RegisterAllocator::PopulatePointerMaps() {
   // Iterate over all safe point positions and record a pointer
   // for all spilled live ranges at this point.
   int last_range_start = 0;
-  const PointerMapDeque* pointer_maps = code()->pointer_maps();
+  auto pointer_maps = code()->pointer_maps();
   PointerMapDeque::const_iterator first_it = pointer_maps->begin();
-  for (int range_idx = 0; range_idx < live_ranges().length(); ++range_idx) {
-    LiveRange* range = live_ranges().at(range_idx);
-    if (range == NULL) continue;
+  for (LiveRange* range : live_ranges()) {
+    if (range == nullptr) continue;
     // Iterate over the first parts of multi-part live ranges.
-    if (range->parent() != NULL) continue;
+    if (range->IsChild()) continue;
     // Skip non-reference values.
     if (!HasTaggedValue(range->id())) continue;
     // Skip empty live ranges.
@@ -1573,8 +1798,8 @@ void RegisterAllocator::PopulatePointerMaps() {
     // Find the extent of the range and its children.
     int start = range->Start().InstructionIndex();
     int end = 0;
-    for (LiveRange* cur = range; cur != NULL; cur = cur->next()) {
-      LifetimePosition this_end = cur->End();
+    for (auto cur = range; cur != nullptr; cur = cur->next()) {
+      auto this_end = cur->End();
       if (this_end.InstructionIndex() > end) end = this_end.InstructionIndex();
       DCHECK(cur->Start().InstructionIndex() >= start);
     }
@@ -1587,14 +1812,13 @@ void RegisterAllocator::PopulatePointerMaps() {
     // Step across all the safe points that are before the start of this range,
     // recording how far we step in order to save doing this for the next range.
     for (; first_it != pointer_maps->end(); ++first_it) {
-      PointerMap* map = *first_it;
+      auto map = *first_it;
       if (map->instruction_position() >= start) break;
     }
 
     // Step through the safe points to see whether they are in the range.
-    for (PointerMapDeque::const_iterator it = first_it;
-         it != pointer_maps->end(); ++it) {
-      PointerMap* map = *it;
+    for (auto it = first_it; it != pointer_maps->end(); ++it) {
+      auto map = *it;
       int safe_point = map->instruction_position();
 
       // The safe points are sorted so we can stop searching here.
@@ -1602,17 +1826,16 @@ void RegisterAllocator::PopulatePointerMaps() {
 
       // Advance to the next active range that covers the current
       // safe point position.
-      LifetimePosition safe_point_pos =
-          LifetimePosition::FromInstructionIndex(safe_point);
-      LiveRange* cur = range;
-      while (cur != NULL && !cur->Covers(safe_point_pos)) {
+      auto safe_point_pos = LifetimePosition::FromInstructionIndex(safe_point);
+      auto cur = range;
+      while (cur != nullptr && !cur->Covers(safe_point_pos)) {
         cur = cur->next();
       }
-      if (cur == NULL) continue;
+      if (cur == nullptr) continue;
 
       // Check if the live range is spilled and the safe point is after
       // the spill position.
-      if (range->HasAllocatedSpillOperand() &&
+      if (range->HasSpillOperand() &&
           safe_point >= range->spill_start_index() &&
           !range->GetSpillOperand()->IsConstant()) {
         TraceAlloc("Pointer for range %d (spilled at %d) at safe point %d\n",
@@ -1649,59 +1872,59 @@ void RegisterAllocator::AllocateDoubleRegisters() {
 
 
 void RegisterAllocator::AllocateRegisters() {
-  DCHECK(unhandled_live_ranges_.is_empty());
+  DCHECK(unhandled_live_ranges().empty());
 
-  for (int i = 0; i < live_ranges_.length(); ++i) {
-    if (live_ranges_[i] != NULL) {
-      if (live_ranges_[i]->Kind() == mode_) {
-        AddToUnhandledUnsorted(live_ranges_[i]);
-      }
+  for (auto range : live_ranges()) {
+    if (range == nullptr) continue;
+    if (range->Kind() == mode_) {
+      AddToUnhandledUnsorted(range);
     }
   }
   SortUnhandled();
   DCHECK(UnhandledIsSorted());
 
-  DCHECK(reusable_slots_.is_empty());
-  DCHECK(active_live_ranges_.is_empty());
-  DCHECK(inactive_live_ranges_.is_empty());
+  DCHECK(reusable_slots().empty());
+  DCHECK(active_live_ranges().empty());
+  DCHECK(inactive_live_ranges().empty());
 
   if (mode_ == DOUBLE_REGISTERS) {
     for (int i = 0; i < config()->num_aliased_double_registers(); ++i) {
-      LiveRange* current = fixed_double_live_ranges_.at(i);
-      if (current != NULL) {
+      auto current = fixed_double_live_ranges()[i];
+      if (current != nullptr) {
         AddToInactive(current);
       }
     }
   } else {
     DCHECK(mode_ == GENERAL_REGISTERS);
     for (auto current : fixed_live_ranges()) {
-      if (current != NULL) {
+      if (current != nullptr) {
         AddToInactive(current);
       }
     }
   }
 
-  while (!unhandled_live_ranges_.is_empty()) {
+  while (!unhandled_live_ranges().empty()) {
     DCHECK(UnhandledIsSorted());
-    LiveRange* current = unhandled_live_ranges_.RemoveLast();
+    auto current = unhandled_live_ranges().back();
+    unhandled_live_ranges().pop_back();
     DCHECK(UnhandledIsSorted());
-    LifetimePosition position = current->Start();
+    auto position = current->Start();
 #ifdef DEBUG
     allocation_finger_ = position;
 #endif
     TraceAlloc("Processing interval %d start=%d\n", current->id(),
                position.Value());
 
-    if (current->HasAllocatedSpillOperand()) {
+    if (!current->HasNoSpillType()) {
       TraceAlloc("Live range %d already has a spill operand\n", current->id());
-      LifetimePosition next_pos = position;
+      auto next_pos = position;
       if (code()->IsGapAt(next_pos.InstructionIndex())) {
         next_pos = next_pos.NextInstruction();
       }
-      UsePosition* pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
+      auto pos = current->NextUsePositionRegisterIsBeneficial(next_pos);
       // If the range already has a spill operand and it doesn't need a
       // register immediately, split it and spill the first part of the range.
-      if (pos == NULL) {
+      if (pos == nullptr) {
         Spill(current);
         continue;
       } else if (pos->pos().Value() >
@@ -1715,8 +1938,15 @@ void RegisterAllocator::AllocateRegisters() {
       }
     }
 
-    for (int i = 0; i < active_live_ranges_.length(); ++i) {
-      LiveRange* cur_active = active_live_ranges_.at(i);
+    if (FLAG_turbo_reuse_spill_slots) {
+      if (TryReuseSpillForPhi(current)) {
+        continue;
+      }
+      if (!AllocationOk()) return;
+    }
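+    // If the phi heuristic spilled the range into a merged slot, it is done;
+    // otherwise fall through to the normal linear-scan handling below.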
+
+    for (size_t i = 0; i < active_live_ranges().size(); ++i) {
+      auto cur_active = active_live_ranges()[i];
       if (cur_active->End().Value() <= position.Value()) {
         ActiveToHandled(cur_active);
         --i;  // The live range was removed from the list of active live ranges.
@@ -1726,8 +1956,8 @@ void RegisterAllocator::AllocateRegisters() {
       }
     }
 
-    for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
-      LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+    for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
+      auto cur_inactive = inactive_live_ranges()[i];
       if (cur_inactive->End().Value() <= position.Value()) {
         InactiveToHandled(cur_inactive);
         --i;  // Live range was removed from the list of inactive live ranges.
@@ -1750,9 +1980,9 @@ void RegisterAllocator::AllocateRegisters() {
     }
   }
 
-  reusable_slots_.Rewind(0);
-  active_live_ranges_.Rewind(0);
-  inactive_live_ranges_.Rewind(0);
+  reusable_slots().clear();
+  active_live_ranges().clear();
+  inactive_live_ranges().clear();
 }
 
 
@@ -1779,49 +2009,49 @@ RegisterKind RegisterAllocator::RequiredRegisterKind(
 
 void RegisterAllocator::AddToActive(LiveRange* range) {
   TraceAlloc("Add live range %d to active\n", range->id());
-  active_live_ranges_.Add(range, local_zone());
+  active_live_ranges().push_back(range);
 }
 
 
 void RegisterAllocator::AddToInactive(LiveRange* range) {
   TraceAlloc("Add live range %d to inactive\n", range->id());
-  inactive_live_ranges_.Add(range, local_zone());
+  inactive_live_ranges().push_back(range);
 }
 
 
 void RegisterAllocator::AddToUnhandledSorted(LiveRange* range) {
-  if (range == NULL || range->IsEmpty()) return;
+  if (range == nullptr || range->IsEmpty()) return;
   DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
   DCHECK(allocation_finger_.Value() <= range->Start().Value());
-  for (int i = unhandled_live_ranges_.length() - 1; i >= 0; --i) {
-    LiveRange* cur_range = unhandled_live_ranges_.at(i);
-    if (range->ShouldBeAllocatedBefore(cur_range)) {
-      TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
-      unhandled_live_ranges_.InsertAt(i + 1, range, local_zone());
-      DCHECK(UnhandledIsSorted());
-      return;
-    }
+  for (int i = static_cast<int>(unhandled_live_ranges().size() - 1); i >= 0;
+       --i) {
+    auto cur_range = unhandled_live_ranges().at(i);
+    if (!range->ShouldBeAllocatedBefore(cur_range)) continue;
+    TraceAlloc("Add live range %d to unhandled at %d\n", range->id(), i + 1);
+    auto it = unhandled_live_ranges().begin() + (i + 1);
+    unhandled_live_ranges().insert(it, range);
+    DCHECK(UnhandledIsSorted());
+    return;
   }
   TraceAlloc("Add live range %d to unhandled at start\n", range->id());
-  unhandled_live_ranges_.InsertAt(0, range, local_zone());
+  unhandled_live_ranges().insert(unhandled_live_ranges().begin(), range);
   DCHECK(UnhandledIsSorted());
 }
 
 
 void RegisterAllocator::AddToUnhandledUnsorted(LiveRange* range) {
-  if (range == NULL || range->IsEmpty()) return;
+  if (range == nullptr || range->IsEmpty()) return;
   DCHECK(!range->HasRegisterAssigned() && !range->IsSpilled());
   TraceAlloc("Add live range %d to unhandled unsorted at end\n", range->id());
-  unhandled_live_ranges_.Add(range, local_zone());
+  unhandled_live_ranges().push_back(range);
 }
 
 
-static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
-  DCHECK(!(*a)->ShouldBeAllocatedBefore(*b) ||
-         !(*b)->ShouldBeAllocatedBefore(*a));
-  if ((*a)->ShouldBeAllocatedBefore(*b)) return 1;
-  if ((*b)->ShouldBeAllocatedBefore(*a)) return -1;
-  return (*a)->id() - (*b)->id();
+static bool UnhandledSortHelper(LiveRange* a, LiveRange* b) {
+  DCHECK(!a->ShouldBeAllocatedBefore(b) || !b->ShouldBeAllocatedBefore(a));
+  if (a->ShouldBeAllocatedBefore(b)) return false;
+  if (b->ShouldBeAllocatedBefore(a)) return true;
+  return a->id() < b->id();
 }
 
 
@@ -1830,15 +2060,16 @@ static int UnhandledSortHelper(LiveRange* const* a, LiveRange* const* b) {
 // algorithm because it is efficient to remove elements from the end.
 void RegisterAllocator::SortUnhandled() {
   TraceAlloc("Sort unhandled\n");
-  unhandled_live_ranges_.Sort(&UnhandledSortHelper);
+  std::sort(unhandled_live_ranges().begin(), unhandled_live_ranges().end(),
+            &UnhandledSortHelper);
 }
 
 
 bool RegisterAllocator::UnhandledIsSorted() {
-  int len = unhandled_live_ranges_.length();
-  for (int i = 1; i < len; i++) {
-    LiveRange* a = unhandled_live_ranges_.at(i - 1);
-    LiveRange* b = unhandled_live_ranges_.at(i);
+  size_t len = unhandled_live_ranges().size();
+  for (size_t i = 1; i < len; i++) {
+    auto a = unhandled_live_ranges().at(i - 1);
+    auto b = unhandled_live_ranges().at(i);
     if (a->Start().Value() < b->Start().Value()) return false;
   }
   return true;
@@ -1846,60 +2077,55 @@ bool RegisterAllocator::UnhandledIsSorted() {
 
 
 void RegisterAllocator::FreeSpillSlot(LiveRange* range) {
+  DCHECK(!FLAG_turbo_reuse_spill_slots);
   // Check that we are the last range.
-  if (range->next() != NULL) return;
-
-  if (!range->TopLevel()->HasAllocatedSpillOperand()) return;
-
-  InstructionOperand* spill_operand = range->TopLevel()->GetSpillOperand();
+  if (range->next() != nullptr) return;
+  if (!range->TopLevel()->HasSpillOperand()) return;
+  auto spill_operand = range->TopLevel()->GetSpillOperand();
   if (spill_operand->IsConstant()) return;
   if (spill_operand->index() >= 0) {
-    reusable_slots_.Add(range, local_zone());
+    reusable_slots().push_back(range);
   }
 }
 
 
 InstructionOperand* RegisterAllocator::TryReuseSpillSlot(LiveRange* range) {
-  if (reusable_slots_.is_empty()) return NULL;
-  if (reusable_slots_.first()->End().Value() >
+  DCHECK(!FLAG_turbo_reuse_spill_slots);
+  if (reusable_slots().empty()) return nullptr;
+  if (reusable_slots().front()->End().Value() >
       range->TopLevel()->Start().Value()) {
-    return NULL;
+    return nullptr;
   }
-  InstructionOperand* result =
-      reusable_slots_.first()->TopLevel()->GetSpillOperand();
-  reusable_slots_.Remove(0);
+  auto result = reusable_slots().front()->TopLevel()->GetSpillOperand();
+  reusable_slots().erase(reusable_slots().begin());
   return result;
 }
 
 
 void RegisterAllocator::ActiveToHandled(LiveRange* range) {
-  DCHECK(active_live_ranges_.Contains(range));
-  active_live_ranges_.RemoveElement(range);
+  RemoveElement(&active_live_ranges(), range);
   TraceAlloc("Moving live range %d from active to handled\n", range->id());
-  FreeSpillSlot(range);
+  if (!FLAG_turbo_reuse_spill_slots) FreeSpillSlot(range);
 }
 
 
 void RegisterAllocator::ActiveToInactive(LiveRange* range) {
-  DCHECK(active_live_ranges_.Contains(range));
-  active_live_ranges_.RemoveElement(range);
-  inactive_live_ranges_.Add(range, local_zone());
+  RemoveElement(&active_live_ranges(), range);
+  inactive_live_ranges().push_back(range);
   TraceAlloc("Moving live range %d from active to inactive\n", range->id());
 }
 
 
 void RegisterAllocator::InactiveToHandled(LiveRange* range) {
-  DCHECK(inactive_live_ranges_.Contains(range));
-  inactive_live_ranges_.RemoveElement(range);
+  RemoveElement(&inactive_live_ranges(), range);
   TraceAlloc("Moving live range %d from inactive to handled\n", range->id());
-  FreeSpillSlot(range);
+  if (!FLAG_turbo_reuse_spill_slots) FreeSpillSlot(range);
 }
 
 
 void RegisterAllocator::InactiveToActive(LiveRange* range) {
-  DCHECK(inactive_live_ranges_.Contains(range));
-  inactive_live_ranges_.RemoveElement(range);
-  active_live_ranges_.Add(range, local_zone());
+  RemoveElement(&inactive_live_ranges(), range);
+  active_live_ranges().push_back(range);
   TraceAlloc("Moving live range %d from inactive to active\n", range->id());
 }
 
@@ -1911,24 +2137,21 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
     free_until_pos[i] = LifetimePosition::MaxPosition();
   }
 
-  for (int i = 0; i < active_live_ranges_.length(); ++i) {
-    LiveRange* cur_active = active_live_ranges_.at(i);
+  for (auto cur_active : active_live_ranges()) {
     free_until_pos[cur_active->assigned_register()] =
         LifetimePosition::FromInstructionIndex(0);
   }
 
-  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
-    LiveRange* cur_inactive = inactive_live_ranges_.at(i);
+  for (auto cur_inactive : inactive_live_ranges()) {
     DCHECK(cur_inactive->End().Value() > current->Start().Value());
-    LifetimePosition next_intersection =
-        cur_inactive->FirstIntersection(current);
+    auto next_intersection = cur_inactive->FirstIntersection(current);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = cur_inactive->assigned_register();
     free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
   }
 
-  InstructionOperand* hint = current->FirstHint();
-  if (hint != NULL && (hint->IsRegister() || hint->IsDoubleRegister())) {
+  auto hint = current->FirstHint();
+  if (hint != nullptr && (hint->IsRegister() || hint->IsDoubleRegister())) {
     int register_index = hint->index();
     TraceAlloc(
         "Found reg hint %s (free until [%d) for live range %d (end %d[).\n",
@@ -1952,7 +2175,7 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
     }
   }
 
-  LifetimePosition pos = free_until_pos[reg];
+  auto pos = free_until_pos[reg];
 
   if (pos.Value() <= current->Start().Value()) {
     // All registers are blocked.
@@ -1962,12 +2185,11 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
   if (pos.Value() < current->End().Value()) {
     // Register reg is available at the range start but becomes blocked before
     // the range end. Split current at position where it becomes blocked.
-    LiveRange* tail = SplitRangeAt(current, pos);
+    auto tail = SplitRangeAt(current, pos);
     if (!AllocationOk()) return false;
     AddToUnhandledSorted(tail);
   }
 
-
   // Register reg is available at the range start and is free until
   // the range end.
   DCHECK(pos.Value() >= current->End().Value());
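
TryAllocateFreeReg above reduces to three cases once free_until_pos is known:
every register is blocked already at the range start (the allocator falls back
to AllocateBlockedReg), the best register stays free past the range end
(assign it outright), or it is free at the start but blocked before the end
(split at the blocking position, assign, and re-queue the tail). A simplified
sketch with plain int positions standing in for LifetimePosition, and with the
register hint handling omitted:

    enum class FreeRegResult { kAllBlocked, kSplitAndAssign, kAssignWhole };

    // Decide among the three outcomes, mirroring the checks above.
    FreeRegResult TryFreeReg(const int* free_until_pos, int num_registers,
                             int current_start, int current_end, int* out_reg) {
      int reg = 0;  // Pick the register that stays free the longest.
      for (int i = 1; i < num_registers; ++i) {
        if (free_until_pos[i] > free_until_pos[reg]) reg = i;
      }
      int pos = free_until_pos[reg];
      if (pos <= current_start) return FreeRegResult::kAllBlocked;
      *out_reg = reg;
      if (pos < current_end) return FreeRegResult::kSplitAndAssign;
      return FreeRegResult::kAssignWhole;
    }
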
@@ -1980,31 +2202,30 @@ bool RegisterAllocator::TryAllocateFreeReg(LiveRange* current) {
 
 
 void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
-  UsePosition* register_use = current->NextRegisterPosition(current->Start());
-  if (register_use == NULL) {
+  auto register_use = current->NextRegisterPosition(current->Start());
+  if (register_use == nullptr) {
     // There is no use in the current live range that requires a register.
     // We can just spill it.
     Spill(current);
     return;
   }
 
-  LifetimePosition use_pos[RegisterConfiguration::kMaxGeneralRegisters];
+  LifetimePosition use_pos[RegisterConfiguration::kMaxDoubleRegisters];
   LifetimePosition block_pos[RegisterConfiguration::kMaxDoubleRegisters];
 
   for (int i = 0; i < num_registers_; i++) {
     use_pos[i] = block_pos[i] = LifetimePosition::MaxPosition();
   }
 
-  for (int i = 0; i < active_live_ranges_.length(); ++i) {
-    LiveRange* range = active_live_ranges_[i];
+  for (auto range : active_live_ranges()) {
     int cur_reg = range->assigned_register();
     if (range->IsFixed() || !range->CanBeSpilled(current->Start())) {
       block_pos[cur_reg] = use_pos[cur_reg] =
           LifetimePosition::FromInstructionIndex(0);
     } else {
-      UsePosition* next_use =
+      auto next_use =
           range->NextUsePositionRegisterIsBeneficial(current->Start());
-      if (next_use == NULL) {
+      if (next_use == nullptr) {
         use_pos[cur_reg] = range->End();
       } else {
         use_pos[cur_reg] = next_use->pos();
@@ -2012,10 +2233,9 @@ void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
     }
   }
 
-  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
-    LiveRange* range = inactive_live_ranges_.at(i);
+  for (auto range : inactive_live_ranges()) {
     DCHECK(range->End().Value() > current->Start().Value());
-    LifetimePosition next_intersection = range->FirstIntersection(current);
+    auto next_intersection = range->FirstIntersection(current);
     if (!next_intersection.IsValid()) continue;
     int cur_reg = range->assigned_register();
     if (range->IsFixed()) {
@@ -2033,7 +2253,7 @@ void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
     }
   }
 
-  LifetimePosition pos = use_pos[reg];
+  auto pos = use_pos[reg];
 
   if (pos.Value() < register_use->pos().Value()) {
     // All registers are blocked before the first use that requires a register.
@@ -2066,31 +2286,31 @@ void RegisterAllocator::AllocateBlockedReg(LiveRange* current) {
 
 static const InstructionBlock* GetContainingLoop(
     const InstructionSequence* sequence, const InstructionBlock* block) {
-  BasicBlock::RpoNumber index = block->loop_header();
-  if (!index.IsValid()) return NULL;
+  auto index = block->loop_header();
+  if (!index.IsValid()) return nullptr;
   return sequence->InstructionBlockAt(index);
 }
 
 
 LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
     LiveRange* range, LifetimePosition pos) {
-  const InstructionBlock* block = GetInstructionBlock(pos.InstructionStart());
-  const InstructionBlock* loop_header =
+  auto block = GetInstructionBlock(pos.InstructionStart());
+  auto loop_header =
       block->IsLoopHeader() ? block : GetContainingLoop(code(), block);
 
-  if (loop_header == NULL) return pos;
+  if (loop_header == nullptr) return pos;
 
-  UsePosition* prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
+  auto prev_use = range->PreviousUsePositionRegisterIsBeneficial(pos);
 
-  while (loop_header != NULL) {
+  while (loop_header != nullptr) {
     // We are going to spill the live range inside the loop.
     // If possible, try to move the spilling position backwards to the loop
     // header. This will reduce the number of memory moves on the back edge.
-    LifetimePosition loop_start = LifetimePosition::FromInstructionIndex(
+    auto loop_start = LifetimePosition::FromInstructionIndex(
         loop_header->first_instruction_index());
 
     if (range->Covers(loop_start)) {
-      if (prev_use == NULL || prev_use->pos().Value() < loop_start.Value()) {
+      if (prev_use == nullptr || prev_use->pos().Value() < loop_start.Value()) {
         // No register beneficial use inside the loop before the pos.
         pos = loop_start;
       }
@@ -2107,13 +2327,13 @@ LifetimePosition RegisterAllocator::FindOptimalSpillingPos(
 void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
   DCHECK(current->HasRegisterAssigned());
   int reg = current->assigned_register();
-  LifetimePosition split_pos = current->Start();
-  for (int i = 0; i < active_live_ranges_.length(); ++i) {
-    LiveRange* range = active_live_ranges_[i];
+  auto split_pos = current->Start();
+  for (size_t i = 0; i < active_live_ranges().size(); ++i) {
+    auto range = active_live_ranges()[i];
     if (range->assigned_register() == reg) {
-      UsePosition* next_pos = range->NextRegisterPosition(current->Start());
-      LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
-      if (next_pos == NULL) {
+      auto next_pos = range->NextRegisterPosition(current->Start());
+      auto spill_pos = FindOptimalSpillingPos(range, split_pos);
+      if (next_pos == nullptr) {
         SpillAfter(range, spill_pos);
       } else {
         // When spilling between spill_pos and next_pos ensure that the range
@@ -2132,14 +2352,14 @@ void RegisterAllocator::SplitAndSpillIntersecting(LiveRange* current) {
     }
   }
 
-  for (int i = 0; i < inactive_live_ranges_.length(); ++i) {
-    LiveRange* range = inactive_live_ranges_[i];
+  for (size_t i = 0; i < inactive_live_ranges().size(); ++i) {
+    auto range = inactive_live_ranges()[i];
     DCHECK(range->End().Value() > current->Start().Value());
     if (range->assigned_register() == reg && !range->IsFixed()) {
       LifetimePosition next_intersection = range->FirstIntersection(current);
       if (next_intersection.IsValid()) {
         UsePosition* next_pos = range->NextRegisterPosition(current->Start());
-        if (next_pos == NULL) {
+        if (next_pos == nullptr) {
           SpillAfter(range, split_pos);
         } else {
           next_intersection = Min(next_intersection, next_pos->pos());
@@ -2173,8 +2393,8 @@ LiveRange* RegisterAllocator::SplitRangeAt(LiveRange* range,
          !InstructionAt(pos.InstructionIndex())->IsControl());
 
   int vreg = GetVirtualRegister();
-  if (!AllocationOk()) return NULL;
-  LiveRange* result = LiveRangeFor(vreg);
+  if (!AllocationOk()) return nullptr;
+  auto result = LiveRangeFor(vreg);
   range->SplitAt(pos, result, local_zone());
   return result;
 }
@@ -2187,7 +2407,7 @@ LiveRange* RegisterAllocator::SplitBetween(LiveRange* range,
   TraceAlloc("Splitting live range %d in position between [%d, %d]\n",
              range->id(), start.Value(), end.Value());
 
-  LifetimePosition split_pos = FindOptimalSplitPos(start, end);
+  auto split_pos = FindOptimalSplitPos(start, end);
   DCHECK(split_pos.Value() >= start.Value());
   return SplitRangeAt(range, split_pos);
 }
@@ -2202,8 +2422,8 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
   // We have no choice
   if (start_instr == end_instr) return end;
 
-  const InstructionBlock* start_block = GetInstructionBlock(start);
-  const InstructionBlock* end_block = GetInstructionBlock(end);
+  auto start_block = GetInstructionBlock(start);
+  auto end_block = GetInstructionBlock(end);
 
   if (end_block == start_block) {
     // The interval is split in the same basic block. Split at the latest
@@ -2211,10 +2431,10 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
     return end;
   }
 
-  const InstructionBlock* block = end_block;
+  auto block = end_block;
   // Find header of outermost loop.
   // TODO(titzer): fix redundancy below.
-  while (GetContainingLoop(code(), block) != NULL &&
+  while (GetContainingLoop(code(), block) != nullptr &&
          GetContainingLoop(code(), block)->rpo_number().ToInt() >
              start_block->rpo_number().ToInt()) {
     block = GetContainingLoop(code(), block);
@@ -2230,7 +2450,7 @@ LifetimePosition RegisterAllocator::FindOptimalSplitPos(LifetimePosition start,
 
 
 void RegisterAllocator::SpillAfter(LiveRange* range, LifetimePosition pos) {
-  LiveRange* second_part = SplitRangeAt(range, pos);
+  auto second_part = SplitRangeAt(range, pos);
   if (!AllocationOk()) return;
   Spill(second_part);
 }
@@ -2247,14 +2467,14 @@ void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
                                           LifetimePosition until,
                                           LifetimePosition end) {
   CHECK(start.Value() < end.Value());
-  LiveRange* second_part = SplitRangeAt(range, start);
+  auto second_part = SplitRangeAt(range, start);
   if (!AllocationOk()) return;
 
   if (second_part->Start().Value() < end.Value()) {
     // The split result intersects with [start, end[.
     // Split it at a position inside ]start+1, end[, spill the middle part,
     // and put the rest back on the unhandled list.
-    LiveRange* third_part = SplitBetween(
+    auto third_part = SplitBetween(
         second_part, Max(second_part->Start().InstructionEnd(), until),
         end.PrevInstruction().InstructionEnd());
     if (!AllocationOk()) return;
@@ -2274,24 +2494,25 @@ void RegisterAllocator::SpillBetweenUntil(LiveRange* range,
 void RegisterAllocator::Spill(LiveRange* range) {
   DCHECK(!range->IsSpilled());
   TraceAlloc("Spilling live range %d\n", range->id());
-  LiveRange* first = range->TopLevel();
-
-  if (!first->HasAllocatedSpillOperand()) {
-    InstructionOperand* op = TryReuseSpillSlot(range);
-    if (op == NULL) {
-      // Allocate a new operand referring to the spill slot.
-      RegisterKind kind = range->Kind();
-      int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
-      if (kind == DOUBLE_REGISTERS) {
-        op = DoubleStackSlotOperand::Create(index, local_zone());
-      } else {
-        DCHECK(kind == GENERAL_REGISTERS);
-        op = StackSlotOperand::Create(index, local_zone());
+  auto first = range->TopLevel();
+  if (first->HasNoSpillType()) {
+    if (FLAG_turbo_reuse_spill_slots) {
+      AssignSpillRangeToLiveRange(first);
+    } else {
+      auto op = TryReuseSpillSlot(range);
+      if (op == nullptr) {
+        // Allocate a new operand referring to the spill slot.
+        RegisterKind kind = range->Kind();
+        int index = frame()->AllocateSpillSlot(kind == DOUBLE_REGISTERS);
+        auto op_kind = kind == DOUBLE_REGISTERS
+                           ? InstructionOperand::DOUBLE_STACK_SLOT
+                           : InstructionOperand::STACK_SLOT;
+        op = new (code_zone()) InstructionOperand(op_kind, index);
       }
+      first->SetSpillOperand(op);
     }
-    first->SetSpillOperand(op);
   }
-  range->MakeSpilled(code_zone());
+  range->MakeSpilled();
 }
 
 
@@ -2303,7 +2524,7 @@ int RegisterAllocator::RegisterCount() const { return num_registers_; }
 
 void RegisterAllocator::Verify() const {
   for (auto current : live_ranges()) {
-    if (current != NULL) current->Verify();
+    if (current != nullptr) current->Verify();
   }
 }
 
@@ -2322,6 +2543,6 @@ void RegisterAllocator::SetLiveRangeAssignedRegister(LiveRange* range,
   range->set_assigned_register(reg, code_zone());
 }
 
-}
-}
-}  // namespace v8::internal::compiler
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
index a6578af..b17837b 100644
@@ -12,8 +12,6 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-class PipelineStatistics;
-
 enum RegisterKind {
   UNALLOCATED_REGISTERS,
   GENERAL_REGISTERS,
@@ -107,7 +105,7 @@ class LifetimePosition FINAL {
 class UseInterval FINAL : public ZoneObject {
  public:
   UseInterval(LifetimePosition start, LifetimePosition end)
-      : start_(start), end_(end), next_(NULL) {
+      : start_(start), end_(end), next_(nullptr) {
     DCHECK(start.Value() < end.Value());
   }
 
@@ -150,7 +148,7 @@ class UsePosition FINAL : public ZoneObject {
               InstructionOperand* hint);
 
   InstructionOperand* operand() const { return operand_; }
-  bool HasOperand() const { return operand_ != NULL; }
+  bool HasOperand() const { return operand_ != nullptr; }
 
   InstructionOperand* hint() const { return hint_; }
   bool HasHint() const;
@@ -173,6 +171,7 @@ class UsePosition FINAL : public ZoneObject {
   DISALLOW_COPY_AND_ASSIGN(UsePosition);
 };
 
+class SpillRange;
 
 // Representation of SSA values' live ranges as a collection of (continuous)
 // intervals over the instruction ordering.
@@ -185,20 +184,20 @@ class LiveRange FINAL : public ZoneObject {
   UseInterval* first_interval() const { return first_interval_; }
   UsePosition* first_pos() const { return first_pos_; }
   LiveRange* parent() const { return parent_; }
-  LiveRange* TopLevel() { return (parent_ == NULL) ? this : parent_; }
+  LiveRange* TopLevel() { return (parent_ == nullptr) ? this : parent_; }
   const LiveRange* TopLevel() const {
-    return (parent_ == NULL) ? this : parent_;
+    return (parent_ == nullptr) ? this : parent_;
   }
   LiveRange* next() const { return next_; }
-  bool IsChild() const { return parent() != NULL; }
+  bool IsChild() const { return parent() != nullptr; }
   int id() const { return id_; }
   bool IsFixed() const { return id_ < 0; }
-  bool IsEmpty() const { return first_interval() == NULL; }
+  bool IsEmpty() const { return first_interval() == nullptr; }
   InstructionOperand* CreateAssignedOperand(Zone* zone) const;
   int assigned_register() const { return assigned_register_; }
   int spill_start_index() const { return spill_start_index_; }
   void set_assigned_register(int reg, Zone* zone);
-  void MakeSpilled(Zone* zone);
+  void MakeSpilled();
   bool is_phi() const { return is_phi_; }
   void set_is_phi(bool is_phi) { is_phi_ = is_phi; }
   bool is_non_loop_phi() const { return is_non_loop_phi_; }
@@ -246,9 +245,9 @@ class LiveRange FINAL : public ZoneObject {
   }
   InstructionOperand* FirstHint() const {
     UsePosition* pos = first_pos_;
-    while (pos != NULL && !pos->HasHint()) pos = pos->next();
-    if (pos != NULL) return pos->hint();
-    return NULL;
+    while (pos != nullptr && !pos->HasHint()) pos = pos->next();
+    if (pos != nullptr) return pos->hint();
+    return nullptr;
   }
 
   LifetimePosition Start() const {
@@ -261,9 +260,27 @@ class LiveRange FINAL : public ZoneObject {
     return last_interval_->end();
   }
 
-  bool HasAllocatedSpillOperand() const;
-  InstructionOperand* GetSpillOperand() const { return spill_operand_; }
+  enum class SpillType { kNoSpillType, kSpillOperand, kSpillRange };
+  SpillType spill_type() const { return spill_type_; }
+  InstructionOperand* GetSpillOperand() const {
+    return spill_type_ == SpillType::kSpillOperand ? spill_operand_ : nullptr;
+  }
+  SpillRange* GetSpillRange() const {
+    return spill_type_ == SpillType::kSpillRange ? spill_range_ : nullptr;
+  }
+  bool HasNoSpillType() const { return spill_type_ == SpillType::kNoSpillType; }
+  bool HasSpillOperand() const {
+    return spill_type_ == SpillType::kSpillOperand;
+  }
+  bool HasSpillRange() const { return spill_type_ == SpillType::kSpillRange; }
+
+  void SpillAtDefinition(Zone* zone, int gap_index,
+                         InstructionOperand* operand);
   void SetSpillOperand(InstructionOperand* operand);
+  void SetSpillRange(SpillRange* spill_range);
+  void CommitSpillOperand(InstructionOperand* operand);
+  void CommitSpillsAtDefinition(InstructionSequence* sequence,
+                                InstructionOperand* operand);
 
   void SetSpillStartIndex(int start) {
     spill_start_index_ = Min(start, spill_start_index_);
@@ -290,11 +307,14 @@ class LiveRange FINAL : public ZoneObject {
 #endif
 
  private:
-  void ConvertOperands(Zone* zone);
+  struct SpillAtDefinitionList;
+
+  void ConvertUsesToOperand(InstructionOperand* op);
   UseInterval* FirstSearchIntervalForPosition(LifetimePosition position) const;
   void AdvanceLastProcessedMarker(UseInterval* to_start_of,
                                   LifetimePosition but_not_past) const;
 
+  // TODO(dcarney): pack this structure better.
   int id_;
   bool spilled_;
   bool is_phi_;
@@ -311,8 +331,13 @@ class LiveRange FINAL : public ZoneObject {
   UsePosition* last_processed_use_;
   // This is used as a cache; it's invalid outside of BuildLiveRanges.
   InstructionOperand* current_hint_operand_;
-  InstructionOperand* spill_operand_;
   int spill_start_index_;
+  SpillType spill_type_;
+  union {
+    InstructionOperand* spill_operand_;
+    SpillRange* spill_range_;
+  };
+  SpillAtDefinitionList* spills_at_definition_;
 
   friend class RegisterAllocator;  // Assigns to kind_.
 
@@ -320,19 +345,41 @@ class LiveRange FINAL : public ZoneObject {
 };
 
 
-class RegisterAllocator FINAL {
+class SpillRange FINAL : public ZoneObject {
+ public:
+  SpillRange(LiveRange* range, Zone* zone);
+
+  UseInterval* interval() const { return use_interval_; }
+  RegisterKind Kind() const { return live_ranges_[0]->Kind(); }
+  bool IsEmpty() const { return live_ranges_.empty(); }
+  bool TryMerge(SpillRange* other);
+  void SetOperand(InstructionOperand* op);
+
+ private:
+  LifetimePosition End() const { return end_position_; }
+  ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
+  bool IsIntersectingWith(SpillRange* other) const;
+  // Merge intervals, making sure the use intervals are sorted
+  void MergeDisjointIntervals(UseInterval* other);
+
+  ZoneVector<LiveRange*> live_ranges_;
+  UseInterval* use_interval_;
+  LifetimePosition end_position_;
+
+  DISALLOW_COPY_AND_ASSIGN(SpillRange);
+};
+
+
+class RegisterAllocator FINAL : public ZoneObject {
  public:
   explicit RegisterAllocator(const RegisterConfiguration* config,
                              Zone* local_zone, Frame* frame,
                              InstructionSequence* code,
                              const char* debug_name = nullptr);
 
-  bool Allocate(PipelineStatistics* stats = NULL);
   bool AllocationOk() { return allocation_ok_; }
-  BitVector* assigned_registers() { return assigned_registers_; }
-  BitVector* assigned_double_registers() { return assigned_double_registers_; }
 
-  const ZoneList<LiveRange*>& live_ranges() const { return live_ranges_; }
+  const ZoneVector<LiveRange*>& live_ranges() const { return live_ranges_; }
   const ZoneVector<LiveRange*>& fixed_live_ranges() const {
     return fixed_live_ranges_;
   }
@@ -343,6 +390,36 @@ class RegisterAllocator FINAL {
   // This zone is for data structures only needed during register allocation.
   Zone* local_zone() const { return local_zone_; }
 
+  // Phase 1: insert moves to account for fixed register operands.
+  void MeetRegisterConstraints();
+
+  // Phase 2: deconstruct SSA by inserting moves in successors and the headers
+  // of blocks containing phis.
+  void ResolvePhis();
+
+  // Phase 3: compute liveness of all virtual registers.
+  void BuildLiveRanges();
+  bool ExistsUseWithoutDefinition();
+
+  // Phase 4: compute register assignments.
+  void AllocateGeneralRegisters();
+  void AllocateDoubleRegisters();
+
+  // Phase 5: reassign spill slots for maximal reuse.
+  void ReuseSpillSlots();
+
+  // Phase 6: commit assignment.
+  void CommitAssignment();
+
+  // Phase 7: compute values for pointer maps.
+  void PopulatePointerMaps();  // TODO(titzer): rename to PopulateReferenceMaps.
+
+  // Phase 8: reconnect split ranges with moves.
+  void ConnectRanges();
+
+  // Phase 9: insert moves to connect ranges across basic blocks.
+  void ResolveControlFlow();
+
  private:
   int GetVirtualRegister() {
     int vreg = code()->NextVirtualRegister();
@@ -365,24 +442,18 @@ class RegisterAllocator FINAL {
   // allocation.
   Zone* code_zone() const { return code()->zone(); }
 
+  BitVector* assigned_registers() { return assigned_registers_; }
+  BitVector* assigned_double_registers() { return assigned_double_registers_; }
+
 #ifdef DEBUG
   void Verify() const;
 #endif
 
-  void MeetRegisterConstraints();
-  void ResolvePhis();
-  void BuildLiveRanges();
-  void AllocateGeneralRegisters();
-  void AllocateDoubleRegisters();
-  void ConnectRanges();
-  void ResolveControlFlow();
-  void PopulatePointerMaps();  // TODO(titzer): rename to PopulateReferenceMaps.
   void AllocateRegisters();
   bool CanEagerlyResolveControlFlow(const InstructionBlock* block) const;
   bool SafePointsAreInOrder() const;
 
   // Liveness analysis support.
-  void InitializeLivenessAnalysis();
   BitVector* ComputeLiveOut(const InstructionBlock* block);
   void AddInitialIntervals(const InstructionBlock* block, BitVector* live_out);
   bool IsOutputRegisterOf(Instruction* instr, int index);
@@ -403,8 +474,8 @@ class RegisterAllocator FINAL {
               InstructionOperand* hint);
   void Use(LifetimePosition block_start, LifetimePosition position,
            InstructionOperand* operand, InstructionOperand* hint);
-  void AddConstraintsGapMove(int index, InstructionOperand* from,
-                             InstructionOperand* to);
+  void AddGapMove(int index, GapInstruction::InnerPosition position,
+                  InstructionOperand* from, InstructionOperand* to);
 
   // Helper methods for updating the live range lists.
   void AddToActive(LiveRange* range);
@@ -417,12 +488,14 @@ class RegisterAllocator FINAL {
   void ActiveToInactive(LiveRange* range);
   void InactiveToHandled(LiveRange* range);
   void InactiveToActive(LiveRange* range);
-  void FreeSpillSlot(LiveRange* range);
-  InstructionOperand* TryReuseSpillSlot(LiveRange* range);
 
   // Helper methods for allocating registers.
+  bool TryReuseSpillForPhi(LiveRange* range);
   bool TryAllocateFreeReg(LiveRange* range);
   void AllocateBlockedReg(LiveRange* range);
+  SpillRange* AssignSpillRangeToLiveRange(LiveRange* range);
+  void FreeSpillSlot(LiveRange* range);
+  InstructionOperand* TryReuseSpillSlot(LiveRange* range);
 
   // Live range splitting helpers.
 
@@ -468,9 +541,9 @@ class RegisterAllocator FINAL {
 
   // Helper methods for resolving control flow.
   void ResolveControlFlow(const InstructionBlock* block,
-                          const LiveRange* cur_cover,
+                          InstructionOperand* cur_op,
                           const InstructionBlock* pred,
-                          const LiveRange* pred_cover);
+                          InstructionOperand* pred_op);
 
   void SetLiveRangeAssignedRegister(LiveRange* range, int reg);
 
@@ -497,6 +570,29 @@ class RegisterAllocator FINAL {
   Frame* frame() const { return frame_; }
   const char* debug_name() const { return debug_name_; }
   const RegisterConfiguration* config() const { return config_; }
+  ZoneVector<LiveRange*>& live_ranges() { return live_ranges_; }
+  ZoneVector<LiveRange*>& fixed_live_ranges() { return fixed_live_ranges_; }
+  ZoneVector<LiveRange*>& fixed_double_live_ranges() {
+    return fixed_double_live_ranges_;
+  }
+  ZoneVector<LiveRange*>& unhandled_live_ranges() {
+    return unhandled_live_ranges_;
+  }
+  ZoneVector<LiveRange*>& active_live_ranges() { return active_live_ranges_; }
+  ZoneVector<LiveRange*>& inactive_live_ranges() {
+    return inactive_live_ranges_;
+  }
+  ZoneVector<LiveRange*>& reusable_slots() { return reusable_slots_; }
+  ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
+
+  struct PhiMapValue {
+    PhiMapValue(PhiInstruction* phi, const InstructionBlock* block)
+        : phi(phi), block(block) {}
+    PhiInstruction* const phi;
+    const InstructionBlock* const block;
+  };
+  typedef std::map<int, PhiMapValue, std::less<int>,
+                   zone_allocator<std::pair<int, PhiMapValue>>> PhiMap;
 
   Zone* const local_zone_;
   Frame* const frame_;
@@ -505,20 +601,23 @@ class RegisterAllocator FINAL {
 
   const RegisterConfiguration* config_;
 
+  PhiMap phi_map_;
+
   // During liveness analysis, keep a mapping from block id to live_in sets
   // for blocks already analyzed.
-  ZoneList<BitVector*> live_in_sets_;
+  ZoneVector<BitVector*> live_in_sets_;
 
   // Liveness analysis results.
-  ZoneList<LiveRange*> live_ranges_;
+  ZoneVector<LiveRange*> live_ranges_;
 
   // Lists of live ranges
   ZoneVector<LiveRange*> fixed_live_ranges_;
   ZoneVector<LiveRange*> fixed_double_live_ranges_;
-  ZoneList<LiveRange*> unhandled_live_ranges_;
-  ZoneList<LiveRange*> active_live_ranges_;
-  ZoneList<LiveRange*> inactive_live_ranges_;
-  ZoneList<LiveRange*> reusable_slots_;
+  ZoneVector<LiveRange*> unhandled_live_ranges_;
+  ZoneVector<LiveRange*> active_live_ranges_;
+  ZoneVector<LiveRange*> inactive_live_ranges_;
+  ZoneVector<LiveRange*> reusable_slots_;
+  ZoneVector<SpillRange*> spill_ranges_;
 
   RegisterKind mode_;
   int num_registers_;
@@ -536,8 +635,8 @@ class RegisterAllocator FINAL {
   DISALLOW_COPY_AND_ASSIGN(RegisterAllocator);
 };
 
-}
-}
-}  // namespace v8::internal::compiler
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_REGISTER_ALLOCATOR_H_
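
The numbered phase comments in the class above imply a fixed call order now
that the phases are public. A hypothetical driver, assuming the real caller
(the pipeline, which this diff does not show) simply invokes them in sequence;
the include path is assumed:

    #include "register-allocator.h"

    using v8::internal::compiler::RegisterAllocator;

    void RunAllocationPhases(RegisterAllocator* allocator) {
      allocator->MeetRegisterConstraints();   // Phase 1
      allocator->ResolvePhis();               // Phase 2
      allocator->BuildLiveRanges();           // Phase 3
      allocator->AllocateGeneralRegisters();  // Phase 4
      allocator->AllocateDoubleRegisters();   // Phase 4, double registers
      allocator->ReuseSpillSlots();           // Phase 5
      allocator->CommitAssignment();          // Phase 6
      allocator->PopulatePointerMaps();       // Phase 7
      allocator->ConnectRanges();             // Phase 8
      allocator->ResolveControlFlow();        // Phase 9
    }

The real caller presumably also consults AllocationOk() after the allocation
phases; that error handling is omitted here.
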
index 5e7a8d0..e4c257f 100644
@@ -294,7 +294,7 @@ class RepresentationChanger {
       case IrOpcode::kInt32Constant: {
         int32_t value = OpParameter<int32_t>(node);
         if (value == 0 || value == 1) return node;
-        return jsgraph()->OneConstant();  // value != 0
+        return jsgraph()->Int32Constant(1);  // value != 0
       }
       case IrOpcode::kHeapConstant: {
         Handle<Object> handle = OpParameter<Unique<Object> >(node).handle();
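
The hunk above fixes the kInt32Constant case of a representation change to
Bit: 0 and 1 pass through, and any other value must collapse to the machine
constant 1, presumably rather than the tagged number produced by
OneConstant(). As a pure value rule:

    #include <cstdint>

    // Nonzero means true once an int32 constant is canonicalized to a bit.
    int32_t CanonicalizeToBit(int32_t value) {
      return (value == 0 || value == 1) ? value : 1;
    }
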
index 50ece95..30bfbc8 100644
@@ -13,11 +13,12 @@ namespace internal {
 namespace compiler {
 
 BasicBlock::BasicBlock(Zone* zone, Id id)
-    : ao_number_(-1),
+    : loop_number_(-1),
       rpo_number_(-1),
       deferred_(false),
       dominator_depth_(-1),
       dominator_(NULL),
+      rpo_next_(NULL),
       loop_header_(NULL),
       loop_end_(NULL),
       loop_depth_(0),
@@ -62,16 +63,6 @@ void BasicBlock::set_control_input(Node* control_input) {
 }
 
 
-void BasicBlock::set_dominator_depth(int32_t dominator_depth) {
-  dominator_depth_ = dominator_depth;
-}
-
-
-void BasicBlock::set_dominator(BasicBlock* dominator) {
-  dominator_ = dominator;
-}
-
-
 void BasicBlock::set_loop_depth(int32_t loop_depth) {
   loop_depth_ = loop_depth;
 }
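
The new rpo_next_ field replaces the scheduler's separately allocated
BlockList cells with an intrusive singly linked list threaded through the
blocks themselves. A standalone sketch of the PushFront operation that the
SpecialRPONumberer in scheduler.cc below builds the order with (stand-in
Block type):

    #include <cassert>

    struct Block {
      Block* rpo_next = nullptr;
    };

    // Mirrors SpecialRPONumberer::PushFront: link the block in at the head.
    static Block* PushFront(Block* head, Block* block) {
      block->rpo_next = head;
      return block;
    }

    int main() {
      Block a, b;
      Block* order = nullptr;
      order = PushFront(order, &b);
      order = PushFront(order, &a);
      assert(order == &a && a.rpo_next == &b && b.rpo_next == nullptr);
      return 0;
    }
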
index b65a507..0bba689 100644
@@ -61,7 +61,7 @@ class BasicBlock FINAL : public ZoneObject {
       DCHECK(IsValid());
       return static_cast<size_t>(index_);
     }
-    bool IsValid() const { return index_ != kInvalidRpoNumber; }
+    bool IsValid() const { return index_ >= 0; }
     static RpoNumber FromInt(int index) { return RpoNumber(index); }
     static RpoNumber Invalid() { return RpoNumber(kInvalidRpoNumber); }
 
@@ -146,10 +146,13 @@ class BasicBlock FINAL : public ZoneObject {
   void set_deferred(bool deferred) { deferred_ = deferred; }
 
   int32_t dominator_depth() const { return dominator_depth_; }
-  void set_dominator_depth(int32_t dominator_depth);
+  void set_dominator_depth(int32_t depth) { dominator_depth_ = depth; }
 
   BasicBlock* dominator() const { return dominator_; }
-  void set_dominator(BasicBlock* dominator);
+  void set_dominator(BasicBlock* dominator) { dominator_ = dominator; }
+
+  BasicBlock* rpo_next() const { return rpo_next_; }
+  void set_rpo_next(BasicBlock* rpo_next) { rpo_next_ = rpo_next; }
 
   BasicBlock* loop_header() const { return loop_header_; }
   void set_loop_header(BasicBlock* loop_header);
@@ -160,9 +163,8 @@ class BasicBlock FINAL : public ZoneObject {
   int32_t loop_depth() const { return loop_depth_; }
   void set_loop_depth(int32_t loop_depth);
 
-  RpoNumber GetAoNumber() const { return RpoNumber::FromInt(ao_number_); }
-  int32_t ao_number() const { return ao_number_; }
-  void set_ao_number(int32_t ao_number) { ao_number_ = ao_number; }
+  int32_t loop_number() const { return loop_number_; }
+  void set_loop_number(int32_t loop_number) { loop_number_ = loop_number; }
 
   RpoNumber GetRpoNumber() const { return RpoNumber::FromInt(rpo_number_); }
   int32_t rpo_number() const { return rpo_number_; }
@@ -173,11 +175,12 @@ class BasicBlock FINAL : public ZoneObject {
   bool LoopContains(BasicBlock* block) const;
 
  private:
-  int32_t ao_number_;        // assembly order number of the block.
+  int32_t loop_number_;      // loop number of the block.
   int32_t rpo_number_;       // special RPO number of the block.
   bool deferred_;            // true if the block contains deferred code.
   int32_t dominator_depth_;  // Depth within the dominator tree.
   BasicBlock* dominator_;    // Immediate dominator of the block.
+  BasicBlock* rpo_next_;     // Link to next block in special RPO order.
   BasicBlock* loop_header_;  // Pointer to dominating loop header basic block,
                              // NULL if none. For loop headers, this points to
                              // enclosing loop header.
@@ -265,8 +268,6 @@ class Schedule FINAL : public ZoneObject {
 
  private:
   friend class Scheduler;
-  friend class CodeGenerator;
-  friend class ScheduleVisualizer;
   friend class BasicBlockInstrumentor;
 
   void AddSuccessor(BasicBlock* block, BasicBlock* succ);
index 36ed088..f12c631 100644
@@ -8,6 +8,7 @@
 #include "src/compiler/scheduler.h"
 
 #include "src/bit-vector.h"
+#include "src/compiler/control-equivalence.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/node.h"
@@ -38,11 +39,10 @@ Scheduler::Scheduler(Zone* zone, Graph* graph, Schedule* schedule)
       node_data_(graph_->NodeCount(), DefaultSchedulerData(), zone) {}
 
 
-Schedule* Scheduler::ComputeSchedule(ZonePool* zone_pool, Graph* graph) {
-  ZonePool::Scope zone_scope(zone_pool);
+Schedule* Scheduler::ComputeSchedule(Zone* zone, Graph* graph) {
   Schedule* schedule = new (graph->zone())
       Schedule(graph->zone(), static_cast<size_t>(graph->NodeCount()));
-  Scheduler scheduler(zone_scope.zone(), graph, schedule);
+  Scheduler scheduler(zone, graph, schedule);
 
   scheduler.BuildCFG();
   scheduler.ComputeSpecialRPONumbering();
@@ -59,7 +59,7 @@ Schedule* Scheduler::ComputeSchedule(ZonePool* zone_pool, Graph* graph) {
 
 
 Scheduler::SchedulerData Scheduler::DefaultSchedulerData() {
-  SchedulerData def = {schedule_->start(), 0, false, false, kUnknown};
+  SchedulerData def = {schedule_->start(), 0, kUnknown};
   return def;
 }
 
@@ -86,17 +86,12 @@ Scheduler::Placement Scheduler::GetPlacement(Node* node) {
         data->placement_ = (p == kFixed ? kFixed : kCoupled);
         break;
       }
-#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
-      CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
-#undef DEFINE_FLOATING_CONTROL_CASE
+#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
+      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
+#undef DEFINE_CONTROL_CASE
       {
         // Control nodes that were not control-reachable from end may float.
         data->placement_ = kSchedulable;
-        if (!data->is_connected_control_) {
-          data->is_floating_control_ = true;
-          Trace("Floating control found: #%d:%s\n", node->id(),
-                node->op()->mnemonic());
-        }
         break;
       }
       default:
@@ -126,9 +121,9 @@ void Scheduler::UpdatePlacement(Node* node, Placement placement) {
         schedule_->AddNode(block, node);
         break;
       }
-#define DEFINE_FLOATING_CONTROL_CASE(V) case IrOpcode::k##V:
-      CONTROL_OP_LIST(DEFINE_FLOATING_CONTROL_CASE)
-#undef DEFINE_FLOATING_CONTROL_CASE
+#define DEFINE_CONTROL_CASE(V) case IrOpcode::k##V:
+      CONTROL_OP_LIST(DEFINE_CONTROL_CASE)
+#undef DEFINE_CONTROL_CASE
       {
         // Control nodes force coupled uses to be placed.
         Node::Uses uses = node->uses();
@@ -148,8 +143,8 @@ void Scheduler::UpdatePlacement(Node* node, Placement placement) {
     // Reduce the use count of the node's inputs to potentially make them
     // schedulable. If all the uses of a node have been scheduled, then the node
     // itself can be scheduled.
-    for (InputIter i = node->inputs().begin(); i != node->inputs().end(); ++i) {
-      DecrementUnscheduledUseCount(*i, i.index(), i.edge().from());
+    for (Edge const edge : node->input_edges()) {
+      DecrementUnscheduledUseCount(edge.to(), edge.index(), edge.from());
     }
   }
   data->placement_ = placement;
@@ -235,14 +230,15 @@ BasicBlock* Scheduler::GetCommonDominator(BasicBlock* b1, BasicBlock* b2) {
 // between them within a Schedule) from the node graph. Visits control edges of
 // the graph backwards from an end node in order to find the connected control
 // subgraph, needed for scheduling.
-class CFGBuilder {
+class CFGBuilder : public ZoneObject {
  public:
   CFGBuilder(Zone* zone, Scheduler* scheduler)
       : scheduler_(scheduler),
         schedule_(scheduler->schedule_),
+        queued_(scheduler->graph_, 2),
         queue_(zone),
         control_(zone),
-        component_head_(NULL),
+        component_entry_(NULL),
         component_start_(NULL),
         component_end_(NULL) {}
 
@@ -250,6 +246,7 @@ class CFGBuilder {
   // backwards from end through control edges, building and connecting the
   // basic blocks for control nodes.
   void Run() {
+    ResetDataStructures();
     Queue(scheduler_->graph_->end());
 
     while (!queue_.empty()) {  // Breadth-first backwards traversal.
@@ -267,35 +264,45 @@ class CFGBuilder {
   }
 
   // Run the control flow graph construction for a minimal control-connected
-  // component ending in {node} and merge that component into an existing
+  // component ending in {exit} and merge that component into an existing
   // control flow graph at the bottom of {block}.
-  void Run(BasicBlock* block, Node* node) {
-    Queue(node);
+  void Run(BasicBlock* block, Node* exit) {
+    ResetDataStructures();
+    Queue(exit);
 
+    component_entry_ = NULL;
     component_start_ = block;
-    component_end_ = schedule_->block(node);
+    component_end_ = schedule_->block(exit);
+    scheduler_->equivalence_->Run(exit);
     while (!queue_.empty()) {  // Breadth-first backwards traversal.
       Node* node = queue_.front();
       queue_.pop();
-      bool is_dom = true;
+
+      // Use control dependence equivalence to find a canonical single-entry
+      // single-exit region that makes up a minimal component to be scheduled.
+      if (IsSingleEntrySingleExitRegion(node, exit)) {
+        Trace("Found SESE at #%d:%s\n", node->id(), node->op()->mnemonic());
+        DCHECK_EQ(NULL, component_entry_);
+        component_entry_ = node;
+        continue;
+      }
+
       int max = NodeProperties::PastControlIndex(node);
       for (int i = NodeProperties::FirstControlIndex(node); i < max; i++) {
-        is_dom = is_dom &&
-            !scheduler_->GetData(node->InputAt(i))->is_floating_control_;
         Queue(node->InputAt(i));
       }
-      // TODO(mstarzinger): This is a hacky way to find component dominator.
-      if (is_dom) component_head_ = node;
     }
-    DCHECK_NOT_NULL(component_head_);
+    DCHECK_NE(NULL, component_entry_);
 
     for (NodeVector::iterator i = control_.begin(); i != control_.end(); ++i) {
-      scheduler_->GetData(*i)->is_floating_control_ = false;
       ConnectBlocks(*i);  // Connect block to its predecessor/successors.
     }
   }
 
  private:
+  // TODO(mstarzinger): Only for Scheduler::FuseFloatingControl.
+  friend class Scheduler;
+
   void FixNode(BasicBlock* block, Node* node) {
     schedule_->AddNode(block, node);
     scheduler_->UpdatePlacement(node, Scheduler::kFixed);
@@ -303,16 +310,14 @@ class CFGBuilder {
 
   void Queue(Node* node) {
     // Mark the connected control nodes as they are queued.
-    Scheduler::SchedulerData* data = scheduler_->GetData(node);
-    if (!data->is_connected_control_) {
-      data->is_connected_control_ = true;
+    if (!queued_.Get(node)) {
       BuildBlocks(node);
       queue_.push(node);
+      queued_.Set(node, true);
       control_.push_back(node);
     }
   }
 
-
   void BuildBlocks(Node* node) {
     switch (node->opcode()) {
       case IrOpcode::kEnd:
@@ -386,14 +391,14 @@ class CFGBuilder {
                                    IrOpcode::Value false_opcode) {
     buffer[0] = NULL;
     buffer[1] = NULL;
-    for (UseIter i = node->uses().begin(); i != node->uses().end(); ++i) {
-      if ((*i)->opcode() == true_opcode) {
+    for (Node* use : node->uses()) {
+      if (use->opcode() == true_opcode) {
         DCHECK_EQ(NULL, buffer[0]);
-        buffer[0] = *i;
+        buffer[0] = use;
       }
-      if ((*i)->opcode() == false_opcode) {
+      if (use->opcode() == false_opcode) {
         DCHECK_EQ(NULL, buffer[1]);
-        buffer[1] = *i;
+        buffer[1] = use;
       }
     }
     DCHECK_NE(NULL, buffer[0]);
@@ -426,7 +431,7 @@ class CFGBuilder {
         break;
     }
 
-    if (branch == component_head_) {
+    if (branch == component_entry_) {
       TraceConnect(branch, component_start_, successor_blocks[0]);
       TraceConnect(branch, component_start_, successor_blocks[1]);
       schedule_->InsertBranch(component_start_, component_end_, branch,
@@ -451,9 +456,8 @@ class CFGBuilder {
     DCHECK(block != NULL);
     // For all of the merge's control inputs, add a goto at the end to the
     // merge's basic block.
-    for (InputIter j = merge->inputs().begin(); j != merge->inputs().end();
-         ++j) {
-      BasicBlock* predecessor_block = schedule_->block(*j);
+    for (Node* const input : merge->inputs()) {
+      BasicBlock* predecessor_block = schedule_->block(input);
       TraceConnect(merge, predecessor_block, block);
       schedule_->AddGoto(predecessor_block, block);
     }
@@ -482,23 +486,39 @@ class CFGBuilder {
             node == scheduler_->graph_->end()->InputAt(0));
   }
 
+  bool IsSingleEntrySingleExitRegion(Node* entry, Node* exit) const {
+    size_t entry_class = scheduler_->equivalence_->ClassOf(entry);
+    size_t exit_class = scheduler_->equivalence_->ClassOf(exit);
+    return entry != exit && entry_class == exit_class;
+  }
+
+  void ResetDataStructures() {
+    control_.clear();
+    DCHECK(queue_.empty());
+    DCHECK(control_.empty());
+  }
+
   Scheduler* scheduler_;
   Schedule* schedule_;
-  ZoneQueue<Node*> queue_;
-  NodeVector control_;
-  Node* component_head_;
-  BasicBlock* component_start_;
-  BasicBlock* component_end_;
+  NodeMarker<bool> queued_;      // Mark indicating whether node is queued.
+  ZoneQueue<Node*> queue_;       // Queue used for breadth-first traversal.
+  NodeVector control_;           // List of encountered control nodes.
+  Node* component_entry_;        // Component single-entry node.
+  BasicBlock* component_start_;  // Component single-entry block.
+  BasicBlock* component_end_;    // Component single-exit block.
 };
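
The SESE test above is the heart of the new component detection: a node bounds
a minimal schedulable component together with {exit} exactly when the two are
distinct but fall into the same control-dependence equivalence class. A
standalone restatement, with ClassOf() standing in for
ControlEquivalence::ClassOf():

    #include <cstddef>

    struct Node { size_t equivalence_class; };

    static size_t ClassOf(const Node* n) { return n->equivalence_class; }

    // Mirrors CFGBuilder::IsSingleEntrySingleExitRegion above.
    static bool IsSingleEntrySingleExitRegion(const Node* entry,
                                              const Node* exit) {
      return entry != exit && ClassOf(entry) == ClassOf(exit);
    }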
 
 
 void Scheduler::BuildCFG() {
   Trace("--- CREATING CFG -------------------------------------------\n");
 
+  // Instantiate a new control equivalence algorithm for the graph.
+  equivalence_ = new (zone_) ControlEquivalence(zone_, graph_);
+
   // Build a control-flow graph for the main control-connected component that
   // is being spanned by the graph's start and end nodes.
-  CFGBuilder cfg_builder(zone_, this);
-  cfg_builder.Run();
+  control_flow_builder_ = new (zone_) CFGBuilder(zone_, this);
+  control_flow_builder_->Run();
 
   // Initialize per-block data.
   scheduled_nodes_.resize(schedule_->BasicBlockCount(), NodeVector(zone_));
@@ -526,18 +546,18 @@ class SpecialRPONumberer : public ZoneObject {
       : zone_(zone),
         schedule_(schedule),
         order_(NULL),
-        loops_(zone),
         beyond_end_(NULL),
-        backedges_(1, zone),
+        loops_(zone),
+        backedges_(zone),
         stack_(zone),
         previous_block_count_(0) {}
 
   // Computes the special reverse-post-order for the main control flow graph,
   // that is for the graph spanned between the schedule's start and end blocks.
   void ComputeSpecialRPO() {
+    DCHECK(schedule_->end()->SuccessorCount() == 0);
     DCHECK_EQ(NULL, order_);  // Main order does not exist yet.
-    // TODO(mstarzinger): Should use Schedule::end() after tests are fixed.
-    ComputeAndInsertSpecialRPO(schedule_->start(), NULL);
+    ComputeAndInsertSpecialRPO(schedule_->start(), schedule_->end());
   }
 
   // Computes the special reverse-post-order for a partial control flow graph,
@@ -548,27 +568,13 @@ class SpecialRPONumberer : public ZoneObject {
     ComputeAndInsertSpecialRPO(entry, end);
   }
 
-  // Serialize the previously computed order as an assembly order (non-deferred
-  // code first, deferred code afterwards) into the final schedule.
-  void SerializeAOIntoSchedule() {
-    int32_t number = 0;
-    for (BlockList* l = order_; l != NULL; l = l->next) {
-      if (l->block->deferred()) continue;
-      l->block->set_ao_number(number++);
-    }
-    for (BlockList* l = order_; l != NULL; l = l->next) {
-      if (!l->block->deferred()) continue;
-      l->block->set_ao_number(number++);
-    }
-  }
-
   // Serialize the previously computed order as a special reverse-post-order
   // numbering for basic blocks into the final schedule.
   void SerializeRPOIntoSchedule() {
     int32_t number = 0;
-    for (BlockList* l = order_; l != NULL; l = l->next) {
-      l->block->set_rpo_number(number++);
-      schedule_->rpo_order()->push_back(l->block);
+    for (BasicBlock* b = order_; b != NULL; b = b->rpo_next()) {
+      b->set_rpo_number(number++);
+      schedule_->rpo_order()->push_back(b);
     }
     BeyondEndSentinel()->set_rpo_number(number);
   }
@@ -584,9 +590,6 @@ class SpecialRPONumberer : public ZoneObject {
  private:
   typedef std::pair<BasicBlock*, size_t> Backedge;
 
-  // TODO(mstarzinger): Only for Scheduler::GenerateImmediateDominatorTree.
-  friend class Scheduler;
-
   // Numbering for BasicBlock::rpo_number for this block traversal:
   static const int kBlockOnStack = -2;
   static const int kBlockVisited1 = -3;
@@ -599,32 +602,13 @@ class SpecialRPONumberer : public ZoneObject {
     size_t index;
   };
 
-  struct BlockList {
-    BasicBlock* block;
-    BlockList* next;
-
-    BlockList* Add(Zone* zone, BasicBlock* b) {
-      BlockList* list = static_cast<BlockList*>(zone->New(sizeof(BlockList)));
-      list->block = b;
-      list->next = this;
-      return list;
-    }
-
-    BlockList* FindForBlock(BasicBlock* b) {
-      for (BlockList* l = this; l != NULL; l = l->next) {
-        if (l->block == b) return l;
-      }
-      return NULL;
-    }
-  };
-
   struct LoopInfo {
     BasicBlock* header;
     ZoneList<BasicBlock*>* outgoing;
     BitVector* members;
     LoopInfo* prev;
-    BlockList* end;
-    BlockList* start;
+    BasicBlock* end;
+    BasicBlock* start;
 
     void AddOutgoing(Zone* zone, BasicBlock* block) {
       if (outgoing == NULL) {
@@ -645,14 +629,17 @@ class SpecialRPONumberer : public ZoneObject {
     return depth;
   }
 
-  // We are hijacking the {ao_number} to enumerate loops temporarily. Note that
-  // these numbers are only valid within this class.
-  static int GetLoopNumber(BasicBlock* block) { return block->ao_number(); }
+  BasicBlock* PushFront(BasicBlock* head, BasicBlock* block) {
+    block->set_rpo_next(head);
+    return block;
+  }
+
+  static int GetLoopNumber(BasicBlock* block) { return block->loop_number(); }
   static void SetLoopNumber(BasicBlock* block, int loop_number) {
-    return block->set_ao_number(loop_number);
+    return block->set_loop_number(loop_number);
   }
   static bool HasLoopNumber(BasicBlock* block) {
-    return block->ao_number() >= 0;
+    return block->loop_number() >= 0;
   }
 
   // TODO(mstarzinger): We only need this special sentinel because some tests
@@ -670,14 +657,13 @@ class SpecialRPONumberer : public ZoneObject {
   // mutating any existing order so that the result is still valid.
   void ComputeAndInsertSpecialRPO(BasicBlock* entry, BasicBlock* end) {
     // RPO should not have been serialized for this schedule yet.
-    CHECK_EQ(kBlockUnvisited1, schedule_->start()->ao_number());
+    CHECK_EQ(kBlockUnvisited1, schedule_->start()->loop_number());
     CHECK_EQ(kBlockUnvisited1, schedule_->start()->rpo_number());
     CHECK_EQ(0, static_cast<int>(schedule_->rpo_order()->size()));
 
     // Find correct insertion point within existing order.
-    BlockList* insert_before = order_->FindForBlock(entry);
-    BlockList* insert_after = insert_before ? insert_before->next : NULL;
-    BlockList* order = insert_after;
+    BasicBlock* insertion_point = entry->rpo_next();
+    BasicBlock* order = insertion_point;
 
     // Perform an iterative RPO traversal using an explicit stack,
     // recording backedges that form cycles. O(|B|).
@@ -685,7 +671,7 @@ class SpecialRPONumberer : public ZoneObject {
     stack_.resize(schedule_->BasicBlockCount() - previous_block_count_);
     previous_block_count_ = schedule_->BasicBlockCount();
     int stack_depth = Push(stack_, 0, entry, kBlockUnvisited1);
-    int num_loops = 0;
+    int num_loops = static_cast<int>(loops_.size());
 
     while (stack_depth > 0) {
       int current = stack_depth - 1;
@@ -698,7 +684,7 @@ class SpecialRPONumberer : public ZoneObject {
         if (succ->rpo_number() == kBlockVisited1) continue;
         if (succ->rpo_number() == kBlockOnStack) {
           // The successor is on the stack, so this is a backedge (cycle).
-          backedges_.Add(Backedge(frame->block, frame->index - 1), zone_);
+          backedges_.push_back(Backedge(frame->block, frame->index - 1));
           if (!HasLoopNumber(succ)) {
             // Assign a new loop number to the header if it doesn't have one.
             SetLoopNumber(succ, num_loops++);
@@ -710,14 +696,14 @@ class SpecialRPONumberer : public ZoneObject {
         }
       } else {
         // Finished with all successors; pop the stack and add the block.
-        order = order->Add(zone_, frame->block);
+        order = PushFront(order, frame->block);
         frame->block->set_rpo_number(kBlockVisited1);
         stack_depth--;
       }
     }
 
     // If no loops were encountered, then the order we computed was correct.
-    if (num_loops != 0) {
+    if (num_loops > static_cast<int>(loops_.size())) {
       // Otherwise, compute the loop information from the backedges in order
       // to perform a traversal that groups loop bodies together.
       ComputeLoopInfo(stack_, num_loops, &backedges_);
@@ -725,7 +711,7 @@ class SpecialRPONumberer : public ZoneObject {
       // Initialize the "loop stack". Note the entry could be a loop header.
       LoopInfo* loop =
           HasLoopNumber(entry) ? &loops_[GetLoopNumber(entry)] : NULL;
-      order = NULL;
+      order = insertion_point;
 
       // Perform an iterative post-order traversal, visiting loop bodies before
       // edges that lead out of loops. Visits each block once, but linking loop
@@ -737,7 +723,7 @@ class SpecialRPONumberer : public ZoneObject {
         BasicBlock* block = frame->block;
         BasicBlock* succ = NULL;
 
-        if (frame->index < block->SuccessorCount()) {
+        if (block != end && frame->index < block->SuccessorCount()) {
           // Process the next normal successor.
           succ = block->SuccessorAt(frame->index++);
         } else if (HasLoopNumber(block)) {
@@ -746,7 +732,7 @@ class SpecialRPONumberer : public ZoneObject {
             // Finish the loop body the first time the header is left on the
             // stack.
             DCHECK(loop != NULL && loop->header == block);
-            loop->start = order->Add(zone_, block);
+            loop->start = PushFront(order, block);
             order = loop->end;
             block->set_rpo_number(kBlockVisited2);
             // Pop the loop stack and continue visiting outgoing edges within
@@ -761,7 +747,7 @@ class SpecialRPONumberer : public ZoneObject {
               static_cast<int>(frame->index - block->SuccessorCount());
           LoopInfo* info = &loops_[GetLoopNumber(block)];
           DCHECK(loop != info);
-          if (info->outgoing != NULL &&
+          if (block != entry && info->outgoing != NULL &&
               outgoing_index < info->outgoing->length()) {
             succ = info->outgoing->at(outgoing_index);
             frame->index++;
@@ -794,9 +780,9 @@ class SpecialRPONumberer : public ZoneObject {
           if (HasLoopNumber(block)) {
             // If we are going to pop a loop header, then add its entire body.
             LoopInfo* info = &loops_[GetLoopNumber(block)];
-            for (BlockList* l = info->start; true; l = l->next) {
-              if (l->next == info->end) {
-                l->next = order;
+            for (BasicBlock* b = info->start; true; b = b->rpo_next()) {
+              if (b->rpo_next() == info->end) {
+                b->set_rpo_next(order);
                 info->end = order;
                 break;
               }
@@ -804,7 +790,7 @@ class SpecialRPONumberer : public ZoneObject {
             order = info->start;
           } else {
             // Pop a single node off the stack and add it to the order.
-            order = order->Add(zone_, block);
+            order = PushFront(order, block);
             block->set_rpo_number(kBlockVisited2);
           }
           stack_depth--;
@@ -812,23 +798,16 @@ class SpecialRPONumberer : public ZoneObject {
       }
     }
 
-    // Insert result into existing order.
-    if (insert_before == NULL) {
-      order_ = order;
-    } else {
-      // There already is a list element for the entry block in the list, hence
-      // we skip the first element of the sub-list to compensate duplication.
-      DCHECK_EQ(insert_before->block, order->block);
-      insert_before->next = order->next;
-    }
+    // Publish new order the first time.
+    if (order_ == NULL) order_ = order;
 
     // Compute the correct loop headers and set the correct loop ends.
     LoopInfo* current_loop = NULL;
     BasicBlock* current_header = entry->loop_header();
     int32_t loop_depth = entry->loop_depth();
     if (entry->IsLoopHeader()) --loop_depth;  // Entry might be a loop header.
-    for (BlockList* l = order; l != insert_after; l = l->next) {
-      BasicBlock* current = l->block;
+    for (BasicBlock* b = order; b != insertion_point; b = b->rpo_next()) {
+      BasicBlock* current = b;
 
       // Reset BasicBlock::rpo_number again.
       current->set_rpo_number(kBlockUnvisited1);
@@ -847,8 +826,8 @@ class SpecialRPONumberer : public ZoneObject {
       if (HasLoopNumber(current)) {
         ++loop_depth;
         current_loop = &loops_[GetLoopNumber(current)];
-        BlockList* end = current_loop->end;
-        current->set_loop_end(end == NULL ? BeyondEndSentinel() : end->block);
+        BasicBlock* end = current_loop->end;
+        current->set_loop_end(end == NULL ? BeyondEndSentinel() : end);
         current_header = current_loop->header;
         Trace("B%d is a loop header, increment loop depth to %d\n",
               current->id().ToInt(), loop_depth);
@@ -868,12 +847,21 @@ class SpecialRPONumberer : public ZoneObject {
 
   // Computes loop membership from the backedges of the control flow graph.
   void ComputeLoopInfo(ZoneVector<SpecialRPOStackFrame>& queue,
-                       size_t num_loops, ZoneList<Backedge>* backedges) {
+                       size_t num_loops, ZoneVector<Backedge>* backedges) {
+    // Extend existing loop membership vectors.
+    for (LoopInfo& loop : loops_) {
+      BitVector* new_members = new (zone_)
+          BitVector(static_cast<int>(schedule_->BasicBlockCount()), zone_);
+      new_members->CopyFrom(*loop.members);
+      loop.members = new_members;
+    }
+
+    // Extend loop information vector.
     loops_.resize(num_loops, LoopInfo());
 
     // Compute loop membership starting from backedges.
     // O(max(loop_depth) * max(|loop|)).
-    for (int i = 0; i < backedges->length(); i++) {
+    for (size_t i = 0; i < backedges->size(); i++) {
       BasicBlock* member = backedges->at(i).first;
       BasicBlock* header = member->SuccessorAt(backedges->at(i).second);
       size_t loop_num = GetLoopNumber(header);
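
ComputeLoopInfo marks loop membership by walking backwards from each backedge
until it reaches the already-marked loop header. A sketch of the classic
natural-loop computation that the O(max(loop_depth) * max(|loop|)) comment
refers to, assuming this is what the queue-based traversal amounts to:

    #include <vector>

    struct Block {
      std::vector<Block*> predecessors;
      bool member = false;
    };

    // Mark every block on a path from the backedge source to the header.
    static void ComputeLoopMembers(Block* header, Block* backedge_source) {
      header->member = true;  // The header bounds the walk.
      std::vector<Block*> queue{backedge_source};
      while (!queue.empty()) {
        Block* b = queue.back();
        queue.pop_back();
        if (b->member) continue;  // Header or already-visited block.
        b->member = true;
        for (Block* p : b->predecessors) queue.push_back(p);
      }
    }
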
@@ -924,8 +912,7 @@ class SpecialRPONumberer : public ZoneObject {
     }
     os << ":\n";
 
-    for (BlockList* l = order_; l != NULL; l = l->next) {
-      BasicBlock* block = l->block;
+    for (BasicBlock* block = order_; block != NULL; block = block->rpo_next()) {
       BasicBlock::Id bid = block->id();
       // TODO(jarin,svenpanne): Add formatting here once we have support for
       // that in streams (we want an equivalent of PrintF("%5d:", x) here).
@@ -971,18 +958,18 @@ class SpecialRPONumberer : public ZoneObject {
 
       // Verify the start ... end list relationship.
       int links = 0;
-      BlockList* l = loop->start;
-      DCHECK(l != NULL && l->block == header);
+      BasicBlock* block = loop->start;
+      DCHECK_EQ(header, block);
       bool end_found;
       while (true) {
-        if (l == NULL || l == loop->end) {
-          end_found = (loop->end == l);
+        if (block == NULL || block == loop->end) {
+          end_found = (loop->end == block);
           break;
         }
         // The list should be in the same order as the final result.
-        DCHECK(l->block->rpo_number() == links + header->rpo_number());
+        DCHECK(block->rpo_number() == links + header->rpo_number());
         links++;
-        l = l->next;
+        block = block->rpo_next();
         DCHECK(links < static_cast<int>(2 * order->size()));  // cycle?
       }
       DCHECK(links > 0);
@@ -1016,23 +1003,18 @@ class SpecialRPONumberer : public ZoneObject {
 
   Zone* zone_;
   Schedule* schedule_;
-  BlockList* order_;
-  ZoneVector<LoopInfo> loops_;
+  BasicBlock* order_;
   BasicBlock* beyond_end_;
-  ZoneList<Backedge> backedges_;
+  ZoneVector<LoopInfo> loops_;
+  ZoneVector<Backedge> backedges_;
   ZoneVector<SpecialRPOStackFrame> stack_;
   size_t previous_block_count_;
 };
 
 
-BasicBlockVector* Scheduler::ComputeSpecialRPO(ZonePool* zone_pool,
-                                               Schedule* schedule) {
-  ZonePool::Scope zone_scope(zone_pool);
-  Zone* zone = zone_scope.zone();
-
+BasicBlockVector* Scheduler::ComputeSpecialRPO(Zone* zone, Schedule* schedule) {
   SpecialRPONumberer numberer(zone, schedule);
   numberer.ComputeSpecialRPO();
-  numberer.SerializeAOIntoSchedule();
   numberer.SerializeRPOIntoSchedule();
   numberer.PrintAndVerifySpecialRPO();
   return schedule->rpo_order();
@@ -1048,19 +1030,10 @@ void Scheduler::ComputeSpecialRPONumbering() {
 }
 
 
-void Scheduler::GenerateImmediateDominatorTree() {
-  Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
-
-  // TODO(danno): Consider using Lengauer & Tarjan's if this becomes too slow.
-
-  // Build the block dominator tree.
-  schedule_->start()->set_dominator_depth(0);
-  typedef SpecialRPONumberer::BlockList BlockList;
-  for (BlockList* l = special_rpo_->order_; l != NULL; l = l->next) {
-    BasicBlock* current = l->block;
-    if (current == schedule_->start()) continue;
-    BasicBlock::Predecessors::iterator pred = current->predecessors_begin();
-    BasicBlock::Predecessors::iterator end = current->predecessors_end();
+void Scheduler::PropagateImmediateDominators(BasicBlock* block) {
+  for (/*nop*/; block != NULL; block = block->rpo_next()) {
+    BasicBlock::Predecessors::iterator pred = block->predecessors_begin();
+    BasicBlock::Predecessors::iterator end = block->predecessors_end();
     DCHECK(pred != end);  // All blocks except start have predecessors.
     BasicBlock* dominator = *pred;
     // For multiple predecessors, walk up the dominator tree until a common
@@ -1071,16 +1044,27 @@ void Scheduler::GenerateImmediateDominatorTree() {
       if ((*pred)->dominator_depth() < 0) continue;
       dominator = GetCommonDominator(dominator, *pred);
     }
-    current->set_dominator(dominator);
-    current->set_dominator_depth(dominator->dominator_depth() + 1);
+    block->set_dominator(dominator);
+    block->set_dominator_depth(dominator->dominator_depth() + 1);
     // Propagate "deferredness" of the dominator.
-    if (dominator->deferred()) current->set_deferred(true);
-    Trace("Block B%d's idom is B%d, depth = %d\n", current->id().ToInt(),
-          dominator->id().ToInt(), current->dominator_depth());
+    if (dominator->deferred()) block->set_deferred(true);
+    Trace("Block B%d's idom is B%d, depth = %d\n", block->id().ToInt(),
+          dominator->id().ToInt(), block->dominator_depth());
   }
 }
 
 
+void Scheduler::GenerateImmediateDominatorTree() {
+  Trace("--- IMMEDIATE BLOCK DOMINATORS -----------------------------\n");
+
+  // Seed the start block as the first dominator.
+  schedule_->start()->set_dominator_depth(0);
+
+  // Build the block dominator tree resulting from the above seed.
+  PropagateImmediateDominators(schedule_->start()->rpo_next());
+}
+
+
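Note: PropagateImmediateDominators computes each idom by folding the block's
predecessors through GetCommonDominator. The body of GetCommonDominator is not
part of this hunk; a plausible depth-based sketch (assumed, using the accessors
seen above) walks the deeper block up its dominator chain until both cursors
meet:

    BasicBlock* CommonDominatorSketch(BasicBlock* b1, BasicBlock* b2) {
      while (b1 != b2) {
        // Always climb from the deeper block so the cursors converge.
        if (b1->dominator_depth() < b2->dominator_depth()) {
          b2 = b2->dominator();
        } else {
          b1 = b1->dominator();
        }
      }
      return b1;
    }
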
 // -----------------------------------------------------------------------------
 // Phase 3: Prepare use counts for nodes.
 
@@ -1163,7 +1147,6 @@ class ScheduleEarlyNodeVisitor {
 
     // Fixed nodes already know their schedule early position.
     if (scheduler_->GetPlacement(node) == Scheduler::kFixed) {
-      DCHECK_EQ(schedule_->start(), data->minimum_block_);
       data->minimum_block_ = schedule_->block(node);
       Trace("Fixing #%d:%s minimum_block = B%d, dominator_depth = %d\n",
             node->id(), node->op()->mnemonic(),
@@ -1260,9 +1243,7 @@ class ScheduleLateNodeVisitor {
  private:
   void ProcessQueue(Node* root) {
     ZoneQueue<Node*>* queue = &(scheduler_->schedule_queue_);
-    for (InputIter i = root->inputs().begin(); i != root->inputs().end(); ++i) {
-      Node* node = *i;
-
+    for (Node* node : root->inputs()) {
       // Don't schedule coupled nodes on their own.
       if (scheduler_->GetPlacement(node) == Scheduler::kCoupled) {
         node = NodeProperties::GetControlInput(node);
@@ -1335,9 +1316,8 @@ class ScheduleLateNodeVisitor {
 
   BasicBlock* GetCommonDominatorOfUses(Node* node) {
     BasicBlock* block = NULL;
-    Node::Uses uses = node->uses();
-    for (Node::Uses::iterator i = uses.begin(); i != uses.end(); ++i) {
-      BasicBlock* use_block = GetBlockForUse(i.edge());
+    for (Edge edge : node->use_edges()) {
+      BasicBlock* use_block = GetBlockForUse(edge);
       block = block == NULL ? use_block : use_block == NULL
                                               ? block
                                               : scheduler_->GetCommonDominator(
@@ -1346,7 +1326,7 @@ class ScheduleLateNodeVisitor {
     return block;
   }
 
-  BasicBlock* GetBlockForUse(Node::Edge edge) {
+  BasicBlock* GetBlockForUse(Edge edge) {
     Node* use = edge.from();
     IrOpcode::Value opcode = use->opcode();
     if (opcode == IrOpcode::kPhi || opcode == IrOpcode::kEffectPhi) {
@@ -1377,7 +1357,6 @@ class ScheduleLateNodeVisitor {
   }
 
   void ScheduleFloatingControl(BasicBlock* block, Node* node) {
-    DCHECK(scheduler_->GetData(node)->is_floating_control_);
     scheduler_->FuseFloatingControl(block, node);
   }
 
@@ -1416,7 +1395,6 @@ void Scheduler::SealFinalSchedule() {
   Trace("--- SEAL FINAL SCHEDULE ------------------------------------\n");
 
   // Serialize the assembly order and reverse-post-order numbering.
-  special_rpo_->SerializeAOIntoSchedule();
   special_rpo_->SerializeRPOIntoSchedule();
   special_rpo_->PrintAndVerifySpecialRPO();
 
@@ -1443,17 +1421,39 @@ void Scheduler::FuseFloatingControl(BasicBlock* block, Node* node) {
   }
 
   // Iterate on phase 1: Build control-flow graph.
-  CFGBuilder cfg_builder(zone_, this);
-  cfg_builder.Run(block, node);
+  control_flow_builder_->Run(block, node);
 
   // Iterate on phase 2: Compute special RPO and dominator tree.
   special_rpo_->UpdateSpecialRPO(block, schedule_->block(node));
   // TODO(mstarzinger): Currently "iterate on" means "re-run". Fix that.
-  for (BasicBlock* block : schedule_->all_blocks_) {
-    block->set_dominator_depth(-1);
-    block->set_dominator(NULL);
+  for (BasicBlock* b = block->rpo_next(); b != NULL; b = b->rpo_next()) {
+    b->set_dominator_depth(-1);
+    b->set_dominator(NULL);
+  }
+  PropagateImmediateDominators(block->rpo_next());
+
+  // Iterate on phase 4: Schedule nodes early.
+  // TODO(mstarzinger): The following loop gathering the propagation roots is a
+  // temporary solution and should be merged into the rest of the scheduler as
+  // soon as the approach has settled for all floating loops.
+  NodeVector propagation_roots(control_flow_builder_->control_);
+  for (Node* node : control_flow_builder_->control_) {
+    for (Node* use : node->uses()) {
+      if (use->opcode() == IrOpcode::kPhi ||
+          use->opcode() == IrOpcode::kEffectPhi) {
+        propagation_roots.push_back(use);
+      }
+    }
+  }
+  if (FLAG_trace_turbo_scheduler) {
+    Trace("propagation roots: ");
+    for (Node* node : propagation_roots) {
+      Trace("#%d:%s ", node->id(), node->op()->mnemonic());
+    }
+    Trace("\n");
   }
-  GenerateImmediateDominatorTree();
+  ScheduleEarlyNodeVisitor schedule_early_visitor(zone_, this);
+  schedule_early_visitor.Run(&propagation_roots);
 
   // Move previously planned nodes.
   // TODO(mstarzinger): Improve that by supporting bulk moves.
index 2839a0c..9da0b6d 100644 (file)
@@ -16,6 +16,8 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
+class CFGBuilder;
+class ControlEquivalence;
 class SpecialRPONumberer;
 
 // Computes a schedule from a graph, placing nodes into basic blocks and
@@ -24,11 +26,10 @@ class Scheduler {
  public:
   // The complete scheduling algorithm. Creates a new schedule and places all
   // nodes from the graph into it.
-  static Schedule* ComputeSchedule(ZonePool* zone_pool, Graph* graph);
+  static Schedule* ComputeSchedule(Zone* zone, Graph* graph);
 
   // Compute the RPO of blocks in an existing schedule.
-  static BasicBlockVector* ComputeSpecialRPO(ZonePool* zone_pool,
-                                             Schedule* schedule);
+  static BasicBlockVector* ComputeSpecialRPO(Zone* zone, Schedule* schedule);
 
  private:
   // Placement of a node changes during scheduling. The placement state
@@ -48,9 +49,6 @@ class Scheduler {
   struct SchedulerData {
     BasicBlock* minimum_block_;  // Minimum legal RPO placement.
     int unscheduled_count_;      // Number of unscheduled uses of this node.
-    bool is_connected_control_;  // {true} if control-connected to the end node.
-    bool is_floating_control_;   // {true} if control, but not control-connected
-                                 // to the end node.
     Placement placement_;        // Whether the node is fixed, schedulable,
                                  // coupled to another node, or not yet known.
   };
@@ -62,7 +60,9 @@ class Scheduler {
   NodeVector schedule_root_nodes_;       // Fixed root nodes seed the worklist.
   ZoneQueue<Node*> schedule_queue_;      // Worklist of schedulable nodes.
   ZoneVector<SchedulerData> node_data_;  // Per-node data for all nodes.
+  CFGBuilder* control_flow_builder_;     // Builds basic blocks for controls.
   SpecialRPONumberer* special_rpo_;      // Special RPO numbering of blocks.
+  ControlEquivalence* equivalence_;      // Control dependence equivalence.
 
   Scheduler(Zone* zone, Graph* graph, Schedule* schedule);
 
@@ -77,6 +77,7 @@ class Scheduler {
   void DecrementUnscheduledUseCount(Node* node, int index, Node* from);
 
   BasicBlock* GetCommonDominator(BasicBlock* b1, BasicBlock* b2);
+  void PropagateImmediateDominators(BasicBlock* block);
 
   // Phase 1: Build control-flow graph.
   friend class CFGBuilder;
index 2e51d72..edecf58 100644 (file)
@@ -6,8 +6,8 @@
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/diamond.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
+#include "src/compiler/node.h"
 
 namespace v8 {
 namespace internal {
@@ -26,26 +26,61 @@ Reduction SelectLowering::Reduce(Node* node) {
   if (node->opcode() != IrOpcode::kSelect) return NoChange();
   SelectParameters const p = SelectParametersOf(node->op());
 
-  Node* const cond = node->InputAt(0);
+  Node* cond = node->InputAt(0);
+  Node* vthen = node->InputAt(1);
+  Node* velse = node->InputAt(2);
+  Node* merge = nullptr;
 
   // Check if we already have a diamond for this condition.
-  auto i = merges_.find(cond);
-  if (i == merges_.end()) {
-    // Create a new diamond for this condition and remember its merge node.
-    Diamond d(graph(), common(), cond, p.hint());
-    i = merges_.insert(std::make_pair(cond, d.merge)).first;
-  }
+  auto range = merges_.equal_range(cond);
+  for (auto i = range.first;; ++i) {
+    if (i == range.second) {
+      // Create a new diamond for this condition and remember its merge node.
+      Diamond d(graph(), common(), cond, p.hint());
+      merges_.insert(std::make_pair(cond, d.merge));
+      merge = d.merge;
+      break;
+    }
 
-  DCHECK_EQ(cond, i->first);
+    // If the diamond is reachable from the Select, merging them would result in
+    // an unschedulable graph, so we cannot reuse the diamond in that case.
+    merge = i->second;
+    if (!ReachableFrom(merge, node)) {
+      break;
+    }
+  }
 
   // Create a Phi hanging off the previously determined merge.
   node->set_op(common()->Phi(p.type(), 2));
-  node->ReplaceInput(0, node->InputAt(1));
-  node->ReplaceInput(1, node->InputAt(2));
-  node->ReplaceInput(2, i->second);
+  node->ReplaceInput(0, vthen);
+  node->ReplaceInput(1, velse);
+  node->ReplaceInput(2, merge);
   return Changed(node);
 }
 
+
+bool SelectLowering::ReachableFrom(Node* const sink, Node* const source) {
+  // TODO(turbofan): This is probably horribly expensive, and it should be moved
+  // into node.h or somewhere else?!
+  Zone zone(graph()->zone()->isolate());
+  std::queue<Node*, NodeDeque> queue((NodeDeque(&zone)));
+  BoolVector visited(graph()->NodeCount(), false, &zone);
+  queue.push(source);
+  visited[source->id()] = true;
+  while (!queue.empty()) {
+    Node* current = queue.front();
+    if (current == sink) return true;
+    queue.pop();
+    for (auto input : current->inputs()) {
+      if (!visited[input->id()]) {
+        queue.push(input);
+        visited[input->id()] = true;
+      }
+    }
+  }
+  return false;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
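
Note: merges_ becomes a multimap because one condition may now own several
diamonds: a cached merge can be reused only when it is not reachable from the
Select being lowered, otherwise the rewritten Phi would feed its own merge and
the graph would be unschedulable. An equivalent restructuring of the lookup
loop in Reduce, as a sketch using names from this patch:

    Node* FindReusableMerge(Node* cond, Node* select) {
      auto range = merges_.equal_range(cond);
      for (auto i = range.first; i != range.second; ++i) {
        // Reuse only if {select} cannot reach the cached merge.
        if (!ReachableFrom(i->second, select)) return i->second;
      }
      return nullptr;  // caller builds a fresh Diamond and caches d.merge
    }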
index ae22cad..05ea0e0 100644 (file)
@@ -28,8 +28,10 @@ class SelectLowering FINAL : public Reducer {
   Reduction Reduce(Node* node) OVERRIDE;
 
  private:
-  typedef std::map<Node*, Node*, std::less<Node*>,
-                   zone_allocator<std::pair<Node* const, Node*>>> Merges;
+  typedef std::multimap<Node*, Node*, std::less<Node*>,
+                        zone_allocator<std::pair<Node* const, Node*>>> Merges;
+
+  bool ReachableFrom(Node* const sink, Node* const source);
 
   CommonOperatorBuilder* common() const { return common_; }
   Graph* graph() const { return graph_; }
index c50b338..bb57614 100644 (file)
@@ -11,6 +11,7 @@
 #include "src/compiler/common-operator.h"
 #include "src/compiler/diamond.h"
 #include "src/compiler/graph-inl.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node-matchers.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/representation-change.h"
@@ -70,7 +71,6 @@ class RepresentationSelector {
         info_(zone->NewArray<NodeInfo>(count_)),
         nodes_(zone),
         replacements_(zone),
-        contains_js_nodes_(false),
         phase_(PROPAGATE),
         changer_(changer),
         queue_(zone) {
@@ -181,7 +181,7 @@ class RepresentationSelector {
     } else {
       // In the change phase, insert a change before the use if necessary.
       MachineTypeUnion output = GetInfo(input)->output;
-      if ((output & kRepWord32) == 0) {
+      if ((output & (kRepBit | kRepWord8 | kRepWord16 | kRepWord32)) == 0) {
         // Output representation doesn't match usage.
         TRACE(("  truncate-to-int32: #%d:%s(@%d #%d:%s) ", node->id(),
                node->op()->mnemonic(), index, input->id(),
@@ -239,19 +239,19 @@ class RepresentationSelector {
   // context, effect, and control inputs, assuming that value inputs should have
   // {kRepTagged} representation and can observe all output values {kTypeAny}.
   void VisitInputs(Node* node) {
-    InputIter i = node->inputs().begin();
+    auto i = node->input_edges().begin();
     for (int j = node->op()->ValueInputCount(); j > 0; ++i, j--) {
-      ProcessInput(node, i.index(), kMachAnyTagged);  // Value inputs
+      ProcessInput(node, (*i).index(), kMachAnyTagged);  // Value inputs
     }
     for (int j = OperatorProperties::GetContextInputCount(node->op()); j > 0;
          ++i, j--) {
-      ProcessInput(node, i.index(), kMachAnyTagged);  // Context inputs
+      ProcessInput(node, (*i).index(), kMachAnyTagged);  // Context inputs
     }
     for (int j = node->op()->EffectInputCount(); j > 0; ++i, j--) {
-      Enqueue(*i);  // Effect inputs: just visit
+      Enqueue((*i).to());  // Effect inputs: just visit
     }
     for (int j = node->op()->ControlInputCount(); j > 0; ++i, j--) {
-      Enqueue(*i);  // Control inputs: just visit
+      Enqueue((*i).to());  // Control inputs: just visit
     }
     SetOutput(node, kMachAnyTagged);
   }
@@ -381,21 +381,19 @@ class RepresentationSelector {
       }
 
       // Convert inputs to the output representation of this phi.
-      Node::Inputs inputs = node->inputs();
-      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
-           ++iter, --values) {
+      for (Edge const edge : node->input_edges()) {
         // TODO(titzer): it'd be nice to have distinguished edge kinds here.
-        ProcessInput(node, iter.index(), values > 0 ? output_type : 0);
+        ProcessInput(node, edge.index(), values > 0 ? output_type : 0);
+        values--;
       }
     } else {
       // Propagate {use} of the phi to value inputs, and 0 to control.
-      Node::Inputs inputs = node->inputs();
       MachineType use_type =
           static_cast<MachineType>((use & kTypeMask) | output);
-      for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
-           ++iter, --values) {
+      for (Edge const edge : node->input_edges()) {
         // TODO(titzer): it'd be nice to have distinguished edge kinds here.
-        ProcessInput(node, iter.index(), values > 0 ? use_type : 0);
+        ProcessInput(node, edge.index(), values > 0 ? use_type : 0);
+        values--;
       }
     }
   }
@@ -517,7 +515,6 @@ class RepresentationSelector {
 #define DEFINE_JS_CASE(x) case IrOpcode::k##x:
         JS_OP_LIST(DEFINE_JS_CASE)
 #undef DEFINE_JS_CASE
-        contains_js_nodes_ = true;
         VisitInputs(node);
         return SetOutput(node, kRepTagged);
 
@@ -672,8 +669,11 @@ class RepresentationSelector {
           VisitUnop(node, kTypeInt32 | use_rep, kTypeInt32 | use_rep);
           if (lower()) DeferReplacement(node, node->InputAt(0));
         } else if ((in & kTypeMask) == kTypeUint32 ||
-                   (in & kTypeMask) == kTypeInt32 ||
-                   in_upper->Is(Type::Unsigned32()) ||
+                   in_upper->Is(Type::Unsigned32())) {
+          // Just change representation if necessary.
+          VisitUnop(node, kTypeUint32 | kRepWord32, kTypeInt32 | kRepWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if ((in & kTypeMask) == kTypeInt32 ||
                    (in & kRepMask) == kRepWord32) {
           // Just change representation if necessary.
           VisitUnop(node, kTypeInt32 | kRepWord32, kTypeInt32 | kRepWord32);
@@ -697,12 +697,15 @@ class RepresentationSelector {
           VisitUnop(node, kTypeUint32 | use_rep, kTypeUint32 | use_rep);
           if (lower()) DeferReplacement(node, node->InputAt(0));
         } else if ((in & kTypeMask) == kTypeUint32 ||
-                   (in & kTypeMask) == kTypeInt32 ||
-                   in_upper->Is(Type::Signed32()) ||
-                   (in & kRepMask) == kRepWord32) {
+                   in_upper->Is(Type::Unsigned32())) {
           // Just change representation if necessary.
           VisitUnop(node, kTypeUint32 | kRepWord32, kTypeUint32 | kRepWord32);
           if (lower()) DeferReplacement(node, node->InputAt(0));
+        } else if ((in & kTypeMask) == kTypeInt32 ||
+                   (in & kRepMask) == kRepWord32) {
+          // Just change representation if necessary.
+          VisitUnop(node, kTypeInt32 | kRepWord32, kTypeUint32 | kRepWord32);
+          if (lower()) DeferReplacement(node, node->InputAt(0));
         } else {
           // Require the input in float64 format and perform truncation.
           // TODO(turbofan): avoid a truncation with a smi check.
@@ -754,30 +757,58 @@ class RepresentationSelector {
         if (lower()) lowering->DoStoreField(node);
         break;
       }
-      case IrOpcode::kLoadElement: {
-        ElementAccess access = ElementAccessOf(node->op());
-        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
-        ProcessInput(node, 1, kMachInt32);  // element index
+      case IrOpcode::kLoadBuffer: {
+        BufferAccess access = BufferAccessOf(node->op());
+        ProcessInput(node, 0, kMachPtr);    // buffer
+        ProcessInput(node, 1, kMachInt32);  // offset
         ProcessInput(node, 2, kMachInt32);  // length
         ProcessRemainingInputs(node, 3);
         // Tagged overrides everything if we have to do a typed array bounds
         // check, because we may need to return undefined then.
-        MachineType output_type =
-            (access.bounds_check == kTypedArrayBoundsCheck &&
-             (use & kRepTagged))
-                ? kMachAnyTagged
-                : access.machine_type;
+        MachineType output_type;
+        if (use & kRepTagged) {
+          output_type = kMachAnyTagged;
+        } else if (use & kRepFloat64) {
+          if (access.machine_type() & kRepFloat32) {
+            output_type = access.machine_type();
+          } else {
+            output_type = kMachFloat64;
+          }
+        } else if (use & kRepFloat32) {
+          output_type = kMachFloat32;
+        } else {
+          output_type = access.machine_type();
+        }
         SetOutput(node, output_type);
-        if (lower()) lowering->DoLoadElement(node, output_type);
+        if (lower()) lowering->DoLoadBuffer(node, output_type, changer_);
+        break;
+      }
+      case IrOpcode::kStoreBuffer: {
+        BufferAccess access = BufferAccessOf(node->op());
+        ProcessInput(node, 0, kMachPtr);               // buffer
+        ProcessInput(node, 1, kMachInt32);             // offset
+        ProcessInput(node, 2, kMachInt32);             // length
+        ProcessInput(node, 3, access.machine_type());  // value
+        ProcessRemainingInputs(node, 4);
+        SetOutput(node, 0);
+        if (lower()) lowering->DoStoreBuffer(node);
+        break;
+      }
+      case IrOpcode::kLoadElement: {
+        ElementAccess access = ElementAccessOf(node->op());
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));  // base
+        ProcessInput(node, 1, kMachInt32);                            // index
+        ProcessRemainingInputs(node, 2);
+        SetOutput(node, access.machine_type);
+        if (lower()) lowering->DoLoadElement(node);
         break;
       }
       case IrOpcode::kStoreElement: {
         ElementAccess access = ElementAccessOf(node->op());
-        ProcessInput(node, 0, changer_->TypeForBasePointer(access));
-        ProcessInput(node, 1, kMachInt32);  // element index
-        ProcessInput(node, 2, kMachInt32);  // length
-        ProcessInput(node, 3, access.machine_type);
-        ProcessRemainingInputs(node, 4);
+        ProcessInput(node, 0, changer_->TypeForBasePointer(access));  // base
+        ProcessInput(node, 1, kMachInt32);                            // index
+        ProcessInput(node, 2, access.machine_type);                   // value
+        ProcessRemainingInputs(node, 3);
         SetOutput(node, 0);
         if (lower()) lowering->DoStoreElement(node);
         break;
@@ -1000,7 +1031,6 @@ class RepresentationSelector {
   NodeInfo* info_;                  // node id -> usage information
   NodeVector nodes_;                // collected nodes
   NodeVector replacements_;         // replacements to be done after lowering
-  bool contains_js_nodes_;          // {true} if a JS operator was seen
   Phase phase_;                     // current phase of algorithm
   RepresentationChanger* changer_;  // for inserting representation changes
   ZoneQueue<Node*> queue_;          // queue for traversing the graph
@@ -1107,156 +1137,95 @@ Node* SimplifiedLowering::ComputeIndex(const ElementAccess& access,
 }
 
 
-namespace {
-
-intptr_t AddressForOutOfBoundsLoad(MachineType type) {
-  switch (RepresentationOf(type)) {
-    case kRepFloat32: {
-      static const float dummy = std::numeric_limits<float>::quiet_NaN();
-      return bit_cast<intptr_t>(&dummy);
-    }
-    case kRepFloat64: {
-      static const double dummy = std::numeric_limits<double>::quiet_NaN();
-      return bit_cast<intptr_t>(&dummy);
-    }
-    case kRepBit:
-    case kRepWord8:
-    case kRepWord16:
-    case kRepWord32: {
-      static const int32_t dummy = 0;
-      return bit_cast<intptr_t>(&dummy);
+void SimplifiedLowering::DoLoadBuffer(Node* node, MachineType output_type,
+                                      RepresentationChanger* changer) {
+  DCHECK_EQ(IrOpcode::kLoadBuffer, node->opcode());
+  DCHECK_NE(kMachNone, RepresentationOf(output_type));
+  MachineType const type = BufferAccessOf(node->op()).machine_type();
+  if (output_type != type) {
+    Node* const buffer = node->InputAt(0);
+    Node* const offset = node->InputAt(1);
+    Node* const length = node->InputAt(2);
+    Node* const effect = node->InputAt(3);
+    Node* const control = node->InputAt(4);
+    Node* const index =
+        machine()->Is64()
+            ? graph()->NewNode(machine()->ChangeUint32ToUint64(), offset)
+            : offset;
+
+    Node* check = graph()->NewNode(machine()->Uint32LessThan(), offset, length);
+    Node* branch =
+        graph()->NewNode(common()->Branch(BranchHint::kTrue), check, control);
+
+    Node* if_true = graph()->NewNode(common()->IfTrue(), branch);
+    Node* etrue =
+        graph()->NewNode(machine()->Load(type), buffer, index, effect, if_true);
+    Node* vtrue = changer->GetRepresentationFor(etrue, type, output_type);
+
+    Node* if_false = graph()->NewNode(common()->IfFalse(), branch);
+    Node* efalse = effect;
+    Node* vfalse;
+    if (output_type & kRepTagged) {
+      vfalse = jsgraph()->UndefinedConstant();
+    } else if (output_type & kRepFloat64) {
+      vfalse =
+          jsgraph()->Float64Constant(std::numeric_limits<double>::quiet_NaN());
+    } else if (output_type & kRepFloat32) {
+      vfalse =
+          jsgraph()->Float32Constant(std::numeric_limits<float>::quiet_NaN());
+    } else {
+      vfalse = jsgraph()->Int32Constant(0);
     }
-    default:
-      break;
+
+    Node* merge = graph()->NewNode(common()->Merge(2), if_true, if_false);
+    Node* ephi = graph()->NewNode(common()->EffectPhi(2), etrue, efalse, merge);
+
+    // Replace effect uses of {node} with the {ephi}.
+    NodeProperties::ReplaceWithValue(node, node, ephi);
+
+    // Turn the {node} into a Phi.
+    node->set_op(common()->Phi(output_type, 2));
+    node->ReplaceInput(0, vtrue);
+    node->ReplaceInput(1, vfalse);
+    node->ReplaceInput(2, merge);
+    node->TrimInputCount(3);
+  } else {
+    node->set_op(machine()->CheckedLoad(type));
   }
-  UNREACHABLE();
-  return 0;
 }
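
Note: DoLoadBuffer replaces the removed out-of-bounds dummy-address trick with
an explicit bounds-check diamond: an out-of-bounds load now materializes
undefined, NaN, or 0, depending on the representation the use requires. The
effective semantics for a Float64 view, as a self-contained reference sketch
(not V8 code):

    #include <cstdint>
    #include <limits>

    // In-bounds loads read the element; out-of-bounds loads yield NaN
    // instead of trapping. {length} is the element count.
    double LoadBufferFloat64Ref(const double* buffer, uint32_t offset,
                                uint32_t length) {
      if (offset < length) return buffer[offset];
      return std::numeric_limits<double>::quiet_NaN();
    }
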
 
 
-intptr_t AddressForOutOfBoundsStore() {
-  static volatile double dummy = 0;
-  return bit_cast<intptr_t>(&dummy);
+void SimplifiedLowering::DoStoreBuffer(Node* node) {
+  DCHECK_EQ(IrOpcode::kStoreBuffer, node->opcode());
+  MachineType const type = BufferAccessOf(node->op()).machine_type();
+  node->set_op(machine()->CheckedStore(type));
 }
 
-}  // namespace
-
 
-void SimplifiedLowering::DoLoadElement(Node* node, MachineType output_type) {
+void SimplifiedLowering::DoLoadElement(Node* node) {
   const ElementAccess& access = ElementAccessOf(node->op());
-  const Operator* op = machine()->Load(access.machine_type);
-  Node* key = node->InputAt(1);
-  Node* index = ComputeIndex(access, key);
-  Node* effect = node->InputAt(3);
-  if (access.bounds_check == kNoBoundsCheck) {
-    DCHECK_EQ(access.machine_type, output_type);
-    node->set_op(op);
-    node->ReplaceInput(1, index);
-    node->ReplaceInput(2, effect);
-    node->ReplaceInput(3, graph()->start());
-  } else {
-    DCHECK_EQ(kTypedArrayBoundsCheck, access.bounds_check);
-
-    Node* base = node->InputAt(0);
-    Node* length = node->InputAt(2);
-    Node* check = graph()->NewNode(machine()->Uint32LessThan(), key, length);
-
-    IntPtrMatcher mbase(base);
-    if (mbase.HasValue() && (output_type & kRepTagged) == 0) {
-      Node* select = graph()->NewNode(
-          common()->Select(kMachIntPtr, BranchHint::kTrue), check, index,
-          jsgraph()->IntPtrConstant(AddressForOutOfBoundsLoad(output_type) -
-                                    mbase.Value()));
-
-      node->set_op(op);
-      node->ReplaceInput(1, select);
-      node->ReplaceInput(2, effect);
-      node->ReplaceInput(3, graph()->start());
-    } else {
-      Diamond d(graph(), common(), check, BranchHint::kTrue);
-
-      Node* load = graph()->NewNode(op, base, index, effect, d.if_true);
-      Node* result = load;
-      if (output_type & kRepTagged) {
-        // TODO(turbofan): This is ugly as hell!
-        SimplifiedOperatorBuilder simplified(graph()->zone());
-        RepresentationChanger changer(jsgraph(), &simplified,
-                                      graph()->zone()->isolate());
-        result =
-            changer.GetTaggedRepresentationFor(result, access.machine_type);
-      }
-
-      Node* undefined;
-      if (output_type & kRepTagged) {
-        DCHECK_EQ(0, access.machine_type & kRepTagged);
-        undefined = jsgraph()->UndefinedConstant();
-      } else if (output_type & kRepFloat32) {
-        undefined =
-            jsgraph()->Float32Constant(std::numeric_limits<float>::quiet_NaN());
-      } else if (output_type & kRepFloat64) {
-        undefined = jsgraph()->Float64Constant(
-            std::numeric_limits<double>::quiet_NaN());
-      } else {
-        undefined = jsgraph()->Int32Constant(0);
-      }
-
-      // Replace effect uses of node with the effect phi.
-      NodeProperties::ReplaceWithValue(node, node, d.EffectPhi(load, effect));
-
-      d.OverwriteWithPhi(node, output_type, result, undefined);
-    }
-  }
+  node->set_op(machine()->Load(access.machine_type));
+  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
 }
 
 
 void SimplifiedLowering::DoStoreElement(Node* node) {
   const ElementAccess& access = ElementAccessOf(node->op());
-  const Operator* op = machine()->Store(StoreRepresentation(
+  node->set_op(machine()->Store(StoreRepresentation(
       access.machine_type,
       ComputeWriteBarrierKind(access.base_is_tagged, access.machine_type,
-                              access.type)));
-  Node* key = node->InputAt(1);
-  Node* index = ComputeIndex(access, key);
-  if (access.bounds_check == kNoBoundsCheck) {
-    node->set_op(op);
-    node->ReplaceInput(1, index);
-    node->RemoveInput(2);
-  } else {
-    DCHECK_EQ(kTypedArrayBoundsCheck, access.bounds_check);
-
-    Node* base = node->InputAt(0);
-    Node* length = node->InputAt(2);
-    Node* value = node->InputAt(3);
-    Node* effect = node->InputAt(4);
-    Node* control = node->InputAt(5);
-    Node* check = graph()->NewNode(machine()->Uint32LessThan(), key, length);
-
-    IntPtrMatcher mbase(base);
-    if (mbase.HasValue()) {
-      Node* select = graph()->NewNode(
-          common()->Select(kMachIntPtr, BranchHint::kTrue), check, index,
-          jsgraph()->IntPtrConstant(AddressForOutOfBoundsStore() -
-                                    mbase.Value()));
-
-      node->set_op(op);
-      node->ReplaceInput(1, select);
-      node->RemoveInput(2);
-    } else {
-      Diamond d(graph(), common(), check, BranchHint::kTrue);
-      d.Chain(control);
-      Node* store = graph()->NewNode(op, base, index, value, effect, d.if_true);
-      d.OverwriteWithEffectPhi(node, store, effect);
-    }
-  }
+                              access.type))));
+  node->ReplaceInput(1, ComputeIndex(access, node->InputAt(1)));
 }
 
 
 void SimplifiedLowering::DoStringAdd(Node* node) {
+  Operator::Properties properties = node->op()->properties();
   Callable callable = CodeFactory::StringAdd(
       zone()->isolate(), STRING_ADD_CHECK_NONE, NOT_TENURED);
   CallDescriptor::Flags flags = CallDescriptor::kNoFlags;
-  CallDescriptor* desc =
-      Linkage::GetStubCallDescriptor(callable.descriptor(), 0, flags, zone());
+  CallDescriptor* desc = Linkage::GetStubCallDescriptor(
+      callable.descriptor(), 0, flags, properties, zone());
   node->set_op(common()->Call(desc));
   node->InsertInput(graph()->zone(), 0,
                     jsgraph()->HeapConstant(callable.code()));
@@ -1319,28 +1288,98 @@ Node* SimplifiedLowering::Int32Div(Node* const node) {
 Node* SimplifiedLowering::Int32Mod(Node* const node) {
   Int32BinopMatcher m(node);
   Node* const zero = jsgraph()->Int32Constant(0);
+  Node* const minus_one = jsgraph()->Int32Constant(-1);
   Node* const lhs = m.left().node();
   Node* const rhs = m.right().node();
 
   if (m.right().Is(-1) || m.right().Is(0)) {
     return zero;
-  } else if (machine()->Int32ModIsSafe() || m.right().HasValue()) {
+  } else if (m.right().HasValue()) {
     return graph()->NewNode(machine()->Int32Mod(), lhs, rhs, graph()->start());
   }
 
-  Diamond if_zero(graph(), common(),
-                  graph()->NewNode(machine()->Word32Equal(), rhs, zero),
-                  BranchHint::kFalse);
+  // General case for signed integer modulus, with an optimization for an
+  // (unknown) power-of-2 right-hand side.
+  //
+  //   if 0 < rhs then
+  //     msk = rhs - 1
+  //     if rhs & msk != 0 then
+  //       lhs % rhs
+  //     else
+  //       if lhs < 0 then
+  //         -(-lhs & msk)
+  //       else
+  //         lhs & msk
+  //   else
+  //     if rhs < -1 then
+  //       lhs % rhs
+  //     else
+  //       zero
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
+  const Operator* const merge_op = common()->Merge(2);
+  const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+
+  Node* check0 = graph()->NewNode(machine()->Int32LessThan(), zero, rhs);
+  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), check0,
+                                   graph()->start());
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* true0;
+  {
+    Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
+
+    Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* true1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* false1;
+    {
+      Node* check2 = graph()->NewNode(machine()->Int32LessThan(), lhs, zero);
+      Node* branch2 = graph()->NewNode(common()->Branch(BranchHint::kFalse),
+                                       check2, if_false1);
+
+      Node* if_true2 = graph()->NewNode(common()->IfTrue(), branch2);
+      Node* true2 = graph()->NewNode(
+          machine()->Int32Sub(), zero,
+          graph()->NewNode(machine()->Word32And(),
+                           graph()->NewNode(machine()->Int32Sub(), zero, lhs),
+                           msk));
+
+      Node* if_false2 = graph()->NewNode(common()->IfFalse(), branch2);
+      Node* false2 = graph()->NewNode(machine()->Word32And(), lhs, msk);
+
+      if_false1 = graph()->NewNode(merge_op, if_true2, if_false2);
+      false1 = graph()->NewNode(phi_op, true2, false2, if_false1);
+    }
 
-  Diamond if_minus_one(graph(), common(),
-                       graph()->NewNode(machine()->Word32Equal(), rhs,
-                                        jsgraph()->Int32Constant(-1)),
-                       BranchHint::kFalse);
-  if_minus_one.Nest(if_zero, false);
-  Node* mod =
-      graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_minus_one.if_false);
+    if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
+    true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* false0;
+  {
+    Node* check1 = graph()->NewNode(machine()->Int32LessThan(), rhs, minus_one);
+    Node* branch1 = graph()->NewNode(common()->Branch(BranchHint::kTrue),
+                                     check1, if_false0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* true1 = graph()->NewNode(machine()->Int32Mod(), lhs, rhs, if_true1);
 
-  return if_zero.Phi(kMachInt32, zero, if_minus_one.Phi(kMachInt32, zero, mod));
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* false1 = zero;
+
+    if_false0 = graph()->NewNode(merge_op, if_true1, if_false1);
+    false0 = graph()->NewNode(phi_op, true1, false1, if_false0);
+  }
+
+  Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+  return graph()->NewNode(phi_op, true0, false0, merge0);
 }
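
Note: the nested diamonds above implement the commented pseudocode. As a
cross-check, the same computation as a self-contained reference function (a
sketch of the intended semantics, not V8 code); like JavaScript's %, the
result takes the sign of the left operand:

    #include <cstdint>

    int32_t Int32ModRef(int32_t lhs, int32_t rhs) {
      if (rhs > 0) {
        int32_t msk = rhs - 1;
        if ((rhs & msk) != 0) return lhs % rhs;  // not a power of 2
        if (lhs < 0) {
          uint32_t neg = 0u - static_cast<uint32_t>(lhs);  // safe negate
          return -static_cast<int32_t>(neg & static_cast<uint32_t>(msk));
        }
        return lhs & msk;  // power of 2, lhs >= 0
      }
      return rhs < -1 ? lhs % rhs : 0;  // rhs in {-1, 0} yields 0
    }
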
 
 
@@ -1365,20 +1404,60 @@ Node* SimplifiedLowering::Uint32Div(Node* const node) {
 
 Node* SimplifiedLowering::Uint32Mod(Node* const node) {
   Uint32BinopMatcher m(node);
+  Node* const minus_one = jsgraph()->Int32Constant(-1);
   Node* const zero = jsgraph()->Uint32Constant(0);
   Node* const lhs = m.left().node();
   Node* const rhs = m.right().node();
 
   if (m.right().Is(0)) {
     return zero;
-  } else if (machine()->Uint32ModIsSafe() || m.right().HasValue()) {
+  } else if (m.right().HasValue()) {
     return graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, graph()->start());
   }
 
-  Node* check = graph()->NewNode(machine()->Word32Equal(), rhs, zero);
-  Diamond d(graph(), common(), check, BranchHint::kFalse);
-  Node* mod = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, d.if_false);
-  return d.Phi(kMachUint32, zero, mod);
+  // General case for unsigned integer modulus, with an optimization for an
+  // (unknown) power-of-2 right-hand side.
+  //
+  //   if rhs then
+  //     msk = rhs - 1
+  //     if rhs & msk != 0 then
+  //       lhs % rhs
+  //     else
+  //       lhs & msk
+  //   else
+  //     zero
+  //
+  // Note: We do not use the Diamond helper class here, because it really hurts
+  // readability with nested diamonds.
+  const Operator* const merge_op = common()->Merge(2);
+  const Operator* const phi_op = common()->Phi(kMachInt32, 2);
+
+  Node* branch0 = graph()->NewNode(common()->Branch(BranchHint::kTrue), rhs,
+                                   graph()->start());
+
+  Node* if_true0 = graph()->NewNode(common()->IfTrue(), branch0);
+  Node* true0;
+  {
+    Node* msk = graph()->NewNode(machine()->Int32Add(), rhs, minus_one);
+
+    Node* check1 = graph()->NewNode(machine()->Word32And(), rhs, msk);
+    Node* branch1 = graph()->NewNode(common()->Branch(), check1, if_true0);
+
+    Node* if_true1 = graph()->NewNode(common()->IfTrue(), branch1);
+    Node* true1 = graph()->NewNode(machine()->Uint32Mod(), lhs, rhs, if_true1);
+
+    Node* if_false1 = graph()->NewNode(common()->IfFalse(), branch1);
+    Node* false1 = graph()->NewNode(machine()->Word32And(), lhs, msk);
+
+    if_true0 = graph()->NewNode(merge_op, if_true1, if_false1);
+    true0 = graph()->NewNode(phi_op, true1, false1, if_true0);
+  }
+
+  Node* if_false0 = graph()->NewNode(common()->IfFalse(), branch0);
+  Node* false0 = zero;
+
+  Node* merge0 = graph()->NewNode(merge_op, if_true0, if_false0);
+  return graph()->NewNode(phi_op, true0, false0, merge0);
 }
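
Note: the unsigned variant needs only one nesting level since there is no sign
to restore. The same semantics as a self-contained reference sketch (not V8
code):

    #include <cstdint>

    uint32_t Uint32ModRef(uint32_t lhs, uint32_t rhs) {
      if (rhs == 0) return 0;                  // x % 0 is defined as 0 here
      uint32_t msk = rhs - 1;
      if ((rhs & msk) != 0) return lhs % rhs;  // not a power of 2
      return lhs & msk;                        // power of 2: single mask
    }
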
 
 
index 0e8d5aa..852ac7e 100644 (file)
@@ -14,6 +14,10 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class RepresentationChanger;
+
+
 class SimplifiedLowering FINAL {
  public:
   explicit SimplifiedLowering(JSGraph* jsgraph) : jsgraph_(jsgraph) {}
@@ -26,7 +30,10 @@ class SimplifiedLowering FINAL {
   void DoStoreField(Node* node);
   // TODO(turbofan): The output_type can be removed once the result of the
   // representation analysis is stored in the node bounds.
-  void DoLoadElement(Node* node, MachineType output_type);
+  void DoLoadBuffer(Node* node, MachineType output_type,
+                    RepresentationChanger* changer);
+  void DoStoreBuffer(Node* node);
+  void DoLoadElement(Node* node);
   void DoStoreElement(Node* node);
   void DoStringAdd(Node* node);
   void DoStringEqual(Node* node);
index a1a6a02..0868cab 100644 (file)
@@ -2,7 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node-matchers.h"
@@ -99,38 +98,6 @@ Reduction SimplifiedOperatorReducer::Reduce(Node* node) {
       if (m.HasValue()) return ReplaceNumber(FastUI2D(m.Value()));
       break;
     }
-    case IrOpcode::kLoadElement: {
-      ElementAccess access = ElementAccessOf(node->op());
-      if (access.bounds_check == kTypedArrayBoundsCheck) {
-        NumberMatcher mkey(node->InputAt(1));
-        NumberMatcher mlength(node->InputAt(2));
-        if (mkey.HasValue() && mlength.HasValue()) {
-          // Skip the typed array bounds check if key and length are constant.
-          if (mkey.Value() >= 0 && mkey.Value() < mlength.Value()) {
-            access.bounds_check = kNoBoundsCheck;
-            node->set_op(simplified()->LoadElement(access));
-            return Changed(node);
-          }
-        }
-      }
-      break;
-    }
-    case IrOpcode::kStoreElement: {
-      ElementAccess access = ElementAccessOf(node->op());
-      if (access.bounds_check == kTypedArrayBoundsCheck) {
-        NumberMatcher mkey(node->InputAt(1));
-        NumberMatcher mlength(node->InputAt(2));
-        if (mkey.HasValue() && mlength.HasValue()) {
-          // Skip the typed array bounds check if key and length are constant.
-          if (mkey.Value() >= 0 && mkey.Value() < mlength.Value()) {
-            access.bounds_check = kNoBoundsCheck;
-            node->set_op(simplified()->StoreElement(access));
-            return Changed(node);
-          }
-        }
-      }
-      break;
-    }
     default:
       break;
   }
index 88e9311..8f6c9aa 100644 (file)
@@ -23,9 +23,9 @@ class MachineOperatorBuilder;
 class SimplifiedOperatorReducer FINAL : public Reducer {
  public:
   explicit SimplifiedOperatorReducer(JSGraph* jsgraph);
-  virtual ~SimplifiedOperatorReducer();
+  ~SimplifiedOperatorReducer() FINAL;
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
+  Reduction Reduce(Node* node) FINAL;
 
  private:
   Reduction Change(Node* node, const Operator* op, Node* a);
index 15b34f9..e082a4b 100644 (file)
@@ -25,6 +25,64 @@ std::ostream& operator<<(std::ostream& os, BaseTaggedness base_taggedness) {
 }
 
 
+MachineType BufferAccess::machine_type() const {
+  switch (external_array_type_) {
+    case kExternalUint8Array:
+    case kExternalUint8ClampedArray:
+      return kMachUint8;
+    case kExternalInt8Array:
+      return kMachInt8;
+    case kExternalUint16Array:
+      return kMachUint16;
+    case kExternalInt16Array:
+      return kMachInt16;
+    case kExternalUint32Array:
+      return kMachUint32;
+    case kExternalInt32Array:
+      return kMachInt32;
+    case kExternalFloat32Array:
+      return kMachFloat32;
+    case kExternalFloat64Array:
+      return kMachFloat64;
+  }
+  UNREACHABLE();
+  return kMachNone;
+}
+
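Note: BufferAccess derives the machine representation from the external array
type, so the operators need to carry only the one enum value. A usage sketch
with names from this patch:

    BufferAccess access(kExternalFloat32Array);
    DCHECK_EQ(kMachFloat32, access.machine_type());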
+
+bool operator==(BufferAccess lhs, BufferAccess rhs) {
+  return lhs.external_array_type() == rhs.external_array_type();
+}
+
+
+bool operator!=(BufferAccess lhs, BufferAccess rhs) { return !(lhs == rhs); }
+
+
+size_t hash_value(BufferAccess access) {
+  return base::hash<ExternalArrayType>()(access.external_array_type());
+}
+
+
+std::ostream& operator<<(std::ostream& os, BufferAccess access) {
+  switch (access.external_array_type()) {
+#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                          \
+    return os << #Type;
+    TYPED_ARRAYS(TYPED_ARRAY_CASE)
+#undef TYPED_ARRAY_CASE
+  }
+  UNREACHABLE();
+  return os;
+}
+
+
+BufferAccess const BufferAccessOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kLoadBuffer ||
+         op->opcode() == IrOpcode::kStoreBuffer);
+  return OpParameter<BufferAccess>(op);
+}
+
+
 bool operator==(FieldAccess const& lhs, FieldAccess const& rhs) {
   return lhs.base_is_tagged == rhs.base_is_tagged && lhs.offset == rhs.offset &&
          lhs.machine_type == rhs.machine_type;
@@ -57,18 +115,6 @@ std::ostream& operator<<(std::ostream& os, FieldAccess const& access) {
 }
 
 
-std::ostream& operator<<(std::ostream& os, BoundsCheckMode bounds_check_mode) {
-  switch (bounds_check_mode) {
-    case kNoBoundsCheck:
-      return os << "no bounds check";
-    case kTypedArrayBoundsCheck:
-      return os << "ignore out of bounds";
-  }
-  UNREACHABLE();
-  return os;
-}
-
-
 bool operator==(ElementAccess const& lhs, ElementAccess const& rhs) {
   return lhs.base_is_tagged == rhs.base_is_tagged &&
          lhs.header_size == rhs.header_size &&
@@ -90,7 +136,7 @@ size_t hash_value(ElementAccess const& access) {
 std::ostream& operator<<(std::ostream& os, ElementAccess const& access) {
   os << access.base_is_tagged << ", " << access.header_size << ", ";
   access.type->PrintTo(os);
-  os << ", " << access.machine_type << ", " << access.bounds_check;
+  os << ", " << access.machine_type;
   return os;
 }
 
@@ -150,6 +196,26 @@ struct SimplifiedOperatorGlobalCache FINAL {
   Name##Operator k##Name;
   PURE_OP_LIST(PURE)
 #undef PURE
+
+#define BUFFER_ACCESS(Type, type, TYPE, ctype, size)                          \
+  struct LoadBuffer##Type##Operator FINAL : public Operator1<BufferAccess> {  \
+    LoadBuffer##Type##Operator()                                              \
+        : Operator1<BufferAccess>(IrOpcode::kLoadBuffer,                      \
+                                  Operator::kNoThrow | Operator::kNoWrite,    \
+                                  "LoadBuffer", 3, 1, 1, 1, 1, 0,             \
+                                  BufferAccess(kExternal##Type##Array)) {}    \
+  };                                                                          \
+  struct StoreBuffer##Type##Operator FINAL : public Operator1<BufferAccess> { \
+    StoreBuffer##Type##Operator()                                             \
+        : Operator1<BufferAccess>(IrOpcode::kStoreBuffer,                     \
+                                  Operator::kNoRead | Operator::kNoThrow,     \
+                                  "StoreBuffer", 4, 1, 1, 0, 1, 0,            \
+                                  BufferAccess(kExternal##Type##Array)) {}    \
+  };                                                                          \
+  LoadBuffer##Type##Operator kLoadBuffer##Type;                               \
+  StoreBuffer##Type##Operator kStoreBuffer##Type;
+  TYPED_ARRAYS(BUFFER_ACCESS)
+#undef BUFFER_ACCESS
 };
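
Note: for each typed-array element type, the BUFFER_ACCESS macro stamps out a
pair of cached operator singletons. Instantiated for Float64, the load half
expands to roughly (whitespace aside):

    struct LoadBufferFloat64Operator FINAL : public Operator1<BufferAccess> {
      LoadBufferFloat64Operator()
          : Operator1<BufferAccess>(IrOpcode::kLoadBuffer,
                                    Operator::kNoThrow | Operator::kNoWrite,
                                    "LoadBuffer", 3, 1, 1, 1, 1, 0,
                                    BufferAccess(kExternalFloat64Array)) {}
    };
    LoadBufferFloat64Operator kLoadBufferFloat64;
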
 
 
@@ -175,11 +241,37 @@ const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
 }
 
 
+const Operator* SimplifiedOperatorBuilder::LoadBuffer(BufferAccess access) {
+  switch (access.external_array_type()) {
+#define LOAD_BUFFER(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                     \
+    return &cache_.kLoadBuffer##Type;
+    TYPED_ARRAYS(LOAD_BUFFER)
+#undef LOAD_BUFFER
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+
+const Operator* SimplifiedOperatorBuilder::StoreBuffer(BufferAccess access) {
+  switch (access.external_array_type()) {
+#define STORE_BUFFER(Type, type, TYPE, ctype, size) \
+  case kExternal##Type##Array:                      \
+    return &cache_.kStoreBuffer##Type;
+    TYPED_ARRAYS(STORE_BUFFER)
+#undef STORE_BUFFER
+  }
+  UNREACHABLE();
+  return nullptr;
+}
+
+
 #define ACCESS_OP_LIST(V)                                    \
   V(LoadField, FieldAccess, Operator::kNoWrite, 1, 1, 1)     \
   V(StoreField, FieldAccess, Operator::kNoRead, 2, 1, 0)     \
-  V(LoadElement, ElementAccess, Operator::kNoWrite, 3, 0, 1) \
-  V(StoreElement, ElementAccess, Operator::kNoRead, 4, 1, 0)
+  V(LoadElement, ElementAccess, Operator::kNoWrite, 2, 1, 1) \
+  V(StoreElement, ElementAccess, Operator::kNoRead, 3, 1, 0)
 
 
 #define ACCESS(Name, Type, properties, value_input_count, control_input_count, \
index c918042..72608ee 100644 (file)
@@ -33,6 +33,29 @@ enum BaseTaggedness { kUntaggedBase, kTaggedBase };
 std::ostream& operator<<(std::ostream&, BaseTaggedness);
 
 
+// An access descriptor for loads/stores of array buffers.
+class BufferAccess FINAL {
+ public:
+  explicit BufferAccess(ExternalArrayType external_array_type)
+      : external_array_type_(external_array_type) {}
+
+  ExternalArrayType external_array_type() const { return external_array_type_; }
+  MachineType machine_type() const;
+
+ private:
+  ExternalArrayType const external_array_type_;
+};
+
+bool operator==(BufferAccess, BufferAccess);
+bool operator!=(BufferAccess, BufferAccess);
+
+size_t hash_value(BufferAccess);
+
+std::ostream& operator<<(std::ostream&, BufferAccess);
+
+BufferAccess const BufferAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+
+
 // An access descriptor for loads/stores of fixed structures like field
 // accesses of heap objects. Accesses from either tagged or untagged base
 // pointers are supported; untagging is done automatically during lowering.
@@ -53,11 +76,7 @@ size_t hash_value(FieldAccess const&);
 
 std::ostream& operator<<(std::ostream&, FieldAccess const&);
 
-
-// The bound checking mode for ElementAccess below.
-enum BoundsCheckMode { kNoBoundsCheck, kTypedArrayBoundsCheck };
-
-std::ostream& operator<<(std::ostream&, BoundsCheckMode);
+FieldAccess const& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
 
 
 // An access descriptor for loads/stores of indexed structures like characters
@@ -65,7 +84,6 @@ std::ostream& operator<<(std::ostream&, BoundsCheckMode);
 // untagged base pointers are supported; untagging is done automatically during
 // lowering.
 struct ElementAccess {
-  BoundsCheckMode bounds_check;   // specifies the bounds checking mode.
   BaseTaggedness base_is_tagged;  // specifies if the base pointer is tagged.
   int header_size;                // size of the header, without tag.
   Type* type;                     // type of the element.
@@ -81,13 +99,7 @@ size_t hash_value(ElementAccess const&);
 
 std::ostream& operator<<(std::ostream&, ElementAccess const&);
 
-
-// If the accessed object is not a heap object, add this to the header_size.
-static const int kNonHeapObjectHeaderSize = kHeapObjectTag;
-
-
-const FieldAccess& FieldAccessOf(const Operator* op) WARN_UNUSED_RESULT;
-const ElementAccess& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
+ElementAccess const& ElementAccessOf(const Operator* op) WARN_UNUSED_RESULT;
 
 
 // Interface for building simplified operators, which represent the
@@ -149,8 +161,14 @@ class SimplifiedOperatorBuilder FINAL {
   const Operator* ObjectIsSmi();
   const Operator* ObjectIsNonNegativeSmi();
 
-  const Operator* LoadField(const FieldAccess&);
-  const Operator* StoreField(const FieldAccess&);
+  const Operator* LoadField(FieldAccess const&);
+  const Operator* StoreField(FieldAccess const&);
+
+  // load-buffer buffer, offset, length
+  const Operator* LoadBuffer(BufferAccess);
+
+  // store-buffer buffer, offset, length, value
+  const Operator* StoreBuffer(BufferAccess);
 
   // load-element [base + index]
   const Operator* LoadElement(ElementAccess const&);
index 9503010..9e21ae4 100644 (file)
@@ -10,12 +10,12 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-class SourcePositionTable::Decorator : public GraphDecorator {
+class SourcePositionTable::Decorator FINAL : public GraphDecorator {
  public:
   explicit Decorator(SourcePositionTable* source_positions)
       : source_positions_(source_positions) {}
 
-  virtual void Decorate(Node* node) {
+  void Decorate(Node* node) FINAL {
     DCHECK(!source_positions_->current_position_.IsInvalid());
     source_positions_->table_.Set(node, source_positions_->current_position_);
   }
index 0fa2b95..a170a71 100644 (file)
@@ -4,6 +4,7 @@
 
 #include "src/bootstrapper.h"
 #include "src/compiler/graph-inl.h"
+#include "src/compiler/graph-reducer.h"
 #include "src/compiler/js-operator.h"
 #include "src/compiler/node.h"
 #include "src/compiler/node-properties-inl.h"
@@ -15,10 +16,132 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
-class Typer::Decorator : public GraphDecorator {
+#define NATIVE_TYPES(V) \
+  V(Int8)               \
+  V(Uint8)              \
+  V(Int16)              \
+  V(Uint16)             \
+  V(Int32)              \
+  V(Uint32)             \
+  V(Float32)            \
+  V(Float64)
+
+enum LazyCachedType {
+  kNumberFunc0,
+  kNumberFunc1,
+  kNumberFunc2,
+  kImulFunc,
+  kClz32Func,
+  kArrayBufferFunc,
+#define NATIVE_TYPE_CASE(Type) k##Type, k##Type##Array, k##Type##ArrayFunc,
+  NATIVE_TYPES(NATIVE_TYPE_CASE)
+#undef NATIVE_TYPE_CASE
+  kNumLazyCachedTypes
+};
+
+
+// Constructs and caches types lazily.
+// TODO(turbofan): these types could be globally cached or cached per isolate.
+class LazyTypeCache FINAL : public ZoneObject {
+ public:
+  explicit LazyTypeCache(Zone* zone) : zone_(zone) {
+    memset(cache_, 0, sizeof(cache_));
+  }
+
+  inline Type* Get(LazyCachedType type) {
+    int index = static_cast<int>(type);
+    DCHECK(index < kNumLazyCachedTypes);
+    if (cache_[index] == NULL) cache_[index] = Create(type);
+    return cache_[index];
+  }
+
+ private:
+  Type* Create(LazyCachedType type) {
+    switch (type) {
+      case kInt8:
+        return CreateNative(CreateRange<int8_t>(), Type::UntaggedSigned8());
+      case kUint8:
+        return CreateNative(CreateRange<uint8_t>(), Type::UntaggedUnsigned8());
+      case kInt16:
+        return CreateNative(CreateRange<int16_t>(), Type::UntaggedSigned16());
+      case kUint16:
+        return CreateNative(CreateRange<uint16_t>(),
+                            Type::UntaggedUnsigned16());
+      case kInt32:
+        return CreateNative(Type::Signed32(), Type::UntaggedSigned32());
+      case kUint32:
+        return CreateNative(Type::Unsigned32(), Type::UntaggedUnsigned32());
+      case kFloat32:
+        return CreateNative(Type::Number(), Type::UntaggedFloat32());
+      case kFloat64:
+        return CreateNative(Type::Number(), Type::UntaggedFloat64());
+      case kNumberFunc0:
+        return Type::Function(Type::Number(), zone());
+      case kNumberFunc1:
+        return Type::Function(Type::Number(), Type::Number(), zone());
+      case kNumberFunc2:
+        return Type::Function(Type::Number(), Type::Number(), Type::Number(),
+                              zone());
+      case kImulFunc:
+        return Type::Function(Type::Signed32(), Type::Integral32(),
+                              Type::Integral32(), zone());
+      case kClz32Func:
+        return Type::Function(CreateRange(0, 32), Type::Number(), zone());
+      case kArrayBufferFunc:
+        return Type::Function(Type::Object(zone()), Type::Unsigned32(), zone());
+#define NATIVE_TYPE_CASE(Type)        \
+  case k##Type##Array:                \
+    return CreateArray(Get(k##Type)); \
+  case k##Type##ArrayFunc:            \
+    return CreateArrayFunction(Get(k##Type##Array));
+        NATIVE_TYPES(NATIVE_TYPE_CASE)
+#undef NATIVE_TYPE_CASE
+      case kNumLazyCachedTypes:
+        break;
+    }
+    UNREACHABLE();
+    return NULL;
+  }
+
+  Type* CreateArray(Type* element) const {
+    return Type::Array(element, zone());
+  }
+
+  Type* CreateArrayFunction(Type* array) const {
+    Type* arg1 = Type::Union(Type::Unsigned32(), Type::Object(), zone());
+    Type* arg2 = Type::Union(Type::Unsigned32(), Type::Undefined(), zone());
+    Type* arg3 = arg2;
+    return Type::Function(array, arg1, arg2, arg3, zone());
+  }
+
+  Type* CreateNative(Type* semantic, Type* representation) const {
+    return Type::Intersect(semantic, representation, zone());
+  }
+
+  template <typename T>
+  Type* CreateRange() const {
+    return CreateRange(std::numeric_limits<T>::min(),
+                       std::numeric_limits<T>::max());
+  }
+
+  Type* CreateRange(double min, double max) const {
+    return Type::Range(factory()->NewNumber(min), factory()->NewNumber(max),
+                       zone());
+  }
+
+  Factory* factory() const { return isolate()->factory(); }
+  Isolate* isolate() const { return zone()->isolate(); }
+  Zone* zone() const { return zone_; }
+
+  Type* cache_[kNumLazyCachedTypes];
+  Zone* zone_;
+};
+
+
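Note: LazyTypeCache constructs each entry on first request and memoizes it in
a fixed-size array, so unused types cost nothing. A usage sketch with names
from this patch:

    LazyTypeCache cache(zone);
    Type* t1 = cache.Get(kFloat64ArrayFunc);  // built on first call
    Type* t2 = cache.Get(kFloat64ArrayFunc);  // same cached pointer
    DCHECK_EQ(t1, t2);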
+class Typer::Decorator FINAL : public GraphDecorator {
  public:
   explicit Decorator(Typer* typer) : typer_(typer) {}
-  virtual void Decorate(Node* node);
+  void Decorate(Node* node) FINAL;
 
  private:
   Typer* typer_;
@@ -29,6 +152,7 @@ Typer::Typer(Graph* graph, MaybeHandle<Context> context)
     : graph_(graph),
       context_(context),
       decorator_(NULL),
+      cache_(new (graph->zone()) LazyTypeCache(graph->zone())),
       weaken_min_limits_(graph->zone()),
       weaken_max_limits_(graph->zone()) {
   Zone* zone = this->zone();
@@ -39,83 +163,42 @@ Typer::Typer(Graph* graph, MaybeHandle<Context> context)
   Handle<Object> infinity = f->NewNumber(+V8_INFINITY);
   Handle<Object> minusinfinity = f->NewNumber(-V8_INFINITY);
 
-  negative_signed32 = Type::Union(
-      Type::SignedSmall(), Type::OtherSigned32(), zone);
-  non_negative_signed32 = Type::Union(
-      Type::UnsignedSmall(), Type::OtherUnsigned31(), zone);
+  Type* number = Type::Number();
+  Type* signed32 = Type::Signed32();
+  Type* unsigned32 = Type::Unsigned32();
+  Type* nan_or_minuszero = Type::Union(Type::NaN(), Type::MinusZero(), zone);
+  Type* truncating_to_zero =
+      Type::Union(Type::Union(Type::Constant(infinity, zone),
+                              Type::Constant(minusinfinity, zone), zone),
+                  nan_or_minuszero, zone);
+
+  boolean_or_number = Type::Union(Type::Boolean(), Type::Number(), zone);
   undefined_or_null = Type::Union(Type::Undefined(), Type::Null(), zone);
+  undefined_or_number = Type::Union(Type::Undefined(), Type::Number(), zone);
   singleton_false = Type::Constant(f->false_value(), zone);
   singleton_true = Type::Constant(f->true_value(), zone);
   singleton_zero = Type::Range(zero, zero, zone);
   singleton_one = Type::Range(one, one, zone);
   zero_or_one = Type::Union(singleton_zero, singleton_one, zone);
-  zeroish = Type::Union(
-      singleton_zero, Type::Union(Type::NaN(), Type::MinusZero(), zone), zone);
+  zeroish = Type::Union(singleton_zero, nan_or_minuszero, zone);
+  signed32ish = Type::Union(signed32, truncating_to_zero, zone);
+  unsigned32ish = Type::Union(unsigned32, truncating_to_zero, zone);
   falsish = Type::Union(Type::Undetectable(),
-      Type::Union(zeroish, undefined_or_null, zone), zone);
+                        Type::Union(Type::Union(singleton_false, zeroish, zone),
+                                    undefined_or_null, zone),
+                        zone);
+  truish = Type::Union(
+      singleton_true,
+      Type::Union(Type::DetectableReceiver(), Type::Symbol(), zone), zone);
   integer = Type::Range(minusinfinity, infinity, zone);
-  weakint = Type::Union(
-      integer, Type::Union(Type::NaN(), Type::MinusZero(), zone), zone);
-
-  Type* number = Type::Number();
-  Type* signed32 = Type::Signed32();
-  Type* unsigned32 = Type::Unsigned32();
-  Type* integral32 = Type::Integral32();
-  Type* object = Type::Object();
-  Type* undefined = Type::Undefined();
+  weakint = Type::Union(integer, nan_or_minuszero, zone);
 
   number_fun0_ = Type::Function(number, zone);
   number_fun1_ = Type::Function(number, number, zone);
   number_fun2_ = Type::Function(number, number, number, zone);
+
   weakint_fun1_ = Type::Function(weakint, number, zone);
-  imul_fun_ = Type::Function(signed32, integral32, integral32, zone);
-  clz32_fun_ = Type::Function(
-      Type::Range(zero, f->NewNumber(32), zone), number, zone);
-  random_fun_ = Type::Function(Type::Union(
-      Type::UnsignedSmall(), Type::OtherNumber(), zone), zone);
-
-  Type* int8 = Type::Intersect(
-      Type::Range(f->NewNumber(-0x7F), f->NewNumber(0x7F-1), zone),
-      Type::UntaggedInt8(), zone);
-  Type* int16 = Type::Intersect(
-      Type::Range(f->NewNumber(-0x7FFF), f->NewNumber(0x7FFF-1), zone),
-      Type::UntaggedInt16(), zone);
-  Type* uint8 = Type::Intersect(
-      Type::Range(zero, f->NewNumber(0xFF-1), zone),
-      Type::UntaggedInt8(), zone);
-  Type* uint16 = Type::Intersect(
-      Type::Range(zero, f->NewNumber(0xFFFF-1), zone),
-      Type::UntaggedInt16(), zone);
-
-#define NATIVE_TYPE(sem, rep) \
-    Type::Intersect(Type::sem(), Type::rep(), zone)
-  Type* int32 = NATIVE_TYPE(Signed32, UntaggedInt32);
-  Type* uint32 = NATIVE_TYPE(Unsigned32, UntaggedInt32);
-  Type* float32 = NATIVE_TYPE(Number, UntaggedFloat32);
-  Type* float64 = NATIVE_TYPE(Number, UntaggedFloat64);
-#undef NATIVE_TYPE
-
-  Type* buffer = Type::Buffer(zone);
-  Type* int8_array = Type::Array(int8, zone);
-  Type* int16_array = Type::Array(int16, zone);
-  Type* int32_array = Type::Array(int32, zone);
-  Type* uint8_array = Type::Array(uint8, zone);
-  Type* uint16_array = Type::Array(uint16, zone);
-  Type* uint32_array = Type::Array(uint32, zone);
-  Type* float32_array = Type::Array(float32, zone);
-  Type* float64_array = Type::Array(float64, zone);
-  Type* arg1 = Type::Union(unsigned32, object, zone);
-  Type* arg2 = Type::Union(unsigned32, undefined, zone);
-  Type* arg3 = arg2;
-  array_buffer_fun_ = Type::Function(buffer, unsigned32, zone);
-  int8_array_fun_ = Type::Function(int8_array, arg1, arg2, arg3, zone);
-  int16_array_fun_ = Type::Function(int16_array, arg1, arg2, arg3, zone);
-  int32_array_fun_ = Type::Function(int32_array, arg1, arg2, arg3, zone);
-  uint8_array_fun_ = Type::Function(uint8_array, arg1, arg2, arg3, zone);
-  uint16_array_fun_ = Type::Function(uint16_array, arg1, arg2, arg3, zone);
-  uint32_array_fun_ = Type::Function(uint32_array, arg1, arg2, arg3, zone);
-  float32_array_fun_ = Type::Function(float32_array, arg1, arg2, arg3, zone);
-  float64_array_fun_ = Type::Function(float64_array, arg1, arg2, arg3, zone);
+  random_fun_ = Type::Function(Type::OrderedNumber(), zone);
 
   const int limits_count = 20;
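
The truncating_to_zero set built above is exactly the set of inputs that the ECMAScript ToInt32/ToUint32 conversions collapse to zero. A minimal standalone sketch of that conversion, in plain C++ with an illustrative name (not a V8 API):

    #include <cmath>
    #include <cstdint>

    // ES ToInt32: NaN, +Infinity, -Infinity and -0 all map to 0, which is why
    // the typer unions them (nan_or_minuszero plus the two infinity constants)
    // into truncating_to_zero.
    int32_t ToInt32Sketch(double d) {
      if (std::isnan(d) || std::isinf(d) || d == 0) return 0;  // d == 0 catches -0
      double m = std::fmod(std::trunc(d), 4294967296.0);  // now in (-2^32, 2^32)
      if (m < 0) m += 4294967296.0;                       // wrap into [0, 2^32)
      // Modular narrowing; implementation-defined pre-C++20 but modular on the
      // compilers V8 targets.
      return static_cast<int32_t>(static_cast<uint32_t>(m));
    }
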
 
@@ -141,10 +224,42 @@ Typer::~Typer() {
 }
 
 
-class Typer::Visitor : public NullNodeVisitor {
+class Typer::Visitor : public Reducer {
  public:
   explicit Visitor(Typer* typer) : typer_(typer) {}
 
+  Reduction Reduce(Node* node) OVERRIDE {
+    if (node->op()->ValueOutputCount() == 0) return NoChange();
+    switch (node->opcode()) {
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    return UpdateBounds(node, TypeBinaryOp(node, x##Typer));
+      JS_SIMPLE_BINOP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) \
+  case IrOpcode::k##x:  \
+    return UpdateBounds(node, Type##x(node));
+      DECLARE_CASE(Start)
+      // VALUE_OP_LIST without JS_SIMPLE_BINOP_LIST:
+      COMMON_OP_LIST(DECLARE_CASE)
+      SIMPLIFIED_OP_LIST(DECLARE_CASE)
+      MACHINE_OP_LIST(DECLARE_CASE)
+      JS_SIMPLE_UNOP_LIST(DECLARE_CASE)
+      JS_OBJECT_OP_LIST(DECLARE_CASE)
+      JS_CONTEXT_OP_LIST(DECLARE_CASE)
+      JS_OTHER_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+
+#define DECLARE_CASE(x) case IrOpcode::k##x:
+      DECLARE_CASE(End)
+      INNER_CONTROL_OP_LIST(DECLARE_CASE)
+#undef DECLARE_CASE
+      break;
+    }
+    return NoChange();
+  }
+
   Bounds TypeNode(Node* node) {
     switch (node->opcode()) {
 #define DECLARE_CASE(x) \
@@ -176,7 +291,10 @@ class Typer::Visitor : public NullNodeVisitor {
 
   Type* TypeConstant(Handle<Object> value);
 
- protected:
+ private:
+  Typer* typer_;
+  MaybeHandle<Context> context_;
+
 #define DECLARE_METHOD(x) inline Bounds Type##x(Node* node);
   DECLARE_METHOD(Start)
   VALUE_OP_LIST(DECLARE_METHOD)
@@ -207,10 +325,6 @@ class Typer::Visitor : public NullNodeVisitor {
   Graph* graph() { return typer_->graph(); }
   MaybeHandle<Context> context() { return typer_->context(); }
 
- private:
-  Typer* typer_;
-  MaybeHandle<Context> context_;
-
   typedef Type* (*UnaryTyperFun)(Type*, Typer* t);
   typedef Type* (*BinaryTyperFun)(Type*, Type*, Typer* t);
 
@@ -243,97 +357,60 @@ class Typer::Visitor : public NullNodeVisitor {
   static Type* JSUnaryNotTyper(Type*, Typer*);
   static Type* JSLoadPropertyTyper(Type*, Type*, Typer*);
   static Type* JSCallFunctionTyper(Type*, Typer*);
-};
-
 
-class Typer::RunVisitor : public Typer::Visitor {
- public:
-  explicit RunVisitor(Typer* typer)
-      : Visitor(typer),
-        redo(NodeSet::key_compare(), NodeSet::allocator_type(typer->zone())) {}
-
-  void Post(Node* node) {
-    if (node->op()->ValueOutputCount() > 0) {
-      Bounds bounds = TypeNode(node);
-      NodeProperties::SetBounds(node, bounds);
-      // Remember incompletely typed nodes for least fixpoint iteration.
-      if (!NodeProperties::AllValueInputsAreTyped(node)) redo.insert(node);
+  Reduction UpdateBounds(Node* node, Bounds current) {
+    if (NodeProperties::IsTyped(node)) {
+      // Widen the bounds of a previously typed node.
+      Bounds previous = NodeProperties::GetBounds(node);
+      // Speed up termination in the presence of range types:
+      current.upper = Weaken(current.upper, previous.upper);
+      current.lower = Weaken(current.lower, previous.lower);
+
+      // Types should not get less precise.
+      DCHECK(previous.lower->Is(current.lower));
+      DCHECK(previous.upper->Is(current.upper));
+
+      NodeProperties::SetBounds(node, current);
+      if (!(previous.Narrows(current) && current.Narrows(previous))) {
+        // If something changed, revisit all uses.
+        return Changed(node);
+      }
+      return NoChange();
+    } else {
+      // No previous type, simply update the bounds.
+      NodeProperties::SetBounds(node, current);
+      return Changed(node);
     }
   }
-
-  NodeSet redo;
 };
 
 
-class Typer::WidenVisitor : public Typer::Visitor {
- public:
-  explicit WidenVisitor(Typer* typer)
-      : Visitor(typer),
-        local_zone_(zone()->isolate()),
-        enabled_(graph()->NodeCount(), true, &local_zone_),
-        queue_(&local_zone_) {}
-
-  void Run(NodeSet* nodes) {
-    // Queue all the roots.
-    for (Node* node : *nodes) {
-      Queue(node);
-    }
-
-    while (!queue_.empty()) {
-      Node* node = queue_.front();
-      queue_.pop();
-
-      if (node->op()->ValueOutputCount() > 0) {
-        // Enable future queuing (and thus re-typing) of this node.
-        enabled_[node->id()] = true;
-
-        // Compute the new type.
-        Bounds previous = BoundsOrNone(node);
-        Bounds current = TypeNode(node);
-
-        // Speed up termination in the presence of range types:
-        current.upper = Weaken(current.upper, previous.upper);
-        current.lower = Weaken(current.lower, previous.lower);
-
-        // Types should not get less precise.
-        DCHECK(previous.lower->Is(current.lower));
-        DCHECK(previous.upper->Is(current.upper));
-
-        NodeProperties::SetBounds(node, current);
-        // If something changed, push all uses into the queue.
-        if (!(previous.Narrows(current) && current.Narrows(previous))) {
-          for (Node* use : node->uses()) {
-            Queue(use);
-          }
+void Typer::Run() {
+  {
+    // TODO(titzer): this is a hack. Reset types for interior nodes first.
+    NodeDeque deque(zone());
+    NodeMarker<bool> marked(graph(), 2);
+    deque.push_front(graph()->end());
+    marked.Set(graph()->end(), true);
+    while (!deque.empty()) {
+      Node* node = deque.front();
+      deque.pop_front();
+      // TODO(titzer): there shouldn't be a need to retype constants.
+      if (node->op()->ValueOutputCount() > 0)
+        NodeProperties::RemoveBounds(node);
+      for (Node* input : node->inputs()) {
+        if (!marked.Get(input)) {
+          marked.Set(input, true);
+          deque.push_back(input);
         }
       }
-      // If there is no value output, we deliberately leave the node disabled
-      // for queuing - there is no need to type it.
     }
   }
 
-  void Queue(Node* node) {
-    // If the node is enabled for queuing, push it to the queue and disable it
-    // (to avoid queuing it multiple times).
-    if (enabled_[node->id()]) {
-      queue_.push(node);
-      enabled_[node->id()] = false;
-    }
-  }
-
- private:
-  Zone local_zone_;
-  BoolVector enabled_;
-  ZoneQueue<Node*> queue_;
-};
-
-
-void Typer::Run() {
-  RunVisitor typing(this);
-  graph_->VisitNodeInputsFromEnd(&typing);
-  // Find least fixpoint.
-  WidenVisitor widen(this);
-  widen.Run(&typing.redo);
+  Visitor visitor(this);
+  GraphReducer graph_reducer(graph(), zone());
+  graph_reducer.AddReducer(&visitor);
+  graph_reducer.ReduceGraph();
 }
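
The reducer-based Run() above amounts to a classic monotone worklist fixpoint: retype a node and, if its bounds changed, revisit all of its uses. A self-contained toy version of that loop, with illustrative names rather than the real V8 interfaces:

    #include <algorithm>
    #include <queue>
    #include <vector>

    // Each toy node carries an int "type"; retyping takes the max over the
    // inputs, standing in for the monotone Bounds computation in UpdateBounds.
    struct ToyNode {
      std::vector<int> inputs;  // indices of input nodes
      std::vector<int> uses;    // indices of user nodes
      int type = 0;
    };

    void RunToyFixpoint(std::vector<ToyNode>& graph) {
      std::queue<int> worklist;
      for (int i = 0; i < static_cast<int>(graph.size()); ++i) worklist.push(i);
      while (!worklist.empty()) {
        int id = worklist.front();
        worklist.pop();
        int next = graph[id].type;
        for (int in : graph[id].inputs) next = std::max(next, graph[in].type);
        if (next != graph[id].type) {  // changed: requeue uses, like Changed()
          graph[id].type = next;
          for (int use : graph[id].uses) worklist.push(use);
        }
      }
    }
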
 
 
@@ -439,8 +516,8 @@ Type* Typer::Visitor::ToPrimitive(Type* type, Typer* t) {
 Type* Typer::Visitor::ToBoolean(Type* type, Typer* t) {
   if (type->Is(Type::Boolean())) return type;
   if (type->Is(t->falsish)) return t->singleton_false;
-  if (type->Is(Type::DetectableReceiver())) return t->singleton_true;
-  if (type->Is(Type::OrderedNumber()) && (type->Max() < 0 || 0 < type->Min())) {
+  if (type->Is(t->truish)) return t->singleton_true;
+  if (type->Is(Type::PlainNumber()) && (type->Max() < 0 || 0 < type->Min())) {
     return t->singleton_true;  // Ruled out nan, -0 and +0.
   }
   return Type::Boolean();
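
For plain numbers, the falsish/truish lattice above reduces to the numeric rule of ES ToBoolean, which is why a PlainNumber range excluding zero collapses to singleton_true. A one-line sketch:

    #include <cmath>

    // ES ToBoolean restricted to numbers: only 0, -0 and NaN are false.
    bool NumberToBooleanSketch(double d) {
      return !(d == 0 || std::isnan(d));  // d == 0 also catches -0
    }
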
@@ -449,10 +526,22 @@ Type* Typer::Visitor::ToBoolean(Type* type, Typer* t) {
 
 Type* Typer::Visitor::ToNumber(Type* type, Typer* t) {
   if (type->Is(Type::Number())) return type;
+  if (type->Is(Type::Null())) return t->singleton_zero;
   if (type->Is(Type::Undefined())) return Type::NaN();
+  if (type->Is(t->undefined_or_null)) {
+    return Type::Union(Type::NaN(), t->singleton_zero, t->zone());
+  }
+  if (type->Is(t->undefined_or_number)) {
+    return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
+                       Type::NaN(), t->zone());
+  }
   if (type->Is(t->singleton_false)) return t->singleton_zero;
   if (type->Is(t->singleton_true)) return t->singleton_one;
   if (type->Is(Type::Boolean())) return t->zero_or_one;
+  if (type->Is(t->boolean_or_number)) {
+    return Type::Union(Type::Intersect(type, Type::Number(), t->zone()),
+                       t->zero_or_one, t->zone());
+  }
   return Type::Number();
 }
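
The new branches above encode the ES ToNumber table for the oddball inputs; a toy restatement with an illustrative enum, not the V8 value representation:

    #include <limits>

    enum class Oddball { kNull, kUndefined, kFalse, kTrue };

    // null -> +0 (singleton_zero), undefined -> NaN, false/true -> 0/1
    // (zero_or_one); mixed inputs union these with their number part.
    double ToNumberSketch(Oddball v) {
      switch (v) {
        case Oddball::kNull:      return 0.0;
        case Oddball::kUndefined: return std::numeric_limits<double>::quiet_NaN();
        case Oddball::kFalse:     return 0.0;
        case Oddball::kTrue:      return 1.0;
      }
      return 0.0;  // unreachable
    }
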
 
@@ -467,6 +556,10 @@ Type* Typer::Visitor::NumberToInt32(Type* type, Typer* t) {
   // TODO(neis): DCHECK(type->Is(Type::Number()));
   if (type->Is(Type::Signed32())) return type;
   if (type->Is(t->zeroish)) return t->singleton_zero;
+  if (type->Is(t->signed32ish)) {
+    return Type::Intersect(Type::Union(type, t->singleton_zero, t->zone()),
+                           Type::Signed32(), t->zone());
+  }
   return Type::Signed32();
 }
 
@@ -475,6 +568,10 @@ Type* Typer::Visitor::NumberToUint32(Type* type, Typer* t) {
   // TODO(neis): DCHECK(type->Is(Type::Number()));
   if (type->Is(Type::Unsigned32())) return type;
   if (type->Is(t->zeroish)) return t->singleton_zero;
+  if (type->Is(t->unsigned32ish)) {
+    return Type::Intersect(Type::Union(type, t->singleton_zero, t->zone()),
+                           Type::Unsigned32(), t->zone());
+  }
   return Type::Unsigned32();
 }
 
@@ -502,11 +599,12 @@ Bounds Typer::Visitor::TypeInt32Constant(Node* node) {
   Factory* f = isolate()->factory();
   Handle<Object> number = f->NewNumber(OpParameter<int32_t>(node));
   return Bounds(Type::Intersect(
-      Type::Range(number, number, zone()), Type::UntaggedInt32(), zone()));
+      Type::Range(number, number, zone()), Type::UntaggedSigned32(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeInt64Constant(Node* node) {
+  // TODO(rossberg): This actually seems to be a PointerConstant so far...
   return Bounds(Type::Internal());  // TODO(rossberg): Add int64 bitset type?
 }
 
@@ -533,7 +631,7 @@ Bounds Typer::Visitor::TypeNumberConstant(Node* node) {
 
 
 Bounds Typer::Visitor::TypeHeapConstant(Node* node) {
-  return Bounds(TypeConstant(OpParameter<Unique<Object> >(node).handle()));
+  return Bounds(TypeConstant(OpParameter<Unique<HeapObject> >(node).handle()));
 }
 
 
@@ -717,16 +815,26 @@ Type* Typer::Visitor::JSBitwiseOrTyper(Type* lhs, Type* rhs, Typer* t) {
   double rmax = rhs->Max();
   // Or-ing any two values yields a result no smaller than the smaller of
   // the two minimums, and no smaller than the larger of the two minimums
   // when both operands are non-negative.
-  Handle<Object> min = f->NewNumber(
-      lmin >= 0 && rmin >= 0 ? std::max(lmin, rmin) : std::min(lmin, rmin));
+  double min =
+      lmin >= 0 && rmin >= 0 ? std::max(lmin, rmin) : std::min(lmin, rmin);
+  double max = Type::Signed32()->Max();
+
+  // Or-ing with 0 is essentially a conversion to int32.
+  if (rmin == 0 && rmax == 0) {
+    min = lmin;
+    max = lmax;
+  }
+  if (lmin == 0 && lmax == 0) {
+    min = rmin;
+    max = rmax;
+  }
+
   if (lmax < 0 || rmax < 0) {
     // Or-ing two values of which at least one is negative results in a negative
     // value.
-    Handle<Object> max = f->NewNumber(-1);
-    return Type::Range(min, max, t->zone());
+    max = std::min(max, -1.0);
   }
-  Handle<Object> max = f->NewNumber(Type::Signed32()->Max());
-  return Type::Range(min, max, t->zone());
+  return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
   // TODO(neis): Be precise for singleton inputs, here and elsewhere.
 }
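
The same range arithmetic, restated as a standalone helper over plain double endpoints (operands assumed to be int32 ranges already; all names illustrative):

    #include <algorithm>

    struct Range { double min, max; };

    Range OrRangeSketch(Range l, Range r) {
      double min = (l.min >= 0 && r.min >= 0) ? std::max(l.min, r.min)
                                              : std::min(l.min, r.min);
      double max = 2147483647.0;  // Signed32 max
      // x | 0 is just ToInt32(x), so a zero operand passes the other through.
      if (r.min == 0 && r.max == 0) { min = l.min; max = l.max; }
      if (l.min == 0 && l.max == 0) { min = r.min; max = r.max; }
      // A definitely-negative operand forces the sign bit, hence max <= -1.
      if (l.max < 0 || r.max < 0) max = std::min(max, -1.0);
      return Range{min, max};
    }
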
 
@@ -739,18 +847,22 @@ Type* Typer::Visitor::JSBitwiseAndTyper(Type* lhs, Type* rhs, Typer* t) {
   double rmin = rhs->Min();
   double lmax = lhs->Max();
   double rmax = rhs->Max();
+  double min = Type::Signed32()->Min();
   // And-ing any two values yields a result no larger than the larger of
   // the two maximums, and no larger than the smaller of the two maximums
   // when both operands are non-negative.
-  Handle<Object> max = f->NewNumber(
-      lmin >= 0 && rmin >= 0 ? std::min(lmax, rmax) : std::max(lmax, rmax));
-  if (lmin >= 0 || rmin >= 0) {
-    // And-ing two values of which at least one is non-negative results in a
-    // non-negative value.
-    Handle<Object> min = f->NewNumber(0);
-    return Type::Range(min, max, t->zone());
-  }
-  Handle<Object> min = f->NewNumber(Type::Signed32()->Min());
-  return Type::Range(min, max, t->zone());
+  double max =
+      lmin >= 0 && rmin >= 0 ? std::min(lmax, rmax) : std::max(lmax, rmax);
+  // And-ing with a non-negative value x causes the result to be between
+  // zero and x.
+  if (lmin >= 0) {
+    min = 0;
+    max = std::min(max, lmax);
+  }
+  if (rmin >= 0) {
+    min = 0;
+    max = std::min(max, rmax);
+  }
+  return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
 }
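
The dual computation for bitwise and, reusing the Range struct and <algorithm> include from the bitwise-or sketch above:

    Range AndRangeSketch(Range l, Range r) {
      double min = -2147483648.0;  // Signed32 min
      double max = (l.min >= 0 && r.min >= 0) ? std::min(l.max, r.max)
                                              : std::max(l.max, r.max);
      // A non-negative operand x clamps the result into [0, x.max].
      if (l.min >= 0) { min = 0; max = std::min(max, l.max); }
      if (r.min >= 0) { min = 0; max = std::min(max, r.max); }
      return Range{min, max};
    }
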
 
 
@@ -763,11 +875,12 @@ Type* Typer::Visitor::JSBitwiseXorTyper(Type* lhs, Type* rhs, Typer* t) {
   double rmax = rhs->Max();
   if ((lmin >= 0 && rmin >= 0) || (lmax < 0 && rmax < 0)) {
     // Xor-ing negative or non-negative values results in a non-negative value.
-    return t->non_negative_signed32;
+    return Type::NonNegativeSigned32();
   }
   if ((lmax < 0 && rmin >= 0) || (lmin >= 0 && rmax < 0)) {
     // Xor-ing a negative and a non-negative value results in a negative value.
-    return t->negative_signed32;
+    // TODO(jarin) Use a range here.
+    return Type::NegativeSigned32();
   }
   return Type::Signed32();
 }
@@ -780,18 +893,31 @@ Type* Typer::Visitor::JSShiftLeftTyper(Type* lhs, Type* rhs, Typer* t) {
 
 Type* Typer::Visitor::JSShiftRightTyper(Type* lhs, Type* rhs, Typer* t) {
   lhs = NumberToInt32(ToNumber(lhs, t), t);
-  Factory* f = t->isolate()->factory();
+  rhs = NumberToUint32(ToNumber(rhs, t), t);
+  double min = kMinInt;
+  double max = kMaxInt;
   if (lhs->Min() >= 0) {
     // Right-shifting a non-negative value cannot make it negative, nor larger.
-    Handle<Object> min = f->NewNumber(0);
-    Handle<Object> max = f->NewNumber(lhs->Max());
-    return Type::Range(min, max, t->zone());
+    min = std::max(min, 0.0);
+    max = std::min(max, lhs->Max());
   }
   if (lhs->Max() < 0) {
     // Right-shifting a negative value cannot make it non-negative, nor smaller.
-    Handle<Object> min = f->NewNumber(lhs->Min());
-    Handle<Object> max = f->NewNumber(-1);
-    return Type::Range(min, max, t->zone());
+    min = std::max(min, lhs->Min());
+    max = std::min(max, -1.0);
+  }
+  if (rhs->Min() > 0 && rhs->Max() <= 31) {
+    // Shifting right by at least rhs->Min() bits bounds the result by
+    // kMinInt and kMaxInt each shifted that many places.
+    double shift_min = kMinInt >> static_cast<int>(rhs->Min());
+    double shift_max = kMaxInt >> static_cast<int>(rhs->Min());
+    min = std::max(min, shift_min);
+    max = std::min(max, shift_max);
+  }
+  // TODO(jarin) Ideally, the following micro-optimization should be performed
+  // by the type constructor.
+  if (max != Type::Signed32()->Max() || min != Type::Signed32()->Min()) {
+    Factory* f = t->isolate()->factory();
+    return Type::Range(f->NewNumber(min), f->NewNumber(max), t->zone());
   }
   return Type::Signed32();
 }
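
The shift-right logic in the same style, once more reusing Range. The shifts of INT32_MIN/INT32_MAX rely on arithmetic right shift of negative values, as the kMinInt >> expression above does; that holds on the targets V8 supports, though it is implementation-defined in portable C++:

    #include <cstdint>

    Range SarRangeSketch(Range lhs, Range rhs) {  // lhs: Signed32, rhs: Unsigned32
      double min = -2147483648.0, max = 2147483647.0;
      if (lhs.min >= 0) { min = std::max(min, 0.0); max = std::min(max, lhs.max); }
      if (lhs.max < 0)  { min = std::max(min, lhs.min); max = std::min(max, -1.0); }
      if (rhs.min > 0 && rhs.max <= 31) {
        // Every path shifts by at least rhs.min bits, shrinking both extremes.
        int s = static_cast<int>(rhs.min);
        min = std::max(min, static_cast<double>(INT32_MIN >> s));
        max = std::min(max, static_cast<double>(INT32_MAX >> s));
      }
      return Range{min, max};
    }
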
@@ -1033,8 +1159,7 @@ Type* Typer::Visitor::JSModulusTyper(Type* lhs, Type* rhs, Typer* t) {
   lhs = Rangify(lhs, t);
   rhs = Rangify(rhs, t);
   if (lhs->IsRange() && rhs->IsRange()) {
-    // TODO(titzer): fix me.
-    //    return JSModulusRanger(lhs->AsRange(), rhs->AsRange(), t);
+    return JSModulusRanger(lhs->AsRange(), rhs->AsRange(), t);
   }
   return Type::OrderedNumber();
 }
@@ -1062,12 +1187,12 @@ Bounds Typer::Visitor::TypeJSTypeOf(Node* node) {
 
 
 Bounds Typer::Visitor::TypeJSToBoolean(Node* node) {
-  return Bounds(Type::None(zone()), Type::Boolean(zone()));
+  return TypeUnaryOp(node, ToBoolean);
 }
 
 
 Bounds Typer::Visitor::TypeJSToNumber(Node* node) {
-  return Bounds(Type::None(zone()), Type::Number(zone()));
+  return TypeUnaryOp(node, ToNumber);
 }
 
 
@@ -1120,10 +1245,9 @@ Bounds Typer::Visitor::TypeJSLoadNamed(Node* node) {
 // in the graph. In the current implementation, we are
 // increasing the limits to the closest power of two.
 Type* Typer::Visitor::Weaken(Type* current_type, Type* previous_type) {
-  if (current_type->IsRange() && previous_type->IsRange()) {
-    Type::RangeType* previous = previous_type->AsRange();
-    Type::RangeType* current = current_type->AsRange();
-
+  Type::RangeType* previous = previous_type->GetRange();
+  Type::RangeType* current = current_type->GetRange();
+  if (previous != NULL && current != NULL) {
     double current_min = current->Min()->Number();
     Handle<Object> new_min = current->Min();
 
@@ -1153,7 +1277,9 @@ Type* Typer::Visitor::Weaken(Type* current_type, Type* previous_type) {
       }
     }
 
-    return Type::Range(new_min, new_max, typer_->zone());
+    return Type::Union(current_type,
+                       Type::Range(new_min, new_max, typer_->zone()),
+                       typer_->zone());
   }
   return current_type;
 }
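
A condensed sketch of the weakening heuristic: the real code walks the precomputed weaken_min_limits_/weaken_max_limits_ tables rather than recomputing, but the idea is to jump a growing bound straight to the next canonical limit so the fixpoint settles in a few steps. Upper-bound side only; the minimum side is symmetric:

    #include <cmath>

    // Assumes current_max > 0; illustrative only.
    double WeakenedMaxSketch(double previous_max, double current_max) {
      if (current_max <= previous_max) return current_max;   // stable: keep
      return std::exp2(std::ceil(std::log2(current_max)));   // next power of 2
    }
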
@@ -1191,12 +1317,17 @@ Bounds Typer::Visitor::TypeJSInstanceOf(Node* node) {
 
 Bounds Typer::Visitor::TypeJSLoadContext(Node* node) {
   Bounds outer = Operand(node, 0);
-  DCHECK(outer.upper->Maybe(Type::Internal()));
+  Type* context_type = outer.upper;
+  if (context_type->Is(Type::None())) {
+    // Upper bound of context is not yet known.
+    return Bounds(Type::None(), Type::Any());
+  }
+
+  DCHECK(context_type->Maybe(Type::Internal()));
   // TODO(rossberg): More precisely, instead of the above assertion, we should
   // back-propagate the constraint that it has to be a subtype of Internal.
 
   ContextAccess access = OpParameter<ContextAccess>(node);
-  Type* context_type = outer.upper;
   MaybeHandle<Context> context;
   if (context_type->IsConstant()) {
     context = Handle<Context>::cast(context_type->AsConstant()->Value());
@@ -1265,7 +1396,7 @@ Bounds Typer::Visitor::TypeJSCreateModuleContext(Node* node) {
 }
 
 
-Bounds Typer::Visitor::TypeJSCreateGlobalContext(Node* node) {
+Bounds Typer::Visitor::TypeJSCreateScriptContext(Node* node) {
   Bounds outer = ContextOperand(node);
   return Bounds(Type::Context(outer.upper, zone()));
 }
@@ -1407,8 +1538,8 @@ Bounds Typer::Visitor::TypeChangeTaggedToInt32(Node* node) {
   Bounds arg = Operand(node, 0);
   // TODO(neis): DCHECK(arg.upper->Is(Type::Signed32()));
   return Bounds(
-      ChangeRepresentation(arg.lower, Type::UntaggedInt32(), zone()),
-      ChangeRepresentation(arg.upper, Type::UntaggedInt32(), zone()));
+      ChangeRepresentation(arg.lower, Type::UntaggedSigned32(), zone()),
+      ChangeRepresentation(arg.upper, Type::UntaggedSigned32(), zone()));
 }
 
 
@@ -1416,8 +1547,8 @@ Bounds Typer::Visitor::TypeChangeTaggedToUint32(Node* node) {
   Bounds arg = Operand(node, 0);
   // TODO(neis): DCHECK(arg.upper->Is(Type::Unsigned32()));
   return Bounds(
-      ChangeRepresentation(arg.lower, Type::UntaggedInt32(), zone()),
-      ChangeRepresentation(arg.upper, Type::UntaggedInt32(), zone()));
+      ChangeRepresentation(arg.lower, Type::UntaggedUnsigned32(), zone()),
+      ChangeRepresentation(arg.upper, Type::UntaggedUnsigned32(), zone()));
 }
 
 
@@ -1461,8 +1592,8 @@ Bounds Typer::Visitor::TypeChangeBoolToBit(Node* node) {
   Bounds arg = Operand(node, 0);
   // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
   return Bounds(
-      ChangeRepresentation(arg.lower, Type::UntaggedInt1(), zone()),
-      ChangeRepresentation(arg.upper, Type::UntaggedInt1(), zone()));
+      ChangeRepresentation(arg.lower, Type::UntaggedBit(), zone()),
+      ChangeRepresentation(arg.upper, Type::UntaggedBit(), zone()));
 }
 
 
@@ -1470,8 +1601,8 @@ Bounds Typer::Visitor::TypeChangeBitToBool(Node* node) {
   Bounds arg = Operand(node, 0);
   // TODO(neis): DCHECK(arg.upper->Is(Type::Boolean()));
   return Bounds(
-      ChangeRepresentation(arg.lower, Type::TaggedPtr(), zone()),
-      ChangeRepresentation(arg.upper, Type::TaggedPtr(), zone()));
+      ChangeRepresentation(arg.lower, Type::TaggedPointer(), zone()),
+      ChangeRepresentation(arg.upper, Type::TaggedPointer(), zone()));
 }
 
 
@@ -1480,6 +1611,23 @@ Bounds Typer::Visitor::TypeLoadField(Node* node) {
 }
 
 
+Bounds Typer::Visitor::TypeLoadBuffer(Node* node) {
+  // TODO(bmeurer): This typing is not yet correct. Since we can still access
+  // out of bounds, the type in the general case has to include Undefined.
+  switch (BufferAccessOf(node->op()).external_array_type()) {
+#define NATIVE_TYPE_CASE(Type) \
+  case kExternal##Type##Array: \
+    return Bounds(typer_->cache_->Get(k##Type));
+    NATIVE_TYPES(NATIVE_TYPE_CASE)
+#undef NATIVE_TYPE_CASE
+    case kExternalUint8ClampedArray:
+      break;
+  }
+  UNREACHABLE();
+  return Bounds();
+}
+
+
 Bounds Typer::Visitor::TypeLoadElement(Node* node) {
   return Bounds(ElementAccessOf(node->op()).type);
 }
@@ -1491,6 +1639,12 @@ Bounds Typer::Visitor::TypeStoreField(Node* node) {
 }
 
 
+Bounds Typer::Visitor::TypeStoreBuffer(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
 Bounds Typer::Visitor::TypeStoreElement(Node* node) {
   UNREACHABLE();
   return Bounds();
@@ -1733,13 +1887,13 @@ Bounds Typer::Visitor::TypeChangeFloat32ToFloat64(Node* node) {
 
 Bounds Typer::Visitor::TypeChangeFloat64ToInt32(Node* node) {
   return Bounds(Type::Intersect(
-      Type::Signed32(), Type::UntaggedInt32(), zone()));
+      Type::Signed32(), Type::UntaggedSigned32(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeChangeFloat64ToUint32(Node* node) {
   return Bounds(Type::Intersect(
-      Type::Unsigned32(), Type::UntaggedInt32(), zone()));
+      Type::Unsigned32(), Type::UntaggedUnsigned32(), zone()));
 }
 
 
@@ -1773,13 +1927,13 @@ Bounds Typer::Visitor::TypeTruncateFloat64ToFloat32(Node* node) {
 
 Bounds Typer::Visitor::TypeTruncateFloat64ToInt32(Node* node) {
   return Bounds(Type::Intersect(
-      Type::Signed32(), Type::UntaggedInt32(), zone()));
+      Type::Signed32(), Type::UntaggedSigned32(), zone()));
 }
 
 
 Bounds Typer::Visitor::TypeTruncateInt64ToInt32(Node* node) {
   return Bounds(Type::Intersect(
-      Type::Signed32(), Type::UntaggedInt32(), zone()));
+      Type::Signed32(), Type::UntaggedSigned32(), zone()));
 }
 
 
@@ -1857,6 +2011,17 @@ Bounds Typer::Visitor::TypeLoadStackPointer(Node* node) {
 }
 
 
+Bounds Typer::Visitor::TypeCheckedLoad(Node* node) {
+  return Bounds::Unbounded(zone());
+}
+
+
+Bounds Typer::Visitor::TypeCheckedStore(Node* node) {
+  UNREACHABLE();
+  return Bounds();
+}
+
+
 // Heap constants.
 
 
@@ -1872,41 +2037,29 @@ Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
           return typer_->weakint_fun1_;
         case kMathCeil:
           return typer_->weakint_fun1_;
-        case kMathAbs:
-          // TODO(rossberg): can't express overloading
-          return typer_->number_fun1_;
+        // Unary math functions.
+        case kMathAbs:  // TODO(rossberg): can't express overloading
         case kMathLog:
-          return typer_->number_fun1_;
         case kMathExp:
-          return typer_->number_fun1_;
         case kMathSqrt:
-          return typer_->number_fun1_;
-        case kMathPow:
-          return typer_->number_fun2_;
-        case kMathMax:
-          return typer_->number_fun2_;
-        case kMathMin:
-          return typer_->number_fun2_;
         case kMathCos:
-          return typer_->number_fun1_;
         case kMathSin:
-          return typer_->number_fun1_;
         case kMathTan:
-          return typer_->number_fun1_;
         case kMathAcos:
-          return typer_->number_fun1_;
         case kMathAsin:
-          return typer_->number_fun1_;
         case kMathAtan:
-          return typer_->number_fun1_;
+        case kMathFround:
+          return typer_->cache_->Get(kNumberFunc1);
+        // Binary math functions.
         case kMathAtan2:
-          return typer_->number_fun2_;
+        case kMathPow:
+        case kMathMax:
+        case kMathMin:
+          return typer_->cache_->Get(kNumberFunc2);
         case kMathImul:
-          return typer_->imul_fun_;
+          return typer_->cache_->Get(kImulFunc);
         case kMathClz32:
-          return typer_->clz32_fun_;
-        case kMathFround:
-          return typer_->number_fun1_;
+          return typer_->cache_->Get(kClz32Func);
         default:
           break;
       }
@@ -1914,29 +2067,40 @@ Type* Typer::Visitor::TypeConstant(Handle<Object> value) {
       Handle<Context> native =
           handle(context().ToHandleChecked()->native_context(), isolate());
       if (*value == native->array_buffer_fun()) {
-        return typer_->array_buffer_fun_;
+        return typer_->cache_->Get(kArrayBufferFunc);
       } else if (*value == native->int8_array_fun()) {
-        return typer_->int8_array_fun_;
+        return typer_->cache_->Get(kInt8ArrayFunc);
       } else if (*value == native->int16_array_fun()) {
-        return typer_->int16_array_fun_;
+        return typer_->cache_->Get(kInt16ArrayFunc);
       } else if (*value == native->int32_array_fun()) {
-        return typer_->int32_array_fun_;
+        return typer_->cache_->Get(kInt32ArrayFunc);
       } else if (*value == native->uint8_array_fun()) {
-        return typer_->uint8_array_fun_;
+        return typer_->cache_->Get(kUint8ArrayFunc);
       } else if (*value == native->uint16_array_fun()) {
-        return typer_->uint16_array_fun_;
+        return typer_->cache_->Get(kUint16ArrayFunc);
       } else if (*value == native->uint32_array_fun()) {
-        return typer_->uint32_array_fun_;
+        return typer_->cache_->Get(kUint32ArrayFunc);
       } else if (*value == native->float32_array_fun()) {
-        return typer_->float32_array_fun_;
+        return typer_->cache_->Get(kFloat32ArrayFunc);
       } else if (*value == native->float64_array_fun()) {
-        return typer_->float64_array_fun_;
+        return typer_->cache_->Get(kFloat64ArrayFunc);
       }
     }
+  } else if (value->IsJSTypedArray()) {
+    switch (JSTypedArray::cast(*value)->type()) {
+#define NATIVE_TYPE_CASE(Type) \
+  case kExternal##Type##Array: \
+    return typer_->cache_->Get(k##Type##Array);
+      NATIVE_TYPES(NATIVE_TYPE_CASE)
+#undef NATIVE_TYPE_CASE
+      case kExternalUint8ClampedArray:
+        // TODO(rossberg): Do we want some ClampedArray type to express this?
+        break;
+    }
   }
   return Type::Constant(value, zone());
 }
 
-}
-}
-}  // namespace v8::internal::compiler
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
index 8491d91..b65a9a5 100644
@@ -15,6 +15,10 @@ namespace v8 {
 namespace internal {
 namespace compiler {
 
+// Forward declarations.
+class LazyTypeCache;
+
+
 class Typer {
  public:
   explicit Typer(Graph* graph, MaybeHandle<Context> context);
@@ -29,8 +33,6 @@ class Typer {
 
  private:
   class Visitor;
-  class RunVisitor;
-  class WidenVisitor;
   class Decorator;
 
   Graph* graph_;
@@ -38,41 +40,37 @@ class Typer {
   Decorator* decorator_;
 
   Zone* zone_;
+  Type* boolean_or_number;
+  Type* undefined_or_null;
+  Type* undefined_or_number;
   Type* negative_signed32;
   Type* non_negative_signed32;
-  Type* undefined_or_null;
   Type* singleton_false;
   Type* singleton_true;
   Type* singleton_zero;
   Type* singleton_one;
   Type* zero_or_one;
   Type* zeroish;
+  Type* signed32ish;
+  Type* unsigned32ish;
   Type* falsish;
+  Type* truish;
   Type* integer;
   Type* weakint;
   Type* number_fun0_;
   Type* number_fun1_;
   Type* number_fun2_;
   Type* weakint_fun1_;
-  Type* imul_fun_;
-  Type* clz32_fun_;
   Type* random_fun_;
-  Type* array_buffer_fun_;
-  Type* int8_array_fun_;
-  Type* int16_array_fun_;
-  Type* int32_array_fun_;
-  Type* uint8_array_fun_;
-  Type* uint16_array_fun_;
-  Type* uint32_array_fun_;
-  Type* float32_array_fun_;
-  Type* float64_array_fun_;
+  LazyTypeCache* cache_;
 
   ZoneVector<Handle<Object> > weaken_min_limits_;
   ZoneVector<Handle<Object> > weaken_max_limits_;
   DISALLOW_COPY_AND_ASSIGN(Typer);
 };
-}
-}
-}  // namespace v8::internal::compiler
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_COMPILER_TYPER_H_
index f4cf1d4..734b3e8 100644
@@ -88,8 +88,33 @@ Reduction ValueNumberingReducer::Reduce(Node* node) {
       DCHECK(size_ * kCapacityToSizeRatio < capacity_);
       return NoChange();
     }
+
     if (entry == node) {
-      return NoChange();
+      // We need to check for a certain class of collisions here. Imagine the
+      // following scenario:
+      //
+      //  1. We insert node1 with op1 and certain inputs at index i.
+      //  2. We insert node2 with op2 and certain inputs at index i+1.
+      //  3. Some other reducer changes node1 to op2 and the inputs from node2.
+      //
+      // Now we are called again to reduce node1, and we would return NoChange
+      // in this case because we find node1 first, but what we should actually
+      // do is return Replace(node2) instead.
+      for (size_t j = (i + 1) & mask;; j = (j + 1) & mask) {
+        Node* entry = entries_[j];
+        if (!entry) {
+          // No collision, {node} is fine.
+          return NoChange();
+        }
+        if (entry->IsDead()) {
+          continue;
+        }
+        if (Equals(entry, node)) {
+          // Overwrite the colliding entry with the actual entry.
+          entries_[i] = entry;
+          return Replace(entry);
+        }
+      }
     }
 
     // Skip dead entries, but remember their indices so we can reuse them.
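
The probe loop above, shrunk to a toy open-addressed table of strings: once entries[i] has been mutated to equal a later entry on its probe chain, a second reduction must surface that later entry rather than report NoChange. Dead-entry skipping is omitted, and the table is assumed never full (the reducer guarantees that via kCapacityToSizeRatio):

    #include <cstddef>
    #include <string>
    #include <vector>

    // Returns the colliding twin of entries[i] (which now holds `key`) further
    // along the probe chain, or nullptr if the chain ends first.
    std::string* FindTwinSketch(std::vector<std::string*>& entries, size_t i,
                                const std::string& key) {
      size_t mask = entries.size() - 1;  // capacity is a power of two
      for (size_t j = (i + 1) & mask;; j = (j + 1) & mask) {
        std::string* entry = entries[j];
        if (entry == nullptr) return nullptr;  // no collision after all
        if (*entry == key) return entry;       // the duplicate to Replace()
      }
    }
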
index e0830bb..546226c 100644
@@ -16,7 +16,7 @@ class ValueNumberingReducer FINAL : public Reducer {
   explicit ValueNumberingReducer(Zone* zone);
   ~ValueNumberingReducer();
 
-  virtual Reduction Reduce(Node* node) OVERRIDE;
+  Reduction Reduce(Node* node) OVERRIDE;
 
  private:
   enum { kInitialCapacity = 256u, kCapacityToSizeRatio = 2u };
index 94d701a..84b060f 100644
@@ -11,8 +11,6 @@
 
 #include "src/bit-vector.h"
 #include "src/compiler/generic-algorithm.h"
-#include "src/compiler/generic-node-inl.h"
-#include "src/compiler/generic-node.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/node.h"
@@ -178,11 +176,11 @@ void Verifier::Visitor::Pre(Node* node) {
 
   // Verify all successors are projections if multiple value outputs exist.
   if (node->op()->ValueOutputCount() > 1) {
-    Node::Uses uses = node->uses();
-    for (Node::Uses::iterator it = uses.begin(); it != uses.end(); ++it) {
-      CHECK(!NodeProperties::IsValueEdge(it.edge()) ||
-            (*it)->opcode() == IrOpcode::kProjection ||
-            (*it)->opcode() == IrOpcode::kParameter);
+    for (Edge edge : node->use_edges()) {
+      Node* use = edge.from();
+      CHECK(!NodeProperties::IsValueEdge(edge) ||
+            use->opcode() == IrOpcode::kProjection ||
+            use->opcode() == IrOpcode::kParameter);
     }
   }
 
@@ -291,7 +289,7 @@ void Verifier::Visitor::Pre(Node* node) {
       // Constants have no inputs.
       CHECK_EQ(0, input_count);
       // Type can be anything represented as a heap pointer.
-      CheckUpperIs(node, Type::TaggedPtr());
+      CheckUpperIs(node, Type::TaggedPointer());
       break;
     case IrOpcode::kExternalConstant:
       // Constants have no inputs.
@@ -460,7 +458,7 @@ void Verifier::Visitor::Pre(Node* node) {
     case IrOpcode::kJSCreateWithContext:
     case IrOpcode::kJSCreateBlockContext:
     case IrOpcode::kJSCreateModuleContext:
-    case IrOpcode::kJSCreateGlobalContext: {
+    case IrOpcode::kJSCreateScriptContext: {
       // Type is Context, and operand is Internal.
       Node* context = NodeProperties::GetContextInput(node);
       // TODO(rossberg): This should really be Is(Internal), but the typer
@@ -635,6 +633,8 @@ void Verifier::Visitor::Pre(Node* node) {
       // CheckValueInputIs(node, 0, Type::Object());
       // CheckUpperIs(node, Field(node).type));
       break;
+    case IrOpcode::kLoadBuffer:
+      break;
     case IrOpcode::kLoadElement:
       // Object -> elementtype
       // TODO(rossberg): activate once machine ops are typed.
@@ -648,6 +648,8 @@ void Verifier::Visitor::Pre(Node* node) {
       // CheckValueInputIs(node, 1, Field(node).type));
       CheckNotTyped(node);
       break;
+    case IrOpcode::kStoreBuffer:
+      break;
     case IrOpcode::kStoreElement:
       // (Object, elementtype) -> _|_
       // TODO(rossberg): activate once machine ops are typed.
@@ -725,6 +727,8 @@ void Verifier::Visitor::Pre(Node* node) {
     case IrOpcode::kChangeFloat64ToInt32:
     case IrOpcode::kChangeFloat64ToUint32:
     case IrOpcode::kLoadStackPointer:
+    case IrOpcode::kCheckedLoad:
+    case IrOpcode::kCheckedStore:
       // TODO(rossberg): Check.
       break;
   }
index be9af48..0480f9d 100644
@@ -93,8 +93,14 @@ class X64OperandConverter : public InstructionOperandConverter {
         int32_t disp = InputInt32(NextOffset(offset));
         return Operand(base, index, scale, disp);
       }
-      case kMode_M1:
+      case kMode_M1: {
+        Register base = InputRegister(NextOffset(offset));
+        int32_t disp = 0;
+        return Operand(base, disp);
+      }
       case kMode_M2:
+        UNREACHABLE();  // Should use kModeMR with more compact encoding instead
+        return Operand(no_reg, 0);
       case kMode_M4:
       case kMode_M8: {
         Register index = InputRegister(NextOffset(offset));
@@ -119,18 +125,64 @@ class X64OperandConverter : public InstructionOperandConverter {
     return Operand(no_reg, 0);
   }
 
-  Operand MemoryOperand() {
-    int first_input = 0;
+  Operand MemoryOperand(int first_input = 0) {
     return MemoryOperand(&first_input);
   }
 };
 
 
-static bool HasImmediateInput(Instruction* instr, int index) {
+namespace {
+
+bool HasImmediateInput(Instruction* instr, int index) {
   return instr->InputAt(index)->IsImmediate();
 }
 
 
+class OutOfLineLoadZero FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadZero(CodeGenerator* gen, Register result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ xorl(result_, result_); }
+
+ private:
+  Register const result_;
+};
+
+
+class OutOfLineLoadNaN FINAL : public OutOfLineCode {
+ public:
+  OutOfLineLoadNaN(CodeGenerator* gen, XMMRegister result)
+      : OutOfLineCode(gen), result_(result) {}
+
+  void Generate() FINAL { __ pcmpeqd(result_, result_); }
+
+ private:
+  XMMRegister const result_;
+};
+
+
+class OutOfLineTruncateDoubleToI FINAL : public OutOfLineCode {
+ public:
+  OutOfLineTruncateDoubleToI(CodeGenerator* gen, Register result,
+                             XMMRegister input)
+      : OutOfLineCode(gen), result_(result), input_(input) {}
+
+  void Generate() FINAL {
+    __ subp(rsp, Immediate(kDoubleSize));
+    __ movsd(MemOperand(rsp, 0), input_);
+    __ SlowTruncateToI(result_, rsp, 0);
+    __ addp(rsp, Immediate(kDoubleSize));
+  }
+
+ private:
+  Register const result_;
+  XMMRegister const input_;
+};
+
+}  // namespace
+
+
 #define ASSEMBLE_UNOP(asm_instr)         \
   do {                                   \
     if (instr->Output()->IsRegister()) { \
@@ -207,6 +259,247 @@ static bool HasImmediateInput(Instruction* instr, int index) {
   } while (0)
 
 
+#define ASSEMBLE_AVX_DOUBLE_BINOP(asm_instr)                           \
+  do {                                                                 \
+    CpuFeatureScope avx_scope(masm(), AVX);                            \
+    if (instr->InputAt(1)->IsDoubleRegister()) {                       \
+      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+                   i.InputDoubleRegister(1));                          \
+    } else {                                                           \
+      __ asm_instr(i.OutputDoubleRegister(), i.InputDoubleRegister(0), \
+                   i.InputOperand(1));                                 \
+    }                                                                  \
+  } while (0)
+
+
+#define ASSEMBLE_CHECKED_LOAD_FLOAT(asm_instr)                               \
+  do {                                                                       \
+    auto result = i.OutputDoubleRegister();                                  \
+    auto buffer = i.InputRegister(0);                                        \
+    auto index1 = i.InputRegister(1);                                        \
+    auto index2 = i.InputInt32(2);                                           \
+    OutOfLineCode* ool;                                                      \
+    if (instr->InputAt(3)->IsRegister()) {                                   \
+      auto length = i.InputRegister(3);                                      \
+      DCHECK_EQ(0, index2);                                                  \
+      __ cmpl(index1, length);                                               \
+      ool = new (zone()) OutOfLineLoadNaN(this, result);                     \
+    } else {                                                                 \
+      auto length = i.InputInt32(3);                                         \
+      DCHECK_LE(index2, length);                                             \
+      __ cmpq(index1, Immediate(length - index2));                           \
+      class OutOfLineLoadFloat FINAL : public OutOfLineCode {                \
+       public:                                                               \
+        OutOfLineLoadFloat(CodeGenerator* gen, XMMRegister result,           \
+                           Register buffer, Register index1, int32_t index2, \
+                           int32_t length)                                   \
+            : OutOfLineCode(gen),                                            \
+              result_(result),                                               \
+              buffer_(buffer),                                               \
+              index1_(index1),                                               \
+              index2_(index2),                                               \
+              length_(length) {}                                             \
+                                                                             \
+        void Generate() FINAL {                                              \
+          __ leal(kScratchRegister, Operand(index1_, index2_));              \
+          __ pcmpeqd(result_, result_);                                      \
+          __ cmpl(kScratchRegister, Immediate(length_));                     \
+          __ j(above_equal, exit());                                         \
+          __ asm_instr(result_,                                              \
+                       Operand(buffer_, kScratchRegister, times_1, 0));      \
+        }                                                                    \
+                                                                             \
+       private:                                                              \
+        XMMRegister const result_;                                           \
+        Register const buffer_;                                              \
+        Register const index1_;                                              \
+        int32_t const index2_;                                               \
+        int32_t const length_;                                               \
+      };                                                                     \
+      ool = new (zone())                                                     \
+          OutOfLineLoadFloat(this, result, buffer, index1, index2, length);  \
+    }                                                                        \
+    __ j(above_equal, ool->entry());                                         \
+    __ asm_instr(result, Operand(buffer, index1, times_1, index2));          \
+    __ bind(ool->exit());                                                    \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_LOAD_INTEGER(asm_instr)                               \
+  do {                                                                         \
+    auto result = i.OutputRegister();                                          \
+    auto buffer = i.InputRegister(0);                                          \
+    auto index1 = i.InputRegister(1);                                          \
+    auto index2 = i.InputInt32(2);                                             \
+    OutOfLineCode* ool;                                                        \
+    if (instr->InputAt(3)->IsRegister()) {                                     \
+      auto length = i.InputRegister(3);                                        \
+      DCHECK_EQ(0, index2);                                                    \
+      __ cmpl(index1, length);                                                 \
+      ool = new (zone()) OutOfLineLoadZero(this, result);                      \
+    } else {                                                                   \
+      auto length = i.InputInt32(3);                                           \
+      DCHECK_LE(index2, length);                                               \
+      __ cmpq(index1, Immediate(length - index2));                             \
+      class OutOfLineLoadInteger FINAL : public OutOfLineCode {                \
+       public:                                                                 \
+        OutOfLineLoadInteger(CodeGenerator* gen, Register result,              \
+                             Register buffer, Register index1, int32_t index2, \
+                             int32_t length)                                   \
+            : OutOfLineCode(gen),                                              \
+              result_(result),                                                 \
+              buffer_(buffer),                                                 \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length) {}                                               \
+                                                                               \
+        void Generate() FINAL {                                                \
+          Label oob;                                                           \
+          __ leal(kScratchRegister, Operand(index1_, index2_));                \
+          __ cmpl(kScratchRegister, Immediate(length_));                       \
+          __ j(above_equal, &oob, Label::kNear);                               \
+          __ asm_instr(result_,                                                \
+                       Operand(buffer_, kScratchRegister, times_1, 0));        \
+          __ jmp(exit());                                                      \
+          __ bind(&oob);                                                       \
+          __ xorl(result_, result_);                                           \
+        }                                                                      \
+                                                                               \
+       private:                                                                \
+        Register const result_;                                                \
+        Register const buffer_;                                                \
+        Register const index1_;                                                \
+        int32_t const index2_;                                                 \
+        int32_t const length_;                                                 \
+      };                                                                       \
+      ool = new (zone())                                                       \
+          OutOfLineLoadInteger(this, result, buffer, index1, index2, length);  \
+    }                                                                          \
+    __ j(above_equal, ool->entry());                                           \
+    __ asm_instr(result, Operand(buffer, index1, times_1, index2));            \
+    __ bind(ool->exit());                                                      \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_FLOAT(asm_instr)                              \
+  do {                                                                       \
+    auto buffer = i.InputRegister(0);                                        \
+    auto index1 = i.InputRegister(1);                                        \
+    auto index2 = i.InputInt32(2);                                           \
+    auto value = i.InputDoubleRegister(4);                                   \
+    if (instr->InputAt(3)->IsRegister()) {                                   \
+      auto length = i.InputRegister(3);                                      \
+      DCHECK_EQ(0, index2);                                                  \
+      Label done;                                                            \
+      __ cmpl(index1, length);                                               \
+      __ j(above_equal, &done, Label::kNear);                                \
+      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
+      __ bind(&done);                                                        \
+    } else {                                                                 \
+      auto length = i.InputInt32(3);                                         \
+      DCHECK_LE(index2, length);                                             \
+      __ cmpq(index1, Immediate(length - index2));                           \
+      class OutOfLineStoreFloat FINAL : public OutOfLineCode {               \
+       public:                                                               \
+        OutOfLineStoreFloat(CodeGenerator* gen, Register buffer,             \
+                            Register index1, int32_t index2, int32_t length, \
+                            XMMRegister value)                               \
+            : OutOfLineCode(gen),                                            \
+              buffer_(buffer),                                               \
+              index1_(index1),                                               \
+              index2_(index2),                                               \
+              length_(length),                                               \
+              value_(value) {}                                               \
+                                                                             \
+        void Generate() FINAL {                                              \
+          __ leal(kScratchRegister, Operand(index1_, index2_));              \
+          __ cmpl(kScratchRegister, Immediate(length_));                     \
+          __ j(above_equal, exit());                                         \
+          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),       \
+                       value_);                                              \
+        }                                                                    \
+                                                                             \
+       private:                                                              \
+        Register const buffer_;                                              \
+        Register const index1_;                                              \
+        int32_t const index2_;                                               \
+        int32_t const length_;                                               \
+        XMMRegister const value_;                                            \
+      };                                                                     \
+      auto ool = new (zone())                                                \
+          OutOfLineStoreFloat(this, buffer, index1, index2, length, value);  \
+      __ j(above_equal, ool->entry());                                       \
+      __ asm_instr(Operand(buffer, index1, times_1, index2), value);         \
+      __ bind(ool->exit());                                                  \
+    }                                                                        \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Value)                  \
+  do {                                                                         \
+    auto buffer = i.InputRegister(0);                                          \
+    auto index1 = i.InputRegister(1);                                          \
+    auto index2 = i.InputInt32(2);                                             \
+    if (instr->InputAt(3)->IsRegister()) {                                     \
+      auto length = i.InputRegister(3);                                        \
+      DCHECK_EQ(0, index2);                                                    \
+      Label done;                                                              \
+      __ cmpl(index1, length);                                                 \
+      __ j(above_equal, &done, Label::kNear);                                  \
+      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
+      __ bind(&done);                                                          \
+    } else {                                                                   \
+      auto length = i.InputInt32(3);                                           \
+      DCHECK_LE(index2, length);                                               \
+      __ cmpq(index1, Immediate(length - index2));                             \
+      class OutOfLineStoreInteger FINAL : public OutOfLineCode {               \
+       public:                                                                 \
+        OutOfLineStoreInteger(CodeGenerator* gen, Register buffer,             \
+                              Register index1, int32_t index2, int32_t length, \
+                              Value value)                                     \
+            : OutOfLineCode(gen),                                              \
+              buffer_(buffer),                                                 \
+              index1_(index1),                                                 \
+              index2_(index2),                                                 \
+              length_(length),                                                 \
+              value_(value) {}                                                 \
+                                                                               \
+        void Generate() FINAL {                                                \
+          __ leal(kScratchRegister, Operand(index1_, index2_));                \
+          __ cmpl(kScratchRegister, Immediate(length_));                       \
+          __ j(above_equal, exit());                                           \
+          __ asm_instr(Operand(buffer_, kScratchRegister, times_1, 0),         \
+                       value_);                                                \
+        }                                                                      \
+                                                                               \
+       private:                                                                \
+        Register const buffer_;                                                \
+        Register const index1_;                                                \
+        int32_t const index2_;                                                 \
+        int32_t const length_;                                                 \
+        Value const value_;                                                    \
+      };                                                                       \
+      auto ool = new (zone())                                                  \
+          OutOfLineStoreInteger(this, buffer, index1, index2, length, value);  \
+      __ j(above_equal, ool->entry());                                         \
+      __ asm_instr(Operand(buffer, index1, times_1, index2), value);           \
+      __ bind(ool->exit());                                                    \
+    }                                                                          \
+  } while (false)
+
+
+#define ASSEMBLE_CHECKED_STORE_INTEGER(asm_instr)                \
+  do {                                                           \
+    if (instr->InputAt(4)->IsRegister()) {                       \
+      Register value = i.InputRegister(4);                       \
+      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Register);  \
+    } else {                                                     \
+      Immediate value = i.InputImmediate(4);                     \
+      ASSEMBLE_CHECKED_STORE_INTEGER_IMPL(asm_instr, Immediate); \
+    }                                                            \
+  } while (false)
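
In scalar form, the access pattern the checked load/store macros above emit looks roughly like the sketch below (constant-length variant; the 32-bit wraparound of leal is deliberately reproduced by the unsigned addition). An illustrative model, not the emitted code:

    #include <cstdint>

    // Model of ASSEMBLE_CHECKED_LOAD_INTEGER: yields the element when
    // index1 + index2 is in bounds, else the out-of-bounds value (zero here;
    // the float variants produce NaN, and the store variants simply skip).
    int32_t CheckedLoadSketch(const int32_t* buffer, uint32_t index1,
                              int32_t index2, int32_t length) {
      // Fast path: mirrors `cmpq index1, Immediate(length - index2)`.
      if (index1 < static_cast<uint32_t>(length - index2)) {
        return buffer[index1 + index2];
      }
      // Out-of-line path: mirrors the leal/cmpl recheck of index1 + index2.
      uint32_t combined = index1 + static_cast<uint32_t>(index2);  // wraps like leal
      return combined < static_cast<uint32_t>(length) ? buffer[combined] : 0;
    }
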
+
+
 // Assembles an instruction after register allocation, producing machine code.
 void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
   X64OperandConverter i(this, instr);
@@ -238,7 +531,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
     }
     case kArchJmp:
-      __ jmp(GetLabel(i.InputRpo(0)));
+      AssembleArchJump(i.InputRpo(0));
       break;
     case kArchNop:
       // don't emit code for nops.
@@ -249,9 +542,16 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
     case kArchStackPointer:
       __ movq(i.OutputRegister(), rsp);
       break;
-    case kArchTruncateDoubleToI:
-      __ TruncateDoubleToI(i.OutputRegister(), i.InputDoubleRegister(0));
+    case kArchTruncateDoubleToI: {
+      auto result = i.OutputRegister();
+      auto input = i.InputDoubleRegister(0);
+      auto ool = new (zone()) OutOfLineTruncateDoubleToI(this, result, input);
+      __ cvttsd2siq(result, input);
+      __ cmpq(result, Immediate(1));
+      __ j(overflow, ool->entry());
+      __ bind(ool->exit());
       break;
+    }
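
The inline fast path above works because cvttsd2siq writes INT64_MIN as its "invalid" sentinel, and subtracting 1 (the cmpq against Immediate(1)) overflows signed 64-bit arithmetic for exactly that value, so j(overflow, ...) catches precisely the failed conversions. A scalar sketch of the dispatch, with SlowTruncate a hypothetical stand-in for the out-of-line stub:

    #include <cstdint>
    #include <limits>

    int32_t SlowTruncate(double input);  // assumed out-of-line helper

    int32_t TruncateDoubleToISketch(double input) {
      // Models cvttsd2siq: on x64 hardware, NaN and out-of-range inputs yield
      // the INT64_MIN sentinel (the C++ cast itself would be UB for them).
      int64_t result = static_cast<int64_t>(input);
      if (result == std::numeric_limits<int64_t>::min()) {
        return SlowTruncate(input);  // the j(overflow, ool->entry()) path
      }
      return static_cast<int32_t>(result);
    }
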
     case kX64Add32:
       ASSEMBLE_BINOP(addl);
       break;
@@ -396,7 +696,8 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ fprem();
       // The following 2 instructions implicitly use rax.
       __ fnstsw_ax();
-      if (CpuFeatures::IsSupported(SAHF) && masm()->IsEnabled(SAHF)) {
+      if (CpuFeatures::IsSupported(SAHF)) {
+        CpuFeatureScope sahf_scope(masm(), SAHF);
         __ sahf();
       } else {
         __ shrl(rax, Immediate(8));
@@ -482,8 +783,27 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       }
       __ cvtqsi2sd(i.OutputDoubleRegister(), kScratchRegister);
       break;
+    case kAVXFloat64Add:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vaddsd);
+      break;
+    case kAVXFloat64Sub:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vsubsd);
+      break;
+    case kAVXFloat64Mul:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vmulsd);
+      break;
+    case kAVXFloat64Div:
+      ASSEMBLE_AVX_DOUBLE_BINOP(vdivsd);
+      break;
     case kX64Movsxbl:
-      __ movsxbl(i.OutputRegister(), i.MemoryOperand());
+      if (instr->addressing_mode() != kMode_None) {
+        __ movsxbl(i.OutputRegister(), i.MemoryOperand());
+      } else if (instr->InputAt(0)->IsRegister()) {
+        __ movsxbl(i.OutputRegister(), i.InputRegister(0));
+      } else {
+        __ movsxbl(i.OutputRegister(), i.InputOperand(0));
+      }
+      __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movzxbl:
       __ movzxbl(i.OutputRegister(), i.MemoryOperand());
@@ -499,10 +819,18 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       break;
     }
     case kX64Movsxwl:
-      __ movsxwl(i.OutputRegister(), i.MemoryOperand());
+      if (instr->addressing_mode() != kMode_None) {
+        __ movsxwl(i.OutputRegister(), i.MemoryOperand());
+      } else if (instr->InputAt(0)->IsRegister()) {
+        __ movsxwl(i.OutputRegister(), i.InputRegister(0));
+      } else {
+        __ movsxwl(i.OutputRegister(), i.InputOperand(0));
+      }
+      __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movzxwl:
       __ movzxwl(i.OutputRegister(), i.MemoryOperand());
+      __ AssertZeroExtended(i.OutputRegister());
       break;
     case kX64Movw: {
       int index = 0;
@@ -525,6 +853,7 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         } else {
           __ movl(i.OutputRegister(), i.MemoryOperand());
         }
+        __ AssertZeroExtended(i.OutputRegister());
       } else {
         int index = 0;
         Operand operand = i.MemoryOperand(&index);
@@ -574,12 +903,49 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
         __ movsd(operand, i.InputDoubleRegister(index));
       }
       break;
-    case kX64Lea32:
-      __ leal(i.OutputRegister(), i.MemoryOperand());
+    case kX64Lea32: {
+      AddressingMode mode = AddressingModeField::decode(instr->opcode());
+      // Shorten "leal" to "addl", "subl" or "shll" if the register allocation
+      // and addressing mode just happen to work out. The "addl"/"subl" forms
+      // are measurably faster in these cases.
+      if (i.InputRegister(0).is(i.OutputRegister())) {
+        if (mode == kMode_MRI) {
+          int32_t constant_summand = i.InputInt32(1);
+          if (constant_summand > 0) {
+            __ addl(i.OutputRegister(), Immediate(constant_summand));
+          } else if (constant_summand < 0) {
+            __ subl(i.OutputRegister(), Immediate(-constant_summand));
+          }
+        } else if (mode == kMode_MR1) {
+          if (i.InputRegister(1).is(i.OutputRegister())) {
+            __ shll(i.OutputRegister(), Immediate(1));
+          } else {
+            __ leal(i.OutputRegister(), i.MemoryOperand());
+          }
+        } else if (mode == kMode_M2) {
+          __ shll(i.OutputRegister(), Immediate(1));
+        } else if (mode == kMode_M4) {
+          __ shll(i.OutputRegister(), Immediate(2));
+        } else if (mode == kMode_M8) {
+          __ shll(i.OutputRegister(), Immediate(3));
+        } else {
+          __ leal(i.OutputRegister(), i.MemoryOperand());
+        }
+      } else {
+        __ leal(i.OutputRegister(), i.MemoryOperand());
+      }
+      __ AssertZeroExtended(i.OutputRegister());
       break;
+    }
     case kX64Lea:
       __ leaq(i.OutputRegister(), i.MemoryOperand());
       break;
+    case kX64Dec32:
+      __ decl(i.OutputRegister());
+      break;
+    case kX64Inc32:
+      __ incl(i.OutputRegister());
+      break;
     case kX64Push:
       if (HasImmediateInput(instr, 0)) {
         __ pushq(i.InputImmediate(0));
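
The kX64Lea32 shortening is a set of strength reductions: when the inputs
alias the output register, leal collapses into an equivalent addl, subl, or
shll. A sanity sketch of the value identities being relied on (plain C++,
unsigned to keep the shifts well defined):

    #include <cassert>
    #include <cstdint>

    void lea32_shortening_identities(uint32_t r) {
      assert((r + r * 1) == (r << 1));  // kMode_MR1 with index == output
      assert((r * 2) == (r << 1));      // kMode_M2 -> shll by 1
      assert((r * 4) == (r << 2));      // kMode_M4 -> shll by 2
      assert((r * 8) == (r << 3));      // kMode_M8 -> shll by 3
    }
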
@@ -603,27 +969,54 @@ void CodeGenerator::AssembleArchInstruction(Instruction* instr) {
       __ RecordWrite(object, index, value, mode);
       break;
     }
+    case kCheckedLoadInt8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxbl);
+      break;
+    case kCheckedLoadUint8:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxbl);
+      break;
+    case kCheckedLoadInt16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movsxwl);
+      break;
+    case kCheckedLoadUint16:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movzxwl);
+      break;
+    case kCheckedLoadWord32:
+      ASSEMBLE_CHECKED_LOAD_INTEGER(movl);
+      break;
+    case kCheckedLoadFloat32:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movss);
+      break;
+    case kCheckedLoadFloat64:
+      ASSEMBLE_CHECKED_LOAD_FLOAT(movsd);
+      break;
+    case kCheckedStoreWord8:
+      ASSEMBLE_CHECKED_STORE_INTEGER(movb);
+      break;
+    case kCheckedStoreWord16:
+      ASSEMBLE_CHECKED_STORE_INTEGER(movw);
+      break;
+    case kCheckedStoreWord32:
+      ASSEMBLE_CHECKED_STORE_INTEGER(movl);
+      break;
+    case kCheckedStoreFloat32:
+      ASSEMBLE_CHECKED_STORE_FLOAT(movss);
+      break;
+    case kCheckedStoreFloat64:
+      ASSEMBLE_CHECKED_STORE_FLOAT(movsd);
+      break;
   }
 }
 
 
 // Assembles branches after this instruction.
-void CodeGenerator::AssembleArchBranch(Instruction* instr,
-                                       FlagsCondition condition) {
+void CodeGenerator::AssembleArchBranch(Instruction* instr, BranchInfo* branch) {
   X64OperandConverter i(this, instr);
-  Label done;
-
-  // Emit a branch. The true and false targets are always the last two inputs
-  // to the instruction.
-  BasicBlock::RpoNumber tblock =
-      i.InputRpo(static_cast<int>(instr->InputCount()) - 2);
-  BasicBlock::RpoNumber fblock =
-      i.InputRpo(static_cast<int>(instr->InputCount()) - 1);
-  bool fallthru = IsNextInAssemblyOrder(fblock);
-  Label* tlabel = GetLabel(tblock);
-  Label* flabel = fallthru ? &done : GetLabel(fblock);
-  Label::Distance flabel_distance = fallthru ? Label::kNear : Label::kFar;
-  switch (condition) {
+  Label::Distance flabel_distance =
+      branch->fallthru ? Label::kNear : Label::kFar;
+  Label* tlabel = branch->true_label;
+  Label* flabel = branch->false_label;
+  switch (branch->condition) {
     case kUnorderedEqual:
       __ j(parity_even, flabel, flabel_distance);
     // Fall through.
@@ -679,8 +1072,12 @@ void CodeGenerator::AssembleArchBranch(Instruction* instr,
       __ j(no_overflow, tlabel);
       break;
   }
-  if (!fallthru) __ jmp(flabel, flabel_distance);  // no fallthru to flabel.
-  __ bind(&done);
+  if (!branch->fallthru) __ jmp(flabel, flabel_distance);
+}
+
+
+void CodeGenerator::AssembleArchJump(BasicBlock::RpoNumber target) {
+  if (!IsNextInAssemblyOrder(target)) __ jmp(GetLabel(target));
 }
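
Fallthrough handling now lives in the code generator rather than the
instruction selector (the selector-side branch inversion is deleted further
down in instruction-selector-x64.cc): a conditional branch appends the
trailing unconditional jmp only when the false block is not next in assembly
order, and AssembleArchJump emits nothing at all for a jump to the next
block. Roughly, with a hypothetical emit callback standing in for `__ jmp`:

    // Sketch of the fallthrough rule.
    void assemble_arch_jump(int target, int next_in_assembly_order,
                            void (*emit_jmp)(int)) {
      if (target != next_in_assembly_order) emit_jmp(target);
    }
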
 
 
@@ -799,23 +1196,6 @@ void CodeGenerator::AssemblePrologue() {
     __ Prologue(info->IsCodePreAgingActive());
     frame()->SetRegisterSaveAreaSize(
         StandardFrameConstants::kFixedFrameSizeFromFp);
-
-    // Sloppy mode functions and builtins need to replace the receiver with the
-    // global proxy when called as functions (without an explicit receiver
-    // object).
-    // TODO(mstarzinger/verwaest): Should this be moved back into the CallIC?
-    if (info->strict_mode() == SLOPPY && !info->is_native()) {
-      Label ok;
-      StackArgumentsAccessor args(rbp, info->scope()->num_parameters());
-      __ movp(rcx, args.GetReceiverOperand());
-      __ CompareRoot(rcx, Heap::kUndefinedValueRootIndex);
-      __ j(not_equal, &ok, Label::kNear);
-      __ movp(rcx, GlobalObjectOperand());
-      __ movp(rcx, FieldOperand(rcx, GlobalObject::kGlobalProxyOffset));
-      __ movp(args.GetReceiverOperand(), rcx);
-      __ bind(&ok);
-    }
-
   } else {
     __ StubPrologue();
     frame()->SetRegisterSaveAreaSize(
@@ -918,6 +1298,9 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
         case Constant::kHeapObject:
           __ Move(dst, src.ToHeapObject());
           break;
+        case Constant::kRpoNumber:
+          UNREACHABLE();  // TODO(dcarney): load of labels on x64.
+          break;
       }
       if (destination->IsStackSlot()) {
         __ movq(g.ToOperand(destination), kScratchRegister);
@@ -1001,7 +1384,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
     __ movsd(xmm0, src);
     __ movsd(src, dst);
     __ movsd(dst, xmm0);
-  } else if (source->IsDoubleRegister() && destination->IsDoubleRegister()) {
+  } else if (source->IsDoubleRegister() && destination->IsDoubleStackSlot()) {
     // XMM register-memory swap.  We rely on having xmm0
     // available as a fixed scratch register.
     XMMRegister src = g.ToDoubleRegister(source);
index 9d4f59c..77e3e52 100644
@@ -62,6 +62,10 @@ namespace compiler {
   V(SSEFloat64ToUint32)            \
   V(SSEInt32ToFloat64)             \
   V(SSEUint32ToFloat64)            \
+  V(AVXFloat64Add)                 \
+  V(AVXFloat64Sub)                 \
+  V(AVXFloat64Mul)                 \
+  V(AVXFloat64Div)                 \
   V(X64Movsxbl)                    \
   V(X64Movzxbl)                    \
   V(X64Movb)                       \
@@ -75,6 +79,8 @@ namespace compiler {
   V(X64Movss)                      \
   V(X64Lea32)                      \
   V(X64Lea)                        \
+  V(X64Dec32)                      \
+  V(X64Inc32)                      \
   V(X64Push)                       \
   V(X64StoreWriteBarrier)
 
index c70944b..aba480d 100644
@@ -2,7 +2,6 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/instruction-selector-impl.h"
 #include "src/compiler/node-matchers.h"
 
@@ -34,6 +33,71 @@ class X64OperandGenerator FINAL : public OperandGenerator {
     }
   }
 
+  AddressingMode GenerateMemoryOperandInputs(Node* index, int scale_exponent,
+                                             Node* base, Node* displacement,
+                                             InstructionOperand* inputs[],
+                                             size_t* input_count) {
+    AddressingMode mode = kMode_MRI;
+    if (base != NULL) {
+      inputs[(*input_count)++] = UseRegister(base);
+      if (index != NULL) {
+        DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+        inputs[(*input_count)++] = UseRegister(index);
+        if (displacement != NULL) {
+          inputs[(*input_count)++] = UseImmediate(displacement);
+          static const AddressingMode kMRnI_modes[] = {kMode_MR1I, kMode_MR2I,
+                                                       kMode_MR4I, kMode_MR8I};
+          mode = kMRnI_modes[scale_exponent];
+        } else {
+          static const AddressingMode kMRn_modes[] = {kMode_MR1, kMode_MR2,
+                                                      kMode_MR4, kMode_MR8};
+          mode = kMRn_modes[scale_exponent];
+        }
+      } else {
+        if (displacement == NULL) {
+          mode = kMode_MR;
+        } else {
+          inputs[(*input_count)++] = UseImmediate(displacement);
+          mode = kMode_MRI;
+        }
+      }
+    } else {
+      DCHECK(index != NULL);
+      DCHECK(scale_exponent >= 0 && scale_exponent <= 3);
+      inputs[(*input_count)++] = UseRegister(index);
+      if (displacement != NULL) {
+        inputs[(*input_count)++] = UseImmediate(displacement);
+        static const AddressingMode kMnI_modes[] = {kMode_MRI, kMode_M2I,
+                                                    kMode_M4I, kMode_M8I};
+        mode = kMnI_modes[scale_exponent];
+      } else {
+        static const AddressingMode kMn_modes[] = {kMode_MR, kMode_MR1,
+                                                   kMode_M4, kMode_M8};
+        mode = kMn_modes[scale_exponent];
+        if (mode == kMode_MR1) {
+          // [%r1 + %r1*1] has a smaller encoding than [%r1*2+0]
+          inputs[(*input_count)++] = UseRegister(index);
+        }
+      }
+    }
+    return mode;
+  }
+
+  AddressingMode GetEffectiveAddressMemoryOperand(Node* operand,
+                                                  InstructionOperand* inputs[],
+                                                  size_t* input_count) {
+    BaseWithIndexAndDisplacement64Matcher m(operand, true);
+    DCHECK(m.matches());
+    if (m.displacement() == NULL || CanBeImmediate(m.displacement())) {
+      return GenerateMemoryOperandInputs(m.index(), m.scale(), m.base(),
+                                         m.displacement(), inputs, input_count);
+    } else {
+      inputs[(*input_count)++] = UseRegister(operand->InputAt(0));
+      inputs[(*input_count)++] = UseRegister(operand->InputAt(1));
+      return kMode_MR1;
+    }
+  }
+
   bool CanBeBetterLeftOperand(Node* node) const {
     return !selector()->IsLive(node);
   }
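
GenerateMemoryOperandInputs maps whichever of base, index*2^scale, and
displacement are present onto the tightest x64 operand encoding; every
kMode_* variant denotes the same effective-address arithmetic. The one
wrinkle is scale-by-2 with no base, which is re-encoded as
[%index + %index*1] (kMode_MR1) because [%index*2 + 0] would force a 4-byte
zero displacement. The arithmetic all of the modes encode, as a sketch:

    #include <cstdint>

    // Absent components are zero; scale_exponent is 0..3 per the DCHECKs.
    uint64_t effective_address(uint64_t base, uint64_t index,
                               int scale_exponent, int32_t displacement) {
      return base + (index << scale_exponent) + displacement;
    }
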
@@ -44,8 +108,6 @@ void InstructionSelector::VisitLoad(Node* node) {
   MachineType rep = RepresentationOf(OpParameter<LoadRepresentation>(node));
   MachineType typ = TypeOf(OpParameter<LoadRepresentation>(node));
   X64OperandGenerator g(this);
-  Node* const base = node->InputAt(0);
-  Node* const index = node->InputAt(1);
 
   ArchOpcode opcode;
   switch (rep) {
@@ -73,19 +135,15 @@ void InstructionSelector::VisitLoad(Node* node) {
       UNREACHABLE();
       return;
   }
-  if (g.CanBeImmediate(base)) {
-    // load [#base + %index]
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(index), g.UseImmediate(base));
-  } else if (g.CanBeImmediate(index)) {
-    // load [%base + #index]
-    Emit(opcode | AddressingModeField::encode(kMode_MRI),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseImmediate(index));
-  } else {
-    // load [%base + %index*1]
-    Emit(opcode | AddressingModeField::encode(kMode_MR1),
-         g.DefineAsRegister(node), g.UseRegister(base), g.UseRegister(index));
-  }
+
+  InstructionOperand* outputs[1];
+  outputs[0] = g.DefineAsRegister(node);
+  InstructionOperand* inputs[3];
+  size_t input_count = 0;
+  AddressingMode mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(mode);
+  Emit(code, 1, outputs, input_count, inputs);
 }
 
 
@@ -135,21 +193,112 @@ void InstructionSelector::VisitStore(Node* node) {
       UNREACHABLE();
       return;
   }
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  AddressingMode mode =
+      g.GetEffectiveAddressMemoryOperand(node, inputs, &input_count);
+  InstructionCode code = opcode | AddressingModeField::encode(mode);
   InstructionOperand* value_operand =
       g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
-  if (g.CanBeImmediate(base)) {
-    // store [#base + %index], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
-         g.UseRegister(index), g.UseImmediate(base), value_operand);
-  } else if (g.CanBeImmediate(index)) {
-    // store [%base + #index], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MRI), nullptr,
-         g.UseRegister(base), g.UseImmediate(index), value_operand);
-  } else {
-    // store [%base + %index*1], %|#value
-    Emit(opcode | AddressingModeField::encode(kMode_MR1), nullptr,
-         g.UseRegister(base), g.UseRegister(index), value_operand);
+  inputs[input_count++] = value_operand;
+  Emit(code, 0, static_cast<InstructionOperand**>(NULL), input_count, inputs);
+}
+
+
+void InstructionSelector::VisitCheckedLoad(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  MachineType typ = TypeOf(OpParameter<MachineType>(node));
+  X64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt8 : kCheckedLoadUint8;
+      break;
+    case kRepWord16:
+      opcode = typ == kTypeInt32 ? kCheckedLoadInt16 : kCheckedLoadUint16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedLoadWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedLoadFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedLoadFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+    Int32Matcher mlength(length);
+    Int32BinopMatcher moffset(offset);
+    if (mlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
+           g.UseRegister(moffset.left().node()),
+           g.UseImmediate(moffset.right().node()), g.UseImmediate(length));
+      return;
+    }
   }
+  InstructionOperand* length_operand =
+      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+  Emit(opcode, g.DefineAsRegister(node), g.UseRegister(buffer),
+       g.UseRegister(offset), g.TempImmediate(0), length_operand);
+}
+
+
+void InstructionSelector::VisitCheckedStore(Node* node) {
+  MachineType rep = RepresentationOf(OpParameter<MachineType>(node));
+  X64OperandGenerator g(this);
+  Node* const buffer = node->InputAt(0);
+  Node* const offset = node->InputAt(1);
+  Node* const length = node->InputAt(2);
+  Node* const value = node->InputAt(3);
+  ArchOpcode opcode;
+  switch (rep) {
+    case kRepWord8:
+      opcode = kCheckedStoreWord8;
+      break;
+    case kRepWord16:
+      opcode = kCheckedStoreWord16;
+      break;
+    case kRepWord32:
+      opcode = kCheckedStoreWord32;
+      break;
+    case kRepFloat32:
+      opcode = kCheckedStoreFloat32;
+      break;
+    case kRepFloat64:
+      opcode = kCheckedStoreFloat64;
+      break;
+    default:
+      UNREACHABLE();
+      return;
+  }
+  InstructionOperand* value_operand =
+      g.CanBeImmediate(value) ? g.UseImmediate(value) : g.UseRegister(value);
+  if (offset->opcode() == IrOpcode::kInt32Add && CanCover(node, offset)) {
+    Int32Matcher mlength(length);
+    Int32BinopMatcher moffset(offset);
+    if (mlength.HasValue() && moffset.right().HasValue() &&
+        moffset.right().Value() >= 0 &&
+        mlength.Value() >= moffset.right().Value()) {
+      Emit(opcode, nullptr, g.UseRegister(buffer),
+           g.UseRegister(moffset.left().node()),
+           g.UseImmediate(moffset.right().node()), g.UseImmediate(length),
+           value_operand);
+      return;
+    }
+  }
+  InstructionOperand* length_operand =
+      g.CanBeImmediate(length) ? g.UseImmediate(length) : g.UseRegister(length);
+  Emit(opcode, nullptr, g.UseRegister(buffer), g.UseRegister(offset),
+       g.TempImmediate(0), length_operand, value_operand);
 }
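
Both checked-access visitors try to fold an `x + k` offset with constant k
into the instruction when the length is a constant too; the guards above
(k >= 0 and length >= k) are exactly what makes the fold safe, assuming the
emitted code then checks the register part against the adjusted bound. The
invariant, as a sketch:

    #include <cstdint>

    // If 0 <= k <= length, then x < length - k implies x + k < length with
    // no unsigned overflow, so the constant part of the offset can move into
    // the memory operand's displacement.
    bool folded_bounds_check_passes(uint32_t x, uint32_t k, uint32_t length) {
      return k <= length && x < length - k;
    }
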
 
 
@@ -275,12 +424,6 @@ void VisitWord32Shift(InstructionSelector* selector, Node* node,
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseImmediate(right));
   } else {
-    if (m.right().IsWord32And()) {
-      Int32BinopMatcher mright(right);
-      if (mright.right().Is(0x1F)) {
-        right = mright.left().node();
-      }
-    }
     selector->Emit(opcode, g.DefineSameAsFirst(node), g.UseRegister(left),
                    g.UseFixed(right, rcx));
   }
@@ -311,10 +454,39 @@ void VisitWord64Shift(InstructionSelector* selector, Node* node,
   }
 }
 
+
+void EmitLea(InstructionSelector* selector, InstructionCode opcode,
+             Node* result, Node* index, int scale, Node* base,
+             Node* displacement) {
+  X64OperandGenerator g(selector);
+
+  InstructionOperand* inputs[4];
+  size_t input_count = 0;
+  AddressingMode mode = g.GenerateMemoryOperandInputs(
+      index, scale, base, displacement, inputs, &input_count);
+
+  DCHECK_NE(0, static_cast<int>(input_count));
+  DCHECK_GE(arraysize(inputs), input_count);
+
+  InstructionOperand* outputs[1];
+  outputs[0] = g.DefineAsRegister(result);
+
+  opcode = AddressingModeField::encode(mode) | opcode;
+
+  selector->Emit(opcode, 1, outputs, input_count, inputs);
+}
+
 }  // namespace
 
 
 void InstructionSelector::VisitWord32Shl(Node* node) {
+  Int32ScaleMatcher m(node, true);
+  if (m.matches()) {
+    Node* index = node->InputAt(0);
+    Node* base = m.power_of_two_plus_one() ? index : NULL;
+    EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
+    return;
+  }
   VisitWord32Shift(this, node, kX64Shl32);
 }
 
@@ -346,6 +518,18 @@ void InstructionSelector::VisitWord64Shr(Node* node) {
 
 
 void InstructionSelector::VisitWord32Sar(Node* node) {
+  X64OperandGenerator g(this);
+  Int32BinopMatcher m(node);
+  if (CanCover(m.node(), m.left().node()) && m.left().IsWord32Shl()) {
+    Int32BinopMatcher mleft(m.left().node());
+    if (mleft.right().Is(16) && m.right().Is(16)) {
+      Emit(kX64Movsxwl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
+      return;
+    } else if (mleft.right().Is(24) && m.right().Is(24)) {
+      Emit(kX64Movsxbl, g.DefineAsRegister(node), g.Use(mleft.left().node()));
+      return;
+    }
+  }
   VisitWord32Shift(this, node, kX64Sar32);
 }
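
The new VisitWord32Sar pattern recognizes the classic sign-extension idiom:
shifting left by 16 (or 24) and arithmetically shifting back by the same
amount keeps the low 16 (or 8) bits, sign-extended, which movsxwl (or
movsxbl) computes in one instruction. The identity, as a sketch:

    #include <cstdint>

    // (x << 16) >> 16 with an arithmetic right shift sign-extends the low
    // 16 bits, i.e. exactly what movsxwl produces.
    int32_t sign_extend_low16(int32_t x) {
      return static_cast<int16_t>(x);  // two's-complement narrow, then widen
    }
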
 
@@ -366,6 +550,18 @@ void InstructionSelector::VisitWord64Ror(Node* node) {
 
 
 void InstructionSelector::VisitInt32Add(Node* node) {
+  X64OperandGenerator g(this);
+
+  // Try to match the Add to a leal pattern.
+  BaseWithIndexAndDisplacement32Matcher m(node);
+  if (m.matches() &&
+      (m.displacement() == NULL || g.CanBeImmediate(m.displacement()))) {
+    EmitLea(this, kX64Lea32, node, m.index(), m.scale(), m.base(),
+            m.displacement());
+    return;
+  }
+
+  // No leal pattern matched; fall back to addl.
   VisitBinop(this, node, kX64Add32);
 }
 
@@ -381,6 +577,14 @@ void InstructionSelector::VisitInt32Sub(Node* node) {
   if (m.left().Is(0)) {
     Emit(kX64Neg32, g.DefineSameAsFirst(node), g.UseRegister(m.right().node()));
   } else {
+    if (m.right().HasValue() && g.CanBeImmediate(m.right().node())) {
+      // Turn subtractions of constant values into immediate "leal" instructions
+      // by negating the value.
+      Emit(kX64Lea32 | AddressingModeField::encode(kMode_MRI),
+           g.DefineAsRegister(node), g.UseRegister(m.left().node()),
+           g.TempImmediate(-m.right().Value()));
+      return;
+    }
     VisitBinop(this, node, kX64Sub32);
   }
 }
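
Rewriting `x - c` as `leal dst, [x + (-c)]` is sound in wraparound 32-bit
arithmetic, and it pays off in register allocation: subl is a destructive
two-operand instruction (DefineSameAsFirst), while leal can target any
output register (DefineAsRegister) and leaves the flags untouched.
Value-wise:

    #include <cstdint>

    // x - c == x + (-c) modulo 2^32; this also holds for c == INT32_MIN,
    // where the negation wraps back to c itself.
    uint32_t sub_as_lea_displacement(uint32_t x, int32_t c) {
      return x + (0u - static_cast<uint32_t>(c));
    }
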
@@ -452,6 +656,13 @@ void VisitMod(InstructionSelector* selector, Node* node, ArchOpcode opcode) {
 
 
 void InstructionSelector::VisitInt32Mul(Node* node) {
+  Int32ScaleMatcher m(node, true);
+  if (m.matches()) {
+    Node* index = node->InputAt(0);
+    Node* base = m.power_of_two_plus_one() ? index : NULL;
+    EmitLea(this, kX64Lea32, node, index, m.scale(), base, NULL);
+    return;
+  }
   VisitMul(this, node, kX64Imul32);
 }
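
Int32ScaleMatcher with power_of_two_plus_one allowed picks up exactly the
multiplications a single leal can express: the pure scales x*2, x*4, x*8
and, by reusing the input as both base and index, x*3, x*5, x*9. The covered
identities, as a sketch:

    #include <cassert>
    #include <cstdint>

    void lea_multiply_identities(uint32_t x) {
      assert(x * 3 == x + x * 2);  // leal dst, [x + x*2]
      assert(x * 5 == x + x * 4);  // leal dst, [x + x*4]
      assert(x * 9 == x + x * 8);  // leal dst, [x + x*8]
    }
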
 
@@ -615,29 +826,49 @@ void InstructionSelector::VisitTruncateInt64ToInt32(Node* node) {
 
 void InstructionSelector::VisitFloat64Add(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Add, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Add, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Sub(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Sub, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Sub, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Mul(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Mul, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Mul, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
 
 
 void InstructionSelector::VisitFloat64Div(Node* node) {
   X64OperandGenerator g(this);
-  Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
-       g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  if (IsSupported(AVX)) {
+    Emit(kAVXFloat64Div, g.DefineAsRegister(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  } else {
+    Emit(kSSEFloat64Div, g.DefineSameAsFirst(node),
+         g.UseRegister(node->InputAt(0)), g.Use(node->InputAt(1)));
+  }
 }
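
The SSE forms are destructive two-operand instructions, so the selector must
pin the output to the first input (DefineSameAsFirst), which often costs a
register move; the AVX forms are three-operand, so DefineAsRegister lets the
allocator choose any output. The constraint difference, sketched in scalar
C++ terms:

    // addsd-style: the destination is also the first source.
    double sse_style_add(double dst_and_lhs, double rhs) {
      dst_and_lhs += rhs;  // clobbers the first operand
      return dst_and_lhs;
    }

    // vaddsd-style: destination and both sources are independent.
    double avx_style_add(double lhs, double rhs) { return lhs + rhs; }
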
 
 
@@ -693,7 +924,7 @@ void InstructionSelector::VisitFloat64RoundTiesAway(Node* node) {
 
 void InstructionSelector::VisitCall(Node* node) {
   X64OperandGenerator g(this);
-  CallDescriptor* descriptor = OpParameter<CallDescriptor*>(node);
+  const CallDescriptor* descriptor = OpParameter<const CallDescriptor*>(node);
 
   FrameStateDescriptor* frame_state_descriptor = NULL;
   if (descriptor->NeedsFrameState()) {
@@ -812,12 +1043,6 @@ void InstructionSelector::VisitBranch(Node* branch, BasicBlock* tbranch,
 
   FlagsContinuation cont(kNotEqual, tbranch, fbranch);
 
-  // If we can fall through to the true block, invert the branch.
-  if (IsNextInAssemblyOrder(tbranch)) {
-    cont.Negate();
-    cont.SwapBlocks();
-  }
-
   // Try to combine with comparisons against 0 by simply inverting the branch.
   while (CanCover(user, value)) {
     if (value->opcode() == IrOpcode::kWord32Equal) {
@@ -1083,10 +1308,12 @@ InstructionSelector::SupportedMachineOperatorFlags() {
   if (CpuFeatures::IsSupported(SSE4_1)) {
     return MachineOperatorBuilder::kFloat64Floor |
            MachineOperatorBuilder::kFloat64Ceil |
-           MachineOperatorBuilder::kFloat64RoundTruncate;
+           MachineOperatorBuilder::kFloat64RoundTruncate |
+           MachineOperatorBuilder::kWord32ShiftIsSafe;
   }
   return MachineOperatorBuilder::kNoFlags;
 }
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index 7bf2c78..0b76cc7 100644
@@ -65,9 +65,9 @@ CallDescriptor* Linkage::GetRuntimeCallDescriptor(
 
 CallDescriptor* Linkage::GetStubCallDescriptor(
     const CallInterfaceDescriptor& descriptor, int stack_parameter_count,
-    CallDescriptor::Flags flags, Zone* zone) {
+    CallDescriptor::Flags flags, Operator::Properties properties, Zone* zone) {
   return LH::GetStubCallDescriptor(zone, descriptor, stack_parameter_count,
-                                   flags);
+                                   flags, properties);
 }
 
 
index 537d92d..57490a1 100644
 namespace v8 {
 namespace internal {
 
+
+Handle<ScriptContextTable> ScriptContextTable::Extend(
+    Handle<ScriptContextTable> table, Handle<Context> script_context) {
+  Handle<ScriptContextTable> result;
+  int used = table->used();
+  int length = table->length();
+  CHECK(used >= 0 && length > 0 && used < length);
+  if (used + 1 == length) {
+    CHECK(length < Smi::kMaxValue / 2);
+    result = Handle<ScriptContextTable>::cast(
+        FixedArray::CopySize(table, length * 2));
+  } else {
+    result = table;
+  }
+  result->set_used(used + 1);
+
+  DCHECK(script_context->IsScriptContext());
+  result->set(used + 1, *script_context);
+  return result;
+}
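+
Extend appends in place while there is spare capacity and doubles the
backing store only when the next append would fill it, so repeated appends
stay amortized O(1); since it may return either the original table or a
fresh copy, callers have to use the returned handle (hence MUST_USE_RESULT
on the declaration in contexts.h). The capacity rule, as a sketch:

    // Slot 0 is the used count, so a table of length N holds at most N - 1
    // contexts; double when the next append would exhaust that.
    int next_table_length(int used, int length) {
      return (used + 1 == length) ? length * 2 : length;
    }
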
+
+
+bool ScriptContextTable::Lookup(Handle<ScriptContextTable> table,
+                                Handle<String> name, LookupResult* result) {
+  for (int i = 0; i < table->used(); i++) {
+    Handle<Context> context = GetContext(table, i);
+    DCHECK(context->IsScriptContext());
+    Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+    int slot_index = ScopeInfo::ContextSlotIndex(
+        scope_info, name, &result->mode, &result->init_flag,
+        &result->maybe_assigned_flag);
+
+    if (slot_index >= 0) {
+      result->context_index = i;
+      result->slot_index = slot_index;
+      return true;
+    }
+  }
+  return false;
+}
+
+
 Context* Context::declaration_context() {
   Context* current = this;
-  while (!current->IsFunctionContext() && !current->IsNativeContext()) {
+  while (!current->IsFunctionContext() && !current->IsNativeContext() &&
+         !current->IsScriptContext()) {
     current = current->previous();
     DCHECK(current->closure() == closure());
   }
@@ -32,9 +75,9 @@ JSBuiltinsObject* Context::builtins() {
 }
 
 
-Context* Context::global_context() {
+Context* Context::script_context() {
   Context* current = this;
-  while (!current->IsGlobalContext()) {
+  while (!current->IsScriptContext()) {
     current = current->previous();
   }
   return current;
@@ -82,8 +125,7 @@ static Maybe<PropertyAttributes> UnscopableLookup(LookupIterator* it) {
   DCHECK(attrs.has_value || isolate->has_pending_exception());
   if (!attrs.has_value || attrs.value == ABSENT) return attrs;
 
-  Handle<Symbol> unscopables_symbol(
-      isolate->native_context()->unscopables_symbol(), isolate);
+  Handle<Symbol> unscopables_symbol = isolate->factory()->unscopables_symbol();
   Handle<Object> receiver = it->GetReceiver();
   Handle<Object> unscopables;
   MaybeHandle<Object> maybe_unscopables =
@@ -92,16 +134,64 @@ static Maybe<PropertyAttributes> UnscopableLookup(LookupIterator* it) {
     return Maybe<PropertyAttributes>();
   }
   if (!unscopables->IsSpecObject()) return attrs;
-  Maybe<bool> blacklist = JSReceiver::HasProperty(
-      Handle<JSReceiver>::cast(unscopables), it->name());
-  if (!blacklist.has_value) {
+  Handle<Object> blacklist;
+  MaybeHandle<Object> maybe_blacklist =
+      Object::GetProperty(unscopables, it->name());
+  if (!maybe_blacklist.ToHandle(&blacklist)) {
     DCHECK(isolate->has_pending_exception());
     return Maybe<PropertyAttributes>();
   }
-  if (blacklist.value) return maybe(ABSENT);
+  if (!blacklist->IsUndefined()) return maybe(ABSENT);
   return attrs;
 }
 
+static void GetAttributesAndBindingFlags(VariableMode mode,
+                                         InitializationFlag init_flag,
+                                         PropertyAttributes* attributes,
+                                         BindingFlags* binding_flags) {
+  switch (mode) {
+    case INTERNAL:  // Fall through.
+    case VAR:
+      *attributes = NONE;
+      *binding_flags = MUTABLE_IS_INITIALIZED;
+      break;
+    case LET:
+      *attributes = NONE;
+      *binding_flags = (init_flag == kNeedsInitialization)
+                           ? MUTABLE_CHECK_INITIALIZED
+                           : MUTABLE_IS_INITIALIZED;
+      break;
+    case CONST_LEGACY:
+      *attributes = READ_ONLY;
+      *binding_flags = (init_flag == kNeedsInitialization)
+                           ? IMMUTABLE_CHECK_INITIALIZED
+                           : IMMUTABLE_IS_INITIALIZED;
+      break;
+    case CONST:
+      *attributes = READ_ONLY;
+      *binding_flags = (init_flag == kNeedsInitialization)
+                           ? IMMUTABLE_CHECK_INITIALIZED_HARMONY
+                           : IMMUTABLE_IS_INITIALIZED_HARMONY;
+      break;
+    case MODULE:
+      *attributes = READ_ONLY;
+      *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
+      break;
+    case DYNAMIC:
+    case DYNAMIC_GLOBAL:
+    case DYNAMIC_LOCAL:
+    case TEMPORARY:
+      // Note: Fixed context slots are statically allocated by the compiler.
+      // Statically allocated variables always have a statically known mode,
+      // which is the mode with which they were declared when added to the
+      // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
+      // declared variables that were introduced through declaration nodes)
+      // must not appear here.
+      UNREACHABLE();
+      break;
+  }
+}
+
 
 Handle<Object> Context::Lookup(Handle<String> name,
                                ContextLookupFlags flags,
@@ -122,29 +212,14 @@ Handle<Object> Context::Lookup(Handle<String> name,
     PrintF(")\n");
   }
 
-  bool visited_global_context = false;
-
   do {
     if (FLAG_trace_contexts) {
       PrintF(" - looking in context %p", reinterpret_cast<void*>(*context));
-      if (context->IsGlobalContext()) PrintF(" (global context)");
+      if (context->IsScriptContext()) PrintF(" (script context)");
       if (context->IsNativeContext()) PrintF(" (native context)");
       PrintF("\n");
     }
 
-    if (follow_context_chain && FLAG_harmony_scoping &&
-        !visited_global_context &&
-        (context->IsGlobalContext() || context->IsNativeContext())) {
-      // For lexical scoping, on a top level, we might resolve to the
-      // lexical bindings introduced by later scrips. Therefore we need to
-      // switch to the the last added global context during lookup here.
-      context = Handle<Context>(context->global_object()->global_context());
-      visited_global_context = true;
-      if (FLAG_trace_contexts) {
-        PrintF("   - switching to current global context %p\n",
-               reinterpret_cast<void*>(*context));
-      }
-    }
 
     // 1. Check global objects, subjects of with, and extension objects.
     if (context->IsNativeContext() ||
@@ -152,6 +227,30 @@ Handle<Object> Context::Lookup(Handle<String> name,
         (context->IsFunctionContext() && context->has_extension())) {
       Handle<JSReceiver> object(
           JSReceiver::cast(context->extension()), isolate);
+
+      if (context->IsNativeContext()) {
+        if (FLAG_trace_contexts) {
+          PrintF(" - trying other script contexts\n");
+        }
+        // Try other script contexts.
+        Handle<ScriptContextTable> script_contexts(
+            context->global_object()->native_context()->script_context_table());
+        ScriptContextTable::LookupResult r;
+        if (ScriptContextTable::Lookup(script_contexts, name, &r)) {
+          if (FLAG_trace_contexts) {
+            Handle<Context> c = ScriptContextTable::GetContext(script_contexts,
+                                                               r.context_index);
+            PrintF("=> found property in script context %d: %p\n",
+                   r.context_index, reinterpret_cast<void*>(*c));
+          }
+          *index = r.slot_index;
+          GetAttributesAndBindingFlags(r.mode, r.init_flag, attributes,
+                                       binding_flags);
+          return ScriptContextTable::GetContext(script_contexts,
+                                                r.context_index);
+        }
+      }
+
       // Context extension objects need to behave as if they have no
       // prototype.  So even if we want to follow prototype chains, we need
       // to only do a local lookup for context extension objects.
@@ -181,7 +280,7 @@ Handle<Object> Context::Lookup(Handle<String> name,
 
     // 2. Check the context proper if it has slots.
     if (context->IsFunctionContext() || context->IsBlockContext() ||
-        (FLAG_harmony_scoping && context->IsGlobalContext())) {
+        (FLAG_harmony_scoping && context->IsScriptContext())) {
       // Use serialized scope information of functions and blocks to search
       // for the context index.
       Handle<ScopeInfo> scope_info;
@@ -206,45 +305,8 @@ Handle<Object> Context::Lookup(Handle<String> name,
                  slot_index, mode);
         }
         *index = slot_index;
-        // Note: Fixed context slots are statically allocated by the compiler.
-        // Statically allocated variables always have a statically known mode,
-        // which is the mode with which they were declared when added to the
-        // scope. Thus, the DYNAMIC mode (which corresponds to dynamically
-        // declared variables that were introduced through declaration nodes)
-        // must not appear here.
-        switch (mode) {
-          case INTERNAL:  // Fall through.
-          case VAR:
-            *attributes = NONE;
-            *binding_flags = MUTABLE_IS_INITIALIZED;
-            break;
-          case LET:
-            *attributes = NONE;
-            *binding_flags = (init_flag == kNeedsInitialization)
-                ? MUTABLE_CHECK_INITIALIZED : MUTABLE_IS_INITIALIZED;
-            break;
-          case CONST_LEGACY:
-            *attributes = READ_ONLY;
-            *binding_flags = (init_flag == kNeedsInitialization)
-                ? IMMUTABLE_CHECK_INITIALIZED : IMMUTABLE_IS_INITIALIZED;
-            break;
-          case CONST:
-            *attributes = READ_ONLY;
-            *binding_flags = (init_flag == kNeedsInitialization)
-                ? IMMUTABLE_CHECK_INITIALIZED_HARMONY :
-                IMMUTABLE_IS_INITIALIZED_HARMONY;
-            break;
-          case MODULE:
-            *attributes = READ_ONLY;
-            *binding_flags = IMMUTABLE_IS_INITIALIZED_HARMONY;
-            break;
-          case DYNAMIC:
-          case DYNAMIC_GLOBAL:
-          case DYNAMIC_LOCAL:
-          case TEMPORARY:
-            UNREACHABLE();
-            break;
-        }
+        GetAttributesAndBindingFlags(mode, init_flag, attributes,
+                                     binding_flags);
         return context;
       }
 
@@ -419,7 +481,7 @@ bool Context::IsBootstrappingOrValidParentContext(
   if (child->GetIsolate()->bootstrapper()->IsActive()) return true;
   if (!object->IsContext()) return false;
   Context* context = Context::cast(object);
-  return context->IsNativeContext() || context->IsGlobalContext() ||
+  return context->IsNativeContext() || context->IsScriptContext() ||
          context->IsModuleContext() || !child->IsModuleContext();
 }
 
index dc77861..cd3ff14 100644
@@ -98,6 +98,7 @@ enum BindingFlags {
   V(TO_INTEGER_FUN_INDEX, JSFunction, to_integer_fun)                          \
   V(TO_UINT32_FUN_INDEX, JSFunction, to_uint32_fun)                            \
   V(TO_INT32_FUN_INDEX, JSFunction, to_int32_fun)                              \
+  V(TO_LENGTH_FUN_INDEX, JSFunction, to_length_fun)                            \
   V(GLOBAL_EVAL_FUN_INDEX, JSFunction, global_eval_fun)                        \
   V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun)                        \
   V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun)          \
@@ -181,9 +182,63 @@ enum BindingFlags {
   V(ITERATOR_RESULT_MAP_INDEX, Map, iterator_result_map)                       \
   V(MAP_ITERATOR_MAP_INDEX, Map, map_iterator_map)                             \
   V(SET_ITERATOR_MAP_INDEX, Map, set_iterator_map)                             \
-  V(ITERATOR_SYMBOL_INDEX, Symbol, iterator_symbol)                            \
-  V(UNSCOPABLES_SYMBOL_INDEX, Symbol, unscopables_symbol)                      \
-  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)
+  V(ARRAY_VALUES_ITERATOR_INDEX, JSFunction, array_values_iterator)            \
+  V(SCRIPT_CONTEXT_TABLE_INDEX, ScriptContextTable, script_context_table)
+
+
+// A table of all script contexts. Every loaded top-level script with top-level
+// lexical declarations contributes its ScriptContext into this table.
+//
+// The table is a fixed array; its first slot is the current used count, and
+// the subsequent slots 1..used contain the ScriptContexts.
+class ScriptContextTable : public FixedArray {
+ public:
+  // Conversions.
+  static ScriptContextTable* cast(Object* context) {
+    DCHECK(context->IsScriptContextTable());
+    return reinterpret_cast<ScriptContextTable*>(context);
+  }
+
+  struct LookupResult {
+    int context_index;
+    int slot_index;
+    VariableMode mode;
+    InitializationFlag init_flag;
+    MaybeAssignedFlag maybe_assigned_flag;
+  };
+
+  int used() const { return Smi::cast(get(kUsedSlot))->value(); }
+
+  void set_used(int used) { set(kUsedSlot, Smi::FromInt(used)); }
+
+  static Handle<Context> GetContext(Handle<ScriptContextTable> table, int i) {
+    DCHECK(i < table->used());
+    return Handle<Context>::cast(FixedArray::get(table, i + 1));
+  }
+
+  // Look up a variable `name` in a ScriptContextTable.
+  // If it returns true, the variable is found and `result` contains
+  // valid information about its location.
+  // If it returns false, `result` is untouched.
+  MUST_USE_RESULT
+  static bool Lookup(Handle<ScriptContextTable> table, Handle<String> name,
+                     LookupResult* result);
+
+  MUST_USE_RESULT
+  static Handle<ScriptContextTable> Extend(Handle<ScriptContextTable> table,
+                                           Handle<Context> script_context);
+
+  static int GetContextOffset(int context_index) {
+    return kFirstContextOffset + context_index * kPointerSize;
+  }
+
+ private:
+  static const int kUsedSlot = 0;
+  static const int kFirstContextOffset =
+      FixedArray::kHeaderSize + (kUsedSlot + 1) * kPointerSize;
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(ScriptContextTable);
+};
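
From used(), GetContext(), and GetContextOffset() above, the implied layout
is: fixed-array slot 0 holds the used count as a Smi, and context i lives at
slot i + 1. A sketch of the byte-offset computation, with hypothetical
header and pointer sizes standing in for V8's real constants:

    #include <cstddef>

    constexpr size_t kPointerSizeAssumed = 8;  // assumption: 64-bit build
    constexpr size_t kHeaderSizeAssumed = 16;  // hypothetical header size

    // Mirrors GetContextOffset(): skip the header and the used-count slot.
    constexpr size_t context_offset(int context_index) {
      return kHeaderSizeAssumed + (1 + context_index) * kPointerSizeAssumed;
    }
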
 
 // JSFunctions are pairs (context, function code), sometimes also called
 // closures. A Context object is used to represent function contexts and
@@ -228,7 +283,9 @@ enum BindingFlags {
 // properties.
 //
 // Finally, with Harmony scoping, the JSFunction representing a top level
-// script will have the GlobalContext rather than a FunctionContext.
+// script will have the ScriptContext rather than a FunctionContext.
+// Script contexts from all top-level scripts are gathered in
+// ScriptContextTable.
 
 class Context: public FixedArray {
  public:
@@ -357,16 +414,16 @@ class Context: public FixedArray {
     ITERATOR_RESULT_MAP_INDEX,
     MAP_ITERATOR_MAP_INDEX,
     SET_ITERATOR_MAP_INDEX,
-    ITERATOR_SYMBOL_INDEX,
-    UNSCOPABLES_SYMBOL_INDEX,
     ARRAY_VALUES_ITERATOR_INDEX,
+    SCRIPT_CONTEXT_TABLE_INDEX,
+    MAP_CACHE_INDEX,
+    TO_LENGTH_FUN_INDEX,
 
     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
     OPTIMIZED_FUNCTIONS_LIST,  // Weak.
     OPTIMIZED_CODE_LIST,       // Weak.
     DEOPTIMIZED_CODE_LIST,     // Weak.
-    MAP_CACHE_INDEX,           // Weak.
     NEXT_CONTEXT_LINK,         // Weak.
 
     // Total number of slots.
@@ -412,8 +469,8 @@ class Context: public FixedArray {
   // The builtins object.
   JSBuiltinsObject* builtins();
 
-  // Get the innermost global context by traversing the context chain.
-  Context* global_context();
+  // Get the script context by traversing the context chain.
+  Context* script_context();
 
   // Compute the native context by traversing the context chain.
   Context* native_context();
@@ -445,9 +502,9 @@ class Context: public FixedArray {
     Map* map = this->map();
     return map == map->GetHeap()->module_context_map();
   }
-  bool IsGlobalContext() {
+  bool IsScriptContext() {
     Map* map = this->map();
-    return map == map->GetHeap()->global_context_map();
+    return map == map->GetHeap()->script_context_map();
   }
 
   bool HasSameSecurityTokenAs(Context* that) {
index 651cf54..41107cf 100644
@@ -489,7 +489,6 @@ class HistogramTimerScope BASE_EMBEDDED {
   SC(for_in, V8.ForIn)                                                         \
   SC(enum_cache_hits, V8.EnumCacheHits)                                        \
   SC(enum_cache_misses, V8.EnumCacheMisses)                                    \
-  SC(zone_segment_bytes, V8.ZoneSegmentBytes)                                  \
   SC(fast_new_closure_total, V8.FastNewClosureTotal)                           \
   SC(fast_new_closure_try_optimized, V8.FastNewClosureTryOptimized)            \
   SC(fast_new_closure_install_optimized, V8.FastNewClosureInstallOptimized)    \
index 80faf10..456770b 100644
@@ -246,7 +246,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
     Script* script = Script::cast(shared->script());
     rec->entry->set_script_id(script->id()->value());
     rec->entry->set_bailout_reason(
-        GetBailoutReason(shared->DisableOptimizationReason()));
+        GetBailoutReason(shared->disable_optimization_reason()));
   }
   rec->size = code->ExecutableSize();
   rec->shared = shared->address();
@@ -272,8 +272,8 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
         int position = static_cast<int>(it.rinfo()->data());
         if (position >= 0) {
           int pc_offset = static_cast<int>(it.rinfo()->pc() - code->address());
-          int line_number = script->GetLineNumber(position) + 1;
-          line_table->SetPosition(pc_offset, line_number);
+          int line_number = script->GetLineNumber(position);
+          line_table->SetPosition(pc_offset, line_number + 1);
         }
       }
     }
@@ -289,7 +289,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag, Code* code,
   rec->size = code->ExecutableSize();
   rec->shared = shared->address();
   rec->entry->set_bailout_reason(
-      GetBailoutReason(shared->DisableOptimizationReason()));
+      GetBailoutReason(shared->disable_optimization_reason()));
   processor_->Enqueue(evt_rec);
 }
 
@@ -324,7 +324,7 @@ void CpuProfiler::CodeDisableOptEvent(Code* code, SharedFunctionInfo* shared) {
   CodeEventsContainer evt_rec(CodeEventRecord::CODE_DISABLE_OPT);
   CodeDisableOptEventRecord* rec = &evt_rec.CodeDisableOptEventRecord_;
   rec->start = code->address();
-  rec->bailout_reason = GetBailoutReason(shared->DisableOptimizationReason());
+  rec->bailout_reason = GetBailoutReason(shared->disable_optimization_reason());
   processor_->Enqueue(evt_rec);
 }
 
index fb24bcc..132891e 100644
@@ -8,10 +8,6 @@
 #define V8_SHARED
 #endif
 
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-#include <bzlib.h>
-#endif
-
 #include <errno.h>
 #include <stdlib.h>
 #include <string.h>
@@ -617,7 +613,7 @@ void Shell::Load(const v8::FunctionCallbackInfo<v8::Value>& args) {
 
 void Shell::Quit(const v8::FunctionCallbackInfo<v8::Value>& args) {
   int exit_code = args[0]->Int32Value();
-  OnExit();
+  OnExit(args.GetIsolate());
   exit(exit_code);
 }
 
@@ -855,7 +851,7 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
   // Run the d8 shell utility script in the utility context
   int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
   i::Vector<const char> shell_source =
-      i::NativesCollection<i::D8>::GetRawScriptSource(source_index);
+      i::NativesCollection<i::D8>::GetScriptSource(source_index);
   i::Vector<const char> shell_source_name =
       i::NativesCollection<i::D8>::GetScriptName(source_index);
   Handle<String> source =
@@ -883,34 +879,6 @@ void Shell::InstallUtilityScript(Isolate* isolate) {
 #endif  // !V8_SHARED
 
 
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-class BZip2Decompressor : public v8::StartupDataDecompressor {
- public:
-  virtual ~BZip2Decompressor() { }
-
- protected:
-  virtual int DecompressData(char* raw_data,
-                             int* raw_data_size,
-                             const char* compressed_data,
-                             int compressed_data_size) {
-    DCHECK_EQ(v8::StartupData::kBZip2,
-              v8::V8::GetCompressedStartupDataAlgorithm());
-    unsigned int decompressed_size = *raw_data_size;
-    int result =
-        BZ2_bzBuffToBuffDecompress(raw_data,
-                                   &decompressed_size,
-                                   const_cast<char*>(compressed_data),
-                                   compressed_data_size,
-                                   0, 1);
-    if (result == BZ_OK) {
-      *raw_data_size = decompressed_size;
-    }
-    return result;
-  }
-};
-#endif
-
-
 Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
   Handle<ObjectTemplate> global_template = ObjectTemplate::New(isolate);
   global_template->Set(String::NewFromUtf8(isolate, "print"),
@@ -967,15 +935,6 @@ Handle<ObjectTemplate> Shell::CreateGlobalTemplate(Isolate* isolate) {
 
 
 void Shell::Initialize(Isolate* isolate) {
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-  BZip2Decompressor startup_data_decompressor;
-  int bz2_result = startup_data_decompressor.Decompress();
-  if (bz2_result != BZ_OK) {
-    fprintf(stderr, "bzip error code: %d\n", bz2_result);
-    Exit(1);
-  }
-#endif
-
 #ifndef V8_SHARED
   Shell::counter_map_ = new CounterMap();
   // Set up counters
@@ -1054,10 +1013,11 @@ inline bool operator<(const CounterAndKey& lhs, const CounterAndKey& rhs) {
 #endif  // !V8_SHARED
 
 
-void Shell::OnExit() {
+void Shell::OnExit(v8::Isolate* isolate) {
   LineEditor* line_editor = LineEditor::Get();
   if (line_editor) line_editor->Close();
 #ifndef V8_SHARED
+  reinterpret_cast<i::Isolate*>(isolate)->DumpAndResetCompilationStats();
   if (i::FLAG_dump_counters) {
     int number_of_counters = 0;
     for (CounterMap::Iterator i(counter_map_); i.More(); i.Next()) {
@@ -1579,25 +1539,30 @@ class ShellArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
 
 class MockArrayBufferAllocator : public v8::ArrayBuffer::Allocator {
  public:
-  virtual void* Allocate(size_t) OVERRIDE {
-    return malloc(0);
-  }
-  virtual void* AllocateUninitialized(size_t length) OVERRIDE {
-    return malloc(0);
-  }
-  virtual void Free(void* p, size_t) OVERRIDE {
-    free(p);
-  }
+  void* Allocate(size_t) OVERRIDE { return malloc(0); }
+  void* AllocateUninitialized(size_t length) OVERRIDE { return malloc(0); }
+  void Free(void* p, size_t) OVERRIDE { free(p); }
 };
 
 
 #ifdef V8_USE_EXTERNAL_STARTUP_DATA
 class StartupDataHandler {
  public:
-  StartupDataHandler(const char* natives_blob,
+  StartupDataHandler(const char* exec_path, const char* natives_blob,
                      const char* snapshot_blob) {
-    Load(natives_blob, &natives_, v8::V8::SetNativesDataBlob);
-    Load(snapshot_blob, &snapshot_, v8::V8::SetSnapshotDataBlob);
+    // If at least one blob was given explicitly, use the given blobs.
+    // If not, use the default blob locations next to the d8 binary.
+    if (natives_blob || snapshot_blob) {
+      LoadFromFiles(natives_blob, snapshot_blob);
+    } else {
+      char* natives;
+      char* snapshot;
+      LoadFromFiles(RelativePath(&natives, exec_path, "natives_blob.bin"),
+                    RelativePath(&snapshot, exec_path, "snapshot_blob.bin"));
+
+      free(natives);
+      free(snapshot);
+    }
   }
 
   ~StartupDataHandler() {
@@ -1606,11 +1571,32 @@ class StartupDataHandler {
   }
 
  private:
+  static char* RelativePath(char** buffer, const char* exec_path,
+                            const char* name) {
+    DCHECK(exec_path);
+    const char* last_slash = strrchr(exec_path, '/');
+    if (last_slash) {
+      int after_slash = last_slash - exec_path + 1;
+      int name_length = strlen(name);
+      *buffer =
+          reinterpret_cast<char*>(calloc(after_slash + name_length + 1, 1));
+      strncpy(*buffer, exec_path, after_slash);
+      strncat(*buffer, name, name_length);
+    } else {
+      *buffer = strdup(name);
+    }
+    return *buffer;
+  }
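
RelativePath resolves a blob name against the directory of the running
binary, falling back to the bare name when exec_path has no slash. The same
computation in a portable sketch (std::string in place of the malloc'd
buffer the real helper hands back through *buffer):

    #include <cstring>
    #include <string>

    std::string relative_path(const char* exec_path, const char* name) {
      const char* last_slash = std::strrchr(exec_path, '/');
      if (last_slash == nullptr) return name;  // no directory component
      return std::string(exec_path, last_slash + 1) + name;
    }
    // relative_path("/usr/bin/d8", "natives_blob.bin")
    //     == "/usr/bin/natives_blob.bin"
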
+
+  void LoadFromFiles(const char* natives_blob, const char* snapshot_blob) {
+    Load(natives_blob, &natives_, v8::V8::SetNativesDataBlob);
+    Load(snapshot_blob, &snapshot_, v8::V8::SetSnapshotDataBlob);
+  }
+
   void Load(const char* blob_file,
             v8::StartupData* startup_data,
             void (*setter_fn)(v8::StartupData*)) {
     startup_data->data = NULL;
-    startup_data->compressed_size = 0;
     startup_data->raw_size = 0;
 
     if (!blob_file)
@@ -1625,13 +1611,12 @@ class StartupDataHandler {
     rewind(file);
 
     startup_data->data = new char[startup_data->raw_size];
-    startup_data->compressed_size = fread(
-        const_cast<char*>(startup_data->data), 1, startup_data->raw_size,
-        file);
+    int read_size =
+        static_cast<int>(fread(const_cast<char*>(startup_data->data), 1,
+                               startup_data->raw_size, file));
     fclose(file);
 
-    if (startup_data->raw_size == startup_data->compressed_size)
-      (*setter_fn)(startup_data);
+    if (startup_data->raw_size == read_size) (*setter_fn)(startup_data);
   }
 
   v8::StartupData natives_;
@@ -1666,7 +1651,8 @@ int Shell::Main(int argc, char* argv[]) {
   v8::V8::InitializePlatform(platform);
   v8::V8::Initialize();
 #ifdef V8_USE_EXTERNAL_STARTUP_DATA
-  StartupDataHandler startup_data(options.natives_blob, options.snapshot_blob);
+  StartupDataHandler startup_data(argv[0], options.natives_blob,
+                                  options.snapshot_blob);
 #endif
   SetFlagsFromString("--trace-hydrogen-file=hydrogen.cfg");
   SetFlagsFromString("--trace-turbo-cfg-file=turbo.cfg");
@@ -1746,6 +1732,7 @@ int Shell::Main(int argc, char* argv[]) {
       RunShell(isolate);
     }
   }
+  OnExit(isolate);
 #ifndef V8_SHARED
   // Dump basic block profiling data.
   if (i::BasicBlockProfiler* profiler =
@@ -1759,8 +1746,6 @@ int Shell::Main(int argc, char* argv[]) {
   V8::ShutdownPlatform();
   delete platform;
 
-  OnExit();
-
   return result;
 }
 
index a084979..e536584 100644
             '../tools/js2c.py',
             '<@(_outputs)',
             'D8',
-            'off',  # compress startup data
             '<@(js_files)'
           ],
         },
index 44ee09a..caa5a0a 100644
@@ -263,7 +263,7 @@ class Shell : public i::AllStatic {
   static int RunMain(Isolate* isolate, int argc, char* argv[]);
   static int Main(int argc, char* argv[]);
   static void Exit(int exit_code);
-  static void OnExit();
+  static void OnExit(Isolate* isolate);
 
 #ifndef V8_SHARED
   static Handle<Array> GetCompletions(Isolate* isolate,
index fc4c5da..09f479e 100644
@@ -1,6 +1,7 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+"use strict";
 
 // Default number of frames to include in the response to backtrace request.
 var kDefaultBacktraceLength = 10;
@@ -432,7 +433,7 @@ ScriptBreakPoint.prototype.set = function (script) {
   if (IS_NULL(position)) return;
 
   // Create a break point object and set the break point.
-  break_point = MakeBreakPoint(position, this);
+  var break_point = MakeBreakPoint(position, this);
   break_point.setIgnoreCount(this.ignoreCount());
   var actual_position = %SetScriptBreakPoint(script, position,
                                              this.position_alignment_,
@@ -672,7 +673,7 @@ Debug.setBreakPointByScriptIdAndPosition = function(script_id, position,
                                                     condition, enabled,
                                                     opt_position_alignment)
 {
-  break_point = MakeBreakPoint(position);
+  var break_point = MakeBreakPoint(position);
   break_point.setCondition(condition);
   if (!enabled) {
     break_point.disable();
@@ -739,7 +740,7 @@ Debug.clearBreakPoint = function(break_point_number) {
 
 Debug.clearAllBreakPoints = function() {
   for (var i = 0; i < break_points.length; i++) {
-    break_point = break_points[i];
+    var break_point = break_points[i];
     %ClearBreakPoint(break_point);
   }
   break_points = [];
index 7fe9064..93ef1cf 100644
@@ -40,6 +40,7 @@ Debug::Debug(Isolate* isolate)
       live_edit_enabled_(true),  // TODO(yangguo): set to false by default.
       has_break_points_(false),
       break_disabled_(false),
+      in_debug_event_listener_(false),
       break_on_exception_(false),
       break_on_uncaught_exception_(false),
       script_cache_(NULL),
@@ -694,12 +695,11 @@ void ScriptCache::HandleWeakScript(
 }
 
 
-void Debug::HandleWeakDebugInfo(
-    const v8::WeakCallbackData<v8::Value, void>& data) {
+void Debug::HandlePhantomDebugInfo(
+    const v8::PhantomCallbackData<DebugInfoListNode>& data) {
   Debug* debug = reinterpret_cast<Isolate*>(data.GetIsolate())->debug();
-  DebugInfoListNode* node =
-      reinterpret_cast<DebugInfoListNode*>(data.GetParameter());
-  debug->RemoveDebugInfo(node->debug_info().location());
+  DebugInfoListNode* node = data.GetParameter();
+  debug->RemoveDebugInfo(node);
 #ifdef DEBUG
   for (DebugInfoListNode* n = debug->debug_info_list_;
        n != NULL;
@@ -714,9 +714,10 @@ DebugInfoListNode::DebugInfoListNode(DebugInfo* debug_info): next_(NULL) {
   // Globalize the request debug info object and make it weak.
   GlobalHandles* global_handles = debug_info->GetIsolate()->global_handles();
   debug_info_ = Handle<DebugInfo>::cast(global_handles->Create(debug_info));
-  GlobalHandles::MakeWeak(reinterpret_cast<Object**>(debug_info_.location()),
-                          this, Debug::HandleWeakDebugInfo,
-                          GlobalHandles::Phantom);
+  typedef PhantomCallbackData<void>::Callback Callback;
+  GlobalHandles::MakePhantom(
+      reinterpret_cast<Object**>(debug_info_.location()), this,
+      reinterpret_cast<Callback>(Debug::HandlePhantomDebugInfo));
 }
 
 
@@ -872,7 +873,7 @@ void Debug::Break(Arguments args, JavaScriptFrame* frame) {
   LiveEdit::InitializeThreadLocal(this);
 
   // Just continue if breaks are disabled or debugger cannot be loaded.
-  if (break_disabled_) return;
+  if (break_disabled()) return;
 
   // Enter the debugger.
   DebugScope debug_scope(this);
@@ -1199,6 +1200,9 @@ void Debug::ClearAllBreakPoints() {
 
 void Debug::FloodWithOneShot(Handle<JSFunction> function,
                              BreakLocatorType type) {
+  // Do not ever break in native functions.
+  if (function->IsFromNativeScript()) return;
+
   PrepareForBreakPoints();
 
   // Make sure the function is compiled and has set up the debug info.
@@ -1225,7 +1229,48 @@ void Debug::FloodBoundFunctionWithOneShot(Handle<JSFunction> function) {
   if (!bindee.is_null() && bindee->IsJSFunction() &&
       !JSFunction::cast(*bindee)->IsFromNativeScript()) {
     Handle<JSFunction> bindee_function(JSFunction::cast(*bindee));
-    FloodWithOneShot(bindee_function);
+    FloodWithOneShotGeneric(bindee_function);
+  }
+}
+
+
+void Debug::FloodDefaultConstructorWithOneShot(Handle<JSFunction> function) {
+  DCHECK(function->shared()->is_default_constructor());
+  // Instead of stepping into the function, we directly step into the super class
+  // constructor.
+  Isolate* isolate = function->GetIsolate();
+  PrototypeIterator iter(isolate, function);
+  Handle<Object> proto = PrototypeIterator::GetCurrent(iter);
+  if (!proto->IsJSFunction()) return;  // Object.prototype
+  Handle<JSFunction> function_proto = Handle<JSFunction>::cast(proto);
+  FloodWithOneShotGeneric(function_proto);
+}
+
+
+void Debug::FloodWithOneShotGeneric(Handle<JSFunction> function,
+                                    Handle<Object> holder) {
+  if (function->shared()->bound()) {
+    FloodBoundFunctionWithOneShot(function);
+  } else if (function->shared()->is_default_constructor()) {
+    FloodDefaultConstructorWithOneShot(function);
+  } else {
+    Isolate* isolate = function->GetIsolate();
+    // Don't allow step into functions in the native context.
+    if (function->shared()->code() ==
+            isolate->builtins()->builtin(Builtins::kFunctionApply) ||
+        function->shared()->code() ==
+            isolate->builtins()->builtin(Builtins::kFunctionCall)) {
+      // Handle function.apply and function.call separately to flood the
+      // function to be called and not the code for Builtins::FunctionApply or
+      // Builtins::FunctionCall. The receiver of call/apply is the target
+      // function.
+      if (!holder.is_null() && holder->IsJSFunction()) {
+        Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
+        FloodWithOneShotGeneric(js_function);
+      }
+    } else {
+      FloodWithOneShot(function);
+    }
   }
 }
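
FloodWithOneShotGeneric centralizes the step-into dispatch that was
previously duplicated across PrepareStep and HandleStepIn: bound functions
flood their bindee, default constructors flood the superclass constructor
found on the prototype chain, and Function.prototype.call/apply flood the
receiver rather than the builtin. A hedged JavaScript sketch of the three
shapes it distinguishes (names are illustrative):

    function target() { return 42; }
    var bound = target.bind(null); // bound: the bindee (target) is flooded
    class Base { constructor() {} }
    class Derived extends Base {}  // default ctor: Base's ctor is flooded
    target.call(null);             // call/apply: the receiver (target) is
                                   // flooded, not Builtins::FunctionCall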
 
@@ -1464,13 +1509,7 @@ void Debug::PrepareStep(StepAction step_action,
 
       if (fun->IsJSFunction()) {
         Handle<JSFunction> js_function(JSFunction::cast(fun));
-        if (js_function->shared()->bound()) {
-          FloodBoundFunctionWithOneShot(js_function);
-        } else if (!js_function->IsFromNativeScript()) {
-          // Don't step into builtins.
-          // It will also compile target function if it's not compiled yet.
-          FloodWithOneShot(js_function);
-        }
+        FloodWithOneShotGeneric(js_function);
       }
     }
 
@@ -1612,32 +1651,7 @@ void Debug::HandleStepIn(Handle<Object> function_obj, Handle<Object> holder,
   // Flood the function with one-shot break points if it is called from where
   // step into was requested, or when stepping into a new frame.
   if (fp == thread_local_.step_into_fp_ || step_frame) {
-    if (function->shared()->bound()) {
-      // Handle Function.prototype.bind
-      FloodBoundFunctionWithOneShot(function);
-    } else if (!function->IsFromNativeScript()) {
-      // Don't allow step into functions in the native context.
-      if (function->shared()->code() ==
-          isolate->builtins()->builtin(Builtins::kFunctionApply) ||
-          function->shared()->code() ==
-          isolate->builtins()->builtin(Builtins::kFunctionCall)) {
-        // Handle function.apply and function.call separately to flood the
-        // function to be called and not the code for Builtins::FunctionApply or
-        // Builtins::FunctionCall. The receiver of call/apply is the target
-        // function.
-        if (!holder.is_null() && holder->IsJSFunction()) {
-          Handle<JSFunction> js_function = Handle<JSFunction>::cast(holder);
-          if (!js_function->IsFromNativeScript()) {
-            FloodWithOneShot(js_function);
-          } else if (js_function->shared()->bound()) {
-            // Handle Function.prototype.bind
-            FloodBoundFunctionWithOneShot(js_function);
-          }
-        }
-      } else {
-        FloodWithOneShot(function);
-      }
-    }
+    FloodWithOneShotGeneric(function, holder);
   }
 }
 
@@ -2107,7 +2121,10 @@ Object* Debug::FindSharedFunctionInfoInScript(Handle<Script> script,
   Heap* heap = isolate_->heap();
   while (!done) {
     { // Extra scope for iterator.
-      HeapIterator iterator(heap);
+      // If lazy compilation is off, we won't have duplicate shared function
+      // infos that need to be filtered.
+      HeapIterator iterator(heap, FLAG_lazy ? HeapIterator::kNoFiltering
+                                            : HeapIterator::kFilterUnreachable);
       for (HeapObject* obj = iterator.next();
            obj != NULL; obj = iterator.next()) {
         bool found_next_candidate = false;
@@ -2223,10 +2240,21 @@ bool Debug::EnsureDebugInfo(Handle<SharedFunctionInfo> shared,
 }
 
 
-// This uses the location of a handle to look up the debug info in the debug
-// info list, but it doesn't use the actual debug info for anything.  Therefore
-// if the debug info has been collected by the GC, we can be sure that this
-// method will not attempt to resurrect it.
+void Debug::RemoveDebugInfo(DebugInfoListNode* prev, DebugInfoListNode* node) {
+  // Unlink from list. If prev is NULL we are looking at the first element.
+  if (prev == NULL) {
+    debug_info_list_ = node->next();
+  } else {
+    prev->set_next(node->next());
+  }
+  delete node;
+
+  // If there are no more debug info objects, there are no more break
+  // points.
+  has_break_points_ = debug_info_list_ != NULL;
+}
+
+
 void Debug::RemoveDebugInfo(DebugInfo** debug_info) {
   DCHECK(debug_info_list_ != NULL);
   // Run through the debug info objects to find this one and remove it.
@@ -2234,18 +2262,25 @@ void Debug::RemoveDebugInfo(DebugInfo** debug_info) {
   DebugInfoListNode* current = debug_info_list_;
   while (current != NULL) {
     if (current->debug_info().location() == debug_info) {
-      // Unlink from list. If prev is NULL we are looking at the first element.
-      if (prev == NULL) {
-        debug_info_list_ = current->next();
-      } else {
-        prev->set_next(current->next());
-      }
-      delete current;
+      RemoveDebugInfo(prev, current);
+      return;
+    }
+    // Move to next in list.
+    prev = current;
+    current = current->next();
+  }
+  UNREACHABLE();
+}
 
-      // If there are no more debug info objects there are not more break
-      // points.
-      has_break_points_ = debug_info_list_ != NULL;
 
+void Debug::RemoveDebugInfo(DebugInfoListNode* node) {
+  DCHECK(debug_info_list_ != NULL);
+  // Run through the debug info objects to find this one and remove it.
+  DebugInfoListNode* prev = NULL;
+  DebugInfoListNode* current = debug_info_list_;
+  while (current != NULL) {
+    if (current == node) {
+      RemoveDebugInfo(prev, node);
       return;
     }
     // Move to next in list.
@@ -2613,8 +2648,12 @@ void Debug::OnException(Handle<Object> exception, bool uncaught,
 
 
 void Debug::OnCompileError(Handle<Script> script) {
-  // No more to do if not debugging.
-  if (in_debug_scope() || ignore_events()) return;
+  if (ignore_events()) return;
+
+  if (in_debug_scope()) {
+    ProcessCompileEventInDebugScope(v8::CompileError, script);
+    return;
+  }
 
   HandleScope scope(isolate_);
   DebugScope debug_scope(this);
@@ -2675,8 +2714,12 @@ void Debug::OnAfterCompile(Handle<Script> script) {
   // Add the newly compiled script to the script cache.
   if (script_cache_ != NULL) script_cache_->Add(script);
 
-  // No more to do if not debugging.
-  if (in_debug_scope() || ignore_events()) return;
+  if (ignore_events()) return;
+
+  if (in_debug_scope()) {
+    ProcessCompileEventInDebugScope(v8::AfterCompile, script);
+    return;
+  }
 
   HandleScope scope(isolate_);
   DebugScope debug_scope(this);
@@ -2802,7 +2845,8 @@ void Debug::CallEventCallback(v8::DebugEvent event,
                               Handle<Object> exec_state,
                               Handle<Object> event_data,
                               v8::Debug::ClientData* client_data) {
-  DisableBreak no_break(this, true);
+  bool previous = in_debug_event_listener_;
+  in_debug_event_listener_ = true;
   if (event_listener_->IsForeign()) {
     // Invoke the C debug event listener.
     v8::Debug::EventCallback callback =
@@ -2826,6 +2870,28 @@ void Debug::CallEventCallback(v8::DebugEvent event,
     Execution::TryCall(Handle<JSFunction>::cast(event_listener_),
                        global, arraysize(argv), argv);
   }
+  in_debug_event_listener_ = previous;
+}
+
+
+void Debug::ProcessCompileEventInDebugScope(v8::DebugEvent event,
+                                            Handle<Script> script) {
+  if (event_listener_.is_null()) return;
+
+  SuppressDebug while_processing(this);
+  DebugScope debug_scope(this);
+  if (debug_scope.failed()) return;
+
+  Handle<Object> event_data;
+  // Bail out and don't call debugger if exception.
+  if (!MakeCompileEvent(script, event).ToHandle(&event_data)) return;
+
+  // Create the execution state.
+  Handle<Object> exec_state;
+  // Bail out and don't call debugger if exception.
+  if (!MakeExecutionState().ToHandle(&exec_state)) return;
+
+  CallEventCallback(event, exec_state, event_data, NULL);
 }
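
ProcessCompileEventInDebugScope closes a gap where compile events raised
while already inside the debugger (for example, when a debug event listener
itself compiles code) were silently dropped; they are now delivered with
breaks suppressed. An illustrative sketch, assuming the Debug.setListener
API from debug.js:

    Debug.setListener(function (event, execState, eventData) {
      // Compiling here re-enters the debugger; the resulting AfterCompile
      // event is now routed through ProcessCompileEventInDebugScope
      // instead of being discarded.
      eval("var compiledFromListener = 1;");
    });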
 
 
@@ -3079,7 +3145,7 @@ void Debug::HandleDebugBreak() {
   // Ignore debug break during bootstrapping.
   if (isolate_->bootstrapper()->IsActive()) return;
   // Just continue if breaks are disabled.
-  if (break_disabled_) return;
+  if (break_disabled()) return;
   // Ignore debug break if debugger is not active.
   if (!is_active()) return;
 
index e486d97..0ec9024 100644 (file)
@@ -390,6 +390,9 @@ class Debug {
   void FloodWithOneShot(Handle<JSFunction> function,
                         BreakLocatorType type = ALL_BREAK_LOCATIONS);
   void FloodBoundFunctionWithOneShot(Handle<JSFunction> function);
+  void FloodDefaultConstructorWithOneShot(Handle<JSFunction> function);
+  void FloodWithOneShotGeneric(Handle<JSFunction> function,
+                               Handle<Object> holder = Handle<Object>());
   void FloodHandlerWithOneShot();
   void ChangeBreakOnException(ExceptionBreakType type, bool enable);
   bool IsBreakOnException(ExceptionBreakType type);
@@ -444,8 +447,8 @@ class Debug {
                              Object** restarter_frame_function_pointer);
 
   // Passed to MakeWeak.
-  static void HandleWeakDebugInfo(
-      const v8::WeakCallbackData<v8::Value, void>& data);
+  static void HandlePhantomDebugInfo(
+      const PhantomCallbackData<DebugInfoListNode>& data);
 
   // Threading support.
   char* ArchiveDebug(char* to);
@@ -500,6 +503,8 @@ class Debug {
     return reinterpret_cast<Address>(&thread_local_.step_into_fp_);
   }
 
+  StepAction last_step_action() { return thread_local_.last_step_action_; }
+
  private:
   explicit Debug(Isolate* isolate);
 
@@ -512,6 +517,9 @@ class Debug {
   // Check whether there are commands in the command queue.
   inline bool has_commands() const { return !command_queue_.IsEmpty(); }
   inline bool ignore_events() const { return is_suppressed_ || !is_active_; }
+  inline bool break_disabled() const {
+    return break_disabled_ || in_debug_event_listener_;
+  }
 
   void OnException(Handle<Object> exception, bool uncaught,
                    Handle<Object> promise);
@@ -545,6 +553,8 @@ class Debug {
                          Handle<Object> exec_state,
                          Handle<Object> event_data,
                          v8::Debug::ClientData* client_data);
+  void ProcessCompileEventInDebugScope(v8::DebugEvent event,
+                                       Handle<Script> script);
   void ProcessDebugEvent(v8::DebugEvent event,
                          Handle<JSObject> event_data,
                          bool auto_continue);
@@ -562,6 +572,8 @@ class Debug {
   void ClearStepNext();
   void RemoveDebugInfoAndClearFromShared(Handle<DebugInfo> debug_info);
   void RemoveDebugInfo(DebugInfo** debug_info);
+  void RemoveDebugInfo(DebugInfoListNode* node);
+  void RemoveDebugInfo(DebugInfoListNode* prev, DebugInfoListNode* node);
   Handle<Object> CheckBreakPoints(Handle<Object> break_point);
   bool CheckBreakPoint(Handle<Object> break_point_object);
 
@@ -589,6 +601,7 @@ class Debug {
   bool live_edit_enabled_;
   bool has_break_points_;
   bool break_disabled_;
+  bool in_debug_event_listener_;
   bool break_on_exception_;
   bool break_on_uncaught_exception_;
 
@@ -699,14 +712,21 @@ class DebugScope BASE_EMBEDDED {
 class DisableBreak BASE_EMBEDDED {
  public:
   explicit DisableBreak(Debug* debug, bool disable_break)
-    : debug_(debug), old_state_(debug->break_disabled_) {
+      : debug_(debug),
+        previous_break_disabled_(debug->break_disabled_),
+        previous_in_debug_event_listener_(debug->in_debug_event_listener_) {
     debug_->break_disabled_ = disable_break;
+    debug_->in_debug_event_listener_ = disable_break;
+  }
+  ~DisableBreak() {
+    debug_->break_disabled_ = previous_break_disabled_;
+    debug_->in_debug_event_listener_ = previous_in_debug_event_listener_;
   }
-  ~DisableBreak() { debug_->break_disabled_ = old_state_; }
 
  private:
   Debug* debug_;
-  bool old_state_;
+  bool previous_break_disabled_;
+  bool previous_in_debug_event_listener_;
   DISALLOW_COPY_AND_ASSIGN(DisableBreak);
 };
 
index abb0467..4e9a052 100644 (file)
@@ -560,9 +560,7 @@ class ElementsAccessorBase : public ElementsAccessor {
   typedef ElementsTraitsParam ElementsTraits;
   typedef typename ElementsTraitsParam::BackingStore BackingStore;
 
-  virtual ElementsKind kind() const FINAL OVERRIDE {
-    return ElementsTraits::Kind;
-  }
+  ElementsKind kind() const FINAL { return ElementsTraits::Kind; }
 
   static void ValidateContents(Handle<JSObject> holder, int length) {
   }
@@ -584,7 +582,7 @@ class ElementsAccessorBase : public ElementsAccessor {
     ElementsAccessorSubclass::ValidateContents(holder, length);
   }
 
-  virtual void Validate(Handle<JSObject> holder) FINAL OVERRIDE {
+  void Validate(Handle<JSObject> holder) FINAL {
     DisallowHeapAllocation no_gc;
     ElementsAccessorSubclass::ValidateImpl(holder);
   }
@@ -597,20 +595,16 @@ class ElementsAccessorBase : public ElementsAccessor {
         receiver, holder, key, backing_store) != ABSENT;
   }
 
-  virtual bool HasElement(
-      Handle<Object> receiver,
-      Handle<JSObject> holder,
-      uint32_t key,
-      Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
+  virtual bool HasElement(Handle<Object> receiver, Handle<JSObject> holder,
+                          uint32_t key,
+                          Handle<FixedArrayBase> backing_store) FINAL {
     return ElementsAccessorSubclass::HasElementImpl(
         receiver, holder, key, backing_store);
   }
 
   MUST_USE_RESULT virtual MaybeHandle<Object> Get(
-      Handle<Object> receiver,
-      Handle<JSObject> holder,
-      uint32_t key,
-      Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
+      Handle<Object> receiver, Handle<JSObject> holder, uint32_t key,
+      Handle<FixedArrayBase> backing_store) FINAL {
     if (!IsExternalArrayElementsKind(ElementsTraits::Kind) &&
         FLAG_trace_js_array_abuse) {
       CheckArrayAbuse(holder, "elements read", key);
@@ -638,10 +632,8 @@ class ElementsAccessorBase : public ElementsAccessor {
   }
 
   MUST_USE_RESULT virtual PropertyAttributes GetAttributes(
-      Handle<Object> receiver,
-      Handle<JSObject> holder,
-      uint32_t key,
-      Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
+      Handle<Object> receiver, Handle<JSObject> holder, uint32_t key,
+      Handle<FixedArrayBase> backing_store) FINAL {
     return ElementsAccessorSubclass::GetAttributesImpl(
         receiver, holder, key, backing_store);
   }
@@ -660,10 +652,8 @@ class ElementsAccessorBase : public ElementsAccessor {
   }
 
   MUST_USE_RESULT virtual MaybeHandle<AccessorPair> GetAccessorPair(
-      Handle<Object> receiver,
-      Handle<JSObject> holder,
-      uint32_t key,
-      Handle<FixedArrayBase> backing_store) FINAL OVERRIDE {
+      Handle<Object> receiver, Handle<JSObject> holder, uint32_t key,
+      Handle<FixedArrayBase> backing_store) FINAL {
     return ElementsAccessorSubclass::GetAccessorPairImpl(
         receiver, holder, key, backing_store);
   }
@@ -677,8 +667,7 @@ class ElementsAccessorBase : public ElementsAccessor {
   }
 
   MUST_USE_RESULT virtual MaybeHandle<Object> SetLength(
-      Handle<JSArray> array,
-      Handle<Object> length) FINAL OVERRIDE {
+      Handle<JSArray> array, Handle<Object> length) FINAL {
     return ElementsAccessorSubclass::SetLengthImpl(
         array, length, handle(array->elements()));
   }
@@ -688,10 +677,8 @@ class ElementsAccessorBase : public ElementsAccessor {
       Handle<Object> length,
       Handle<FixedArrayBase> backing_store);
 
-  virtual void SetCapacityAndLength(
-      Handle<JSArray> array,
-      int capacity,
-      int length) FINAL OVERRIDE {
+  virtual void SetCapacityAndLength(Handle<JSArray> array, int capacity,
+                                    int length) FINAL {
     ElementsAccessorSubclass::
         SetFastElementsCapacityAndLength(array, capacity, length);
   }
@@ -715,13 +702,9 @@ class ElementsAccessorBase : public ElementsAccessor {
     UNREACHABLE();
   }
 
-  virtual void CopyElements(
-      Handle<FixedArrayBase> from,
-      uint32_t from_start,
-      ElementsKind from_kind,
-      Handle<FixedArrayBase> to,
-      uint32_t to_start,
-      int copy_size) FINAL OVERRIDE {
+  virtual void CopyElements(Handle<FixedArrayBase> from, uint32_t from_start,
+                            ElementsKind from_kind, Handle<FixedArrayBase> to,
+                            uint32_t to_start, int copy_size) FINAL {
     DCHECK(!from.is_null());
     // NOTE: the ElementsAccessorSubclass::CopyElementsImpl() methods
     // violate the handlified function signature convention:
@@ -734,13 +717,9 @@ class ElementsAccessorBase : public ElementsAccessor {
                                                kPackedSizeNotKnown, copy_size);
   }
 
-  virtual void CopyElements(
-      JSObject* from_holder,
-      uint32_t from_start,
-      ElementsKind from_kind,
-      Handle<FixedArrayBase> to,
-      uint32_t to_start,
-      int copy_size) FINAL OVERRIDE {
+  virtual void CopyElements(JSObject* from_holder, uint32_t from_start,
+                            ElementsKind from_kind, Handle<FixedArrayBase> to,
+                            uint32_t to_start, int copy_size) FINAL {
     int packed_size = kPackedSizeNotKnown;
     bool is_packed = IsFastPackedElementsKind(from_kind) &&
         from_holder->IsJSArray();
@@ -766,10 +745,8 @@ class ElementsAccessorBase : public ElementsAccessor {
   }
 
   virtual MaybeHandle<FixedArray> AddElementsToFixedArray(
-      Handle<Object> receiver,
-      Handle<JSObject> holder,
-      Handle<FixedArray> to,
-      Handle<FixedArrayBase> from) FINAL OVERRIDE {
+      Handle<Object> receiver, Handle<JSObject> holder, Handle<FixedArray> to,
+      Handle<FixedArrayBase> from, FixedArray::KeyFilter filter) FINAL {
     int len0 = to->length();
 #ifdef ENABLE_SLOW_DCHECKS
     if (FLAG_enable_slow_asserts) {
@@ -799,6 +776,9 @@ class ElementsAccessorBase : public ElementsAccessor {
             FixedArray);
 
         DCHECK(!value->IsTheHole());
+        if (filter == FixedArray::NON_SYMBOL_KEYS && value->IsSymbol()) {
+          continue;
+        }
         if (!HasKey(to, value)) {
           extra++;
         }
@@ -832,6 +812,9 @@ class ElementsAccessorBase : public ElementsAccessor {
             isolate, value,
             ElementsAccessorSubclass::GetImpl(receiver, holder, key, from),
             FixedArray);
+        if (filter == FixedArray::NON_SYMBOL_KEYS && value->IsSymbol()) {
+          continue;
+        }
         if (!value->IsTheHole() && !HasKey(to, value)) {
           result->set(len0 + index, *value);
           index++;
@@ -847,8 +830,7 @@ class ElementsAccessorBase : public ElementsAccessor {
     return backing_store->length();
   }
 
-  virtual uint32_t GetCapacity(Handle<FixedArrayBase> backing_store)
-      FINAL OVERRIDE {
+  uint32_t GetCapacity(Handle<FixedArrayBase> backing_store) FINAL {
     return ElementsAccessorSubclass::GetCapacityImpl(backing_store);
   }
 
@@ -858,7 +840,7 @@ class ElementsAccessorBase : public ElementsAccessor {
   }
 
   virtual uint32_t GetKeyForIndex(Handle<FixedArrayBase> backing_store,
-                                  uint32_t index) FINAL OVERRIDE {
+                                  uint32_t index) FINAL {
     return ElementsAccessorSubclass::GetKeyForIndexImpl(backing_store, index);
   }
 
@@ -992,10 +974,8 @@ class FastElementsAccessor
     return isolate->factory()->true_value();
   }
 
-  virtual MaybeHandle<Object> Delete(
-      Handle<JSObject> obj,
-      uint32_t key,
-      JSReceiver::DeleteMode mode) FINAL OVERRIDE {
+  virtual MaybeHandle<Object> Delete(Handle<JSObject> obj, uint32_t key,
+                                     JSReceiver::DeleteMode mode) FINAL {
     return DeleteCommon(obj, key, mode);
   }
 
@@ -1318,9 +1298,7 @@ class TypedElementsAccessor
   }
 
   MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
-      Handle<JSObject> obj,
-      uint32_t key,
-      JSReceiver::DeleteMode mode) FINAL OVERRIDE {
+      Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) FINAL {
     // External arrays always ignore deletes.
     return obj->GetIsolate()->factory()->true_value();
   }
@@ -1472,9 +1450,7 @@ class DictionaryElementsAccessor
                                     ElementsKindTraits<DICTIONARY_ELEMENTS> >;
 
   MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
-      Handle<JSObject> obj,
-      uint32_t key,
-      JSReceiver::DeleteMode mode) FINAL OVERRIDE {
+      Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) FINAL {
     return DeleteCommon(obj, key, mode);
   }
 
@@ -1646,9 +1622,7 @@ class SloppyArgumentsElementsAccessor : public ElementsAccessorBase<
   }
 
   MUST_USE_RESULT virtual MaybeHandle<Object> Delete(
-      Handle<JSObject> obj,
-      uint32_t key,
-      JSReceiver::DeleteMode mode) FINAL OVERRIDE {
+      Handle<JSObject> obj, uint32_t key, JSReceiver::DeleteMode mode) FINAL {
     Isolate* isolate = obj->GetIsolate();
     Handle<FixedArray> parameter_map(FixedArray::cast(obj->elements()));
     Handle<Object> probe = GetParameterMapArg(obj, parameter_map, key);
index f4de4bb..05354ea 100644 (file)
@@ -167,22 +167,19 @@ class ElementsAccessor {
   }
 
   MUST_USE_RESULT virtual MaybeHandle<FixedArray> AddElementsToFixedArray(
-      Handle<Object> receiver,
-      Handle<JSObject> holder,
-      Handle<FixedArray> to,
-      Handle<FixedArrayBase> from) = 0;
+      Handle<Object> receiver, Handle<JSObject> holder, Handle<FixedArray> to,
+      Handle<FixedArrayBase> from, FixedArray::KeyFilter filter) = 0;
 
   MUST_USE_RESULT inline MaybeHandle<FixedArray> AddElementsToFixedArray(
-      Handle<Object> receiver,
-      Handle<JSObject> holder,
-      Handle<FixedArray> to) {
-    return AddElementsToFixedArray(
-        receiver, holder, to, handle(holder->elements()));
+      Handle<Object> receiver, Handle<JSObject> holder, Handle<FixedArray> to,
+      FixedArray::KeyFilter filter) {
+    return AddElementsToFixedArray(receiver, holder, to,
+                                   handle(holder->elements()), filter);
   }
 
   // Returns a shared ElementsAccessor for the specified ElementsKind.
   static ElementsAccessor* ForKind(ElementsKind elements_kind) {
-    DCHECK(elements_kind < kElementsKindCount);
+    DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
     return elements_accessors_[elements_kind];
   }
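
The new FixedArray::KeyFilter argument lets key collection skip
symbol-valued entries, matching the ES6 rule that symbol properties stay
out of string-keyed enumerations. The observable behavior, illustratively:

    var obj = {};
    obj[Symbol("hidden")] = 1;
    obj.visible = 2;
    Object.keys(obj);                   // ["visible"] (NON_SYMBOL_KEYS)
    Object.getOwnPropertySymbols(obj);  // [Symbol(hidden)]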
 
index cfe61f9..a85effd 100644 (file)
@@ -545,6 +545,12 @@ MaybeHandle<Object> Execution::ToInt32(
 }
 
 
+MaybeHandle<Object> Execution::ToLength(
+    Isolate* isolate, Handle<Object> obj) {
+  RETURN_NATIVE_CALL(to_length, { obj });
+}
+
+
 MaybeHandle<Object> Execution::NewDate(Isolate* isolate, double time) {
   Handle<Object> time_obj = isolate->factory()->NewNumber(time);
   RETURN_NATIVE_CALL(create_date, { time_obj });
@@ -707,8 +713,8 @@ Object* StackGuard::HandleInterrupts() {
   }
 
   if (CheckAndClearInterrupt(API_INTERRUPT)) {
-    // Callback must be invoked outside of ExecusionAccess lock.
-    isolate_->InvokeApiInterruptCallback();
+    // Callbacks must be invoked outside of ExecutionAccess lock.
+    isolate_->InvokeApiInterruptCallbacks();
   }
 
   isolate_->counters()->stack_interrupts()->Increment();
index 89175cd..ae263bd 100644 (file)
@@ -69,6 +69,11 @@ class Execution FINAL : public AllStatic {
   MUST_USE_RESULT static MaybeHandle<Object> ToUint32(
       Isolate* isolate, Handle<Object> obj);
 
+
+  // ES6, draft 10-14-14, section 7.1.15
+  MUST_USE_RESULT static MaybeHandle<Object> ToLength(
+      Isolate* isolate, Handle<Object> obj);
+
   // ECMA-262 9.8
   MUST_USE_RESULT static MaybeHandle<Object> ToString(
       Isolate* isolate, Handle<Object> obj);
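
Execution::ToLength wires up the ES6 ToLength conversion cited above, which
clamps any input to an integer in [0, 2^53 - 1]. A minimal JavaScript
sketch of the semantics, not V8's implementation:

    function toLength(value) {
      var len = Number(value);  // ToNumber, then clamp as ToInteger would
      if (Number.isNaN(len) || len <= 0) return 0;
      return Math.min(Math.floor(len), Math.pow(2, 53) - 1);
    }
    toLength("17.9");    // 17
    toLength(-5);        // 0
    toLength(Infinity);  // 9007199254740991
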
index 72974a3..ba62341 100644 (file)
@@ -139,12 +139,12 @@ Handle<ConstantPoolArray> Factory::NewExtendedConstantPoolArray(
 
 
 Handle<OrderedHashSet> Factory::NewOrderedHashSet() {
-  return OrderedHashSet::Allocate(isolate(), 4);
+  return OrderedHashSet::Allocate(isolate(), OrderedHashSet::kMinCapacity);
 }
 
 
 Handle<OrderedHashMap> Factory::NewOrderedHashMap() {
-  return OrderedHashMap::Allocate(isolate(), 4);
+  return OrderedHashMap::Allocate(isolate(), OrderedHashMap::kMinCapacity);
 }
 
 
@@ -693,21 +693,31 @@ Handle<Context> Factory::NewNativeContext() {
 }
 
 
-Handle<Context> Factory::NewGlobalContext(Handle<JSFunction> function,
+Handle<Context> Factory::NewScriptContext(Handle<JSFunction> function,
                                           Handle<ScopeInfo> scope_info) {
   Handle<FixedArray> array =
       NewFixedArray(scope_info->ContextLength(), TENURED);
-  array->set_map_no_write_barrier(*global_context_map());
+  array->set_map_no_write_barrier(*script_context_map());
   Handle<Context> context = Handle<Context>::cast(array);
   context->set_closure(*function);
   context->set_previous(function->context());
   context->set_extension(*scope_info);
   context->set_global_object(function->context()->global_object());
-  DCHECK(context->IsGlobalContext());
+  DCHECK(context->IsScriptContext());
   return context;
 }
 
 
+Handle<ScriptContextTable> Factory::NewScriptContextTable() {
+  Handle<FixedArray> array = NewFixedArray(1);
+  array->set_map_no_write_barrier(*script_context_table_map());
+  Handle<ScriptContextTable> context_table =
+      Handle<ScriptContextTable>::cast(array);
+  context_table->set_used(0);
+  return context_table;
+}
+
+
 Handle<Context> Factory::NewModuleContext(Handle<ScopeInfo> scope_info) {
   Handle<FixedArray> array =
       NewFixedArray(scope_info->ContextLength(), TENURED);
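
NewGlobalContext becomes NewScriptContext, and the new ScriptContextTable
keeps one such context per script: top-level lexical declarations live
there rather than on the global object. A sketch of the user-visible
difference under --harmony-scoping (illustrative):

    "use strict";
    var a = 1;  // still a property of the global object
    let b = 2;  // lives in the script context
    this.a;     // 1
    this.b;     // undefined: not reflected on the global object
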
@@ -792,6 +802,7 @@ Handle<CodeCache> Factory::NewCodeCache() {
       Handle<CodeCache>::cast(NewStruct(CODE_CACHE_TYPE));
   code_cache->set_default_cache(*empty_fixed_array(), SKIP_WRITE_BARRIER);
   code_cache->set_normal_type_cache(*undefined_value(), SKIP_WRITE_BARRIER);
+  code_cache->set_weak_cell_cache(*undefined_value(), SKIP_WRITE_BARRIER);
   return code_cache;
 }
 
@@ -1024,7 +1035,7 @@ Handle<Object> Factory::NewNumber(double value,
   // patterns is faster than using fpclassify() et al.
   if (IsMinusZero(value)) return NewHeapNumber(-0.0, IMMUTABLE, pretenure);
 
-  int int_value = FastD2I(value);
+  int int_value = FastD2IChecked(value);
   if (value == int_value && Smi::IsValid(int_value)) {
     return handle(Smi::FromInt(int_value), isolate());
   }
@@ -1298,12 +1309,11 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
 }
 
 
-Handle<JSFunction> Factory::NewFunction(Handle<String> name,
-                                        Handle<Code> code,
+Handle<JSFunction> Factory::NewFunction(Handle<String> name, Handle<Code> code,
                                         Handle<Object> prototype,
-                                        InstanceType type,
-                                        int instance_size,
-                                        bool read_only_prototype) {
+                                        InstanceType type, int instance_size,
+                                        bool read_only_prototype,
+                                        bool install_constructor) {
   // Allocate the function
   Handle<JSFunction> function = NewFunction(
       name, code, prototype, read_only_prototype);
@@ -1311,8 +1321,13 @@ Handle<JSFunction> Factory::NewFunction(Handle<String> name,
   ElementsKind elements_kind =
       type == JS_ARRAY_TYPE ? FAST_SMI_ELEMENTS : FAST_HOLEY_SMI_ELEMENTS;
   Handle<Map> initial_map = NewMap(type, instance_size, elements_kind);
-  if (prototype->IsTheHole() && !function->shared()->is_generator()) {
-    prototype = NewFunctionPrototype(function);
+  if (!function->shared()->is_generator()) {
+    if (prototype->IsTheHole()) {
+      prototype = NewFunctionPrototype(function);
+    } else if (install_constructor) {
+      JSObject::AddProperty(Handle<JSObject>::cast(prototype),
+                            constructor_string(), function, DONT_ENUM);
+    }
   }
 
   JSFunction::SetInitialMap(function, initial_map,
@@ -1358,6 +1373,14 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
 }
 
 
+static bool ShouldOptimizeNewClosure(Isolate* isolate,
+                                     Handle<SharedFunctionInfo> info) {
+  return isolate->use_crankshaft() && !info->is_toplevel() &&
+         info->is_compiled() && info->allows_lazy_compilation() &&
+         !info->optimization_disabled() && !isolate->DebuggerHasBreakPoints();
+}
+
+
 Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
     Handle<SharedFunctionInfo> info,
     Handle<Context> context,
@@ -1395,13 +1418,7 @@ Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
     return result;
   }
 
-  if (isolate()->use_crankshaft() &&
-      FLAG_always_opt &&
-      result->is_compiled() &&
-      !info->is_toplevel() &&
-      info->allows_lazy_compilation() &&
-      !info->optimization_disabled() &&
-      !isolate()->DebuggerHasBreakPoints()) {
+  if (FLAG_always_opt && ShouldOptimizeNewClosure(isolate(), info)) {
     result->MarkForOptimization();
   }
   return result;
@@ -1575,7 +1592,7 @@ Handle<GlobalObject> Factory::NewGlobalObject(Handle<JSFunction> constructor) {
   for (int i = 0; i < map->NumberOfOwnDescriptors(); i++) {
     PropertyDetails details = descs->GetDetails(i);
     DCHECK(details.type() == CALLBACKS);  // Only accessors are expected.
-    PropertyDetails d = PropertyDetails(details.attributes(), CALLBACKS, i + 1);
+    PropertyDetails d(details.attributes(), CALLBACKS, i + 1);
     Handle<Name> name(descs->GetKey(i));
     Handle<Object> value(descs->GetCallbacksObject(i), isolate());
     Handle<PropertyCell> cell = NewPropertyCell(value);
@@ -1665,6 +1682,7 @@ void Factory::NewJSArrayStorage(Handle<JSArray> array,
     return;
   }
 
+  HandleScope inner_scope(isolate());
   Handle<FixedArrayBase> elms;
   ElementsKind elements_kind = array->GetElementsKind();
   if (IsFastDoubleElementsKind(elements_kind)) {
@@ -1861,7 +1879,7 @@ Handle<JSProxy> Factory::NewJSProxy(Handle<Object> handler,
   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
   // maps. Will probably depend on the identity of the handler object, too.
   Handle<Map> map = NewMap(JS_PROXY_TYPE, JSProxy::kSize);
-  map->set_prototype(*prototype);
+  map->SetPrototype(prototype);
 
   // Allocate the proxy object.
   Handle<JSProxy> result = New<JSProxy>(map, NEW_SPACE);
@@ -1880,7 +1898,7 @@ Handle<JSProxy> Factory::NewJSFunctionProxy(Handle<Object> handler,
   // TODO(rossberg): Once we optimize proxies, think about a scheme to share
   // maps. Will probably depend on the identity of the handler object, too.
   Handle<Map> map = NewMap(JS_FUNCTION_PROXY_TYPE, JSFunctionProxy::kSize);
-  map->set_prototype(*prototype);
+  map->SetPrototype(prototype);
 
   // Allocate the proxy object.
   Handle<JSFunctionProxy> result = New<JSFunctionProxy>(map, NEW_SPACE);
@@ -1905,7 +1923,7 @@ void Factory::ReinitializeJSProxy(Handle<JSProxy> proxy, InstanceType type,
   int size_difference = proxy->map()->instance_size() - map->instance_size();
   DCHECK(size_difference >= 0);
 
-  map->set_prototype(proxy->map()->prototype());
+  map->SetPrototype(handle(proxy->map()->prototype(), proxy->GetIsolate()));
 
   // Allocate the backing storage for the properties.
   int prop_size = map->InitialPropertiesLength();
@@ -1998,9 +2016,9 @@ void Factory::BecomeJSFunction(Handle<JSProxy> proxy) {
 }
 
 
-Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(int slot_count,
-                                                          int ic_slot_count) {
-  return TypeFeedbackVector::Allocate(isolate(), slot_count, ic_slot_count);
+Handle<TypeFeedbackVector> Factory::NewTypeFeedbackVector(
+    const FeedbackVectorSpec& spec) {
+  return TypeFeedbackVector::Allocate(isolate(), spec);
 }
 
 
@@ -2075,8 +2093,13 @@ Handle<SharedFunctionInfo> Factory::NewSharedFunctionInfo(
   share->set_script(*undefined_value(), SKIP_WRITE_BARRIER);
   share->set_debug_info(*undefined_value(), SKIP_WRITE_BARRIER);
   share->set_inferred_name(*empty_string(), SKIP_WRITE_BARRIER);
-  Handle<TypeFeedbackVector> feedback_vector = NewTypeFeedbackVector(0, 0);
+  FeedbackVectorSpec empty_spec;
+  Handle<TypeFeedbackVector> feedback_vector =
+      NewTypeFeedbackVector(empty_spec);
   share->set_feedback_vector(*feedback_vector, SKIP_WRITE_BARRIER);
+#if TRACE_MAPS
+  share->set_unique_id(isolate()->GetNextUniqueSharedFunctionInfoId());
+#endif
   share->set_profiler_ticks(0);
   share->set_ast_node_count(0);
   share->set_counters(0);
@@ -2134,7 +2157,7 @@ void Factory::SetNumberStringCache(Handle<Object> number,
       // cache in the snapshot to keep boot-time memory usage down.
       // If we expand the number string cache already while creating
       // the snapshot then that didn't work out.
-      DCHECK(!isolate()->serializer_enabled() || FLAG_extra_code != NULL);
+      DCHECK(!isolate()->serializer_enabled());
       Handle<FixedArray> new_cache = NewFixedArray(full_size, TENURED);
       isolate()->heap()->set_number_string_cache(*new_cache);
       return;
@@ -2265,8 +2288,8 @@ Handle<JSFunction> Factory::CreateApiFunction(
         break;
     }
 
-    result = NewFunction(empty_string(), code, prototype, type,
-                         instance_size, obj->read_only_prototype());
+    result = NewFunction(empty_string(), code, prototype, type, instance_size,
+                         obj->read_only_prototype(), true);
   }
 
   result->shared()->set_length(obj->length());
@@ -2286,19 +2309,13 @@ Handle<JSFunction> Factory::CreateApiFunction(
     return result;
   }
 
-  if (prototype->IsTheHole()) {
 #ifdef DEBUG
-    LookupIterator it(handle(JSObject::cast(result->prototype())),
-                      constructor_string(),
-                      LookupIterator::OWN_SKIP_INTERCEPTOR);
-    MaybeHandle<Object> maybe_prop = Object::GetProperty(&it);
-    DCHECK(it.IsFound());
-    DCHECK(maybe_prop.ToHandleChecked().is_identical_to(result));
+  LookupIterator it(handle(JSObject::cast(result->prototype())),
+                    constructor_string(), LookupIterator::OWN_SKIP_INTERCEPTOR);
+  MaybeHandle<Object> maybe_prop = Object::GetProperty(&it);
+  DCHECK(it.IsFound());
+  DCHECK(maybe_prop.ToHandleChecked().is_identical_to(result));
 #endif
-  } else {
-    JSObject::AddProperty(handle(JSObject::cast(result->prototype())),
-                          constructor_string(), result, DONT_ENUM);
-  }
 
   // Down from here is only valid for API functions that can be used as a
   // constructor (don't set the "remove prototype" flag).
@@ -2407,35 +2424,42 @@ Handle<JSFunction> Factory::CreateApiFunction(
 }
 
 
-Handle<MapCache> Factory::AddToMapCache(Handle<Context> context,
-                                        Handle<FixedArray> keys,
-                                        Handle<Map> map) {
-  Handle<MapCache> map_cache = handle(MapCache::cast(context->map_cache()));
-  Handle<MapCache> result = MapCache::Put(map_cache, keys, map);
-  context->set_map_cache(*result);
-  return result;
-}
-
-
 Handle<Map> Factory::ObjectLiteralMapFromCache(Handle<Context> context,
-                                               Handle<FixedArray> keys) {
+                                               int number_of_properties,
+                                               bool* is_result_from_cache) {
+  const int kMapCacheSize = 128;
+
+  if (number_of_properties > kMapCacheSize) {
+    *is_result_from_cache = false;
+    return Map::Create(isolate(), number_of_properties);
+  }
+  *is_result_from_cache = true;
+  if (number_of_properties == 0) {
+    // Reuse the initial map of the Object function if the literal has no
+    // predeclared properties.
+    return handle(context->object_function()->initial_map(), isolate());
+  }
+  int cache_index = number_of_properties - 1;
   if (context->map_cache()->IsUndefined()) {
     // Allocate the new map cache for the native context.
-    Handle<MapCache> new_cache = MapCache::New(isolate(), 24);
+    Handle<FixedArray> new_cache = NewFixedArray(kMapCacheSize, TENURED);
     context->set_map_cache(*new_cache);
   }
   // Check to see whether there is a matching element in the cache.
-  Handle<MapCache> cache =
-      Handle<MapCache>(MapCache::cast(context->map_cache()));
-  Handle<Object> result = Handle<Object>(cache->Lookup(*keys), isolate());
-  if (result->IsMap()) return Handle<Map>::cast(result);
-  int length = keys->length();
-  // Create a new map and add it to the cache. Reuse the initial map of the
-  // Object function if the literal has no predeclared properties.
-  Handle<Map> map = length == 0
-                        ? handle(context->object_function()->initial_map())
-                        : Map::Create(isolate(), length);
-  AddToMapCache(context, keys, map);
+  Handle<FixedArray> cache(FixedArray::cast(context->map_cache()));
+  {
+    Object* result = cache->get(cache_index);
+    if (result->IsWeakCell()) {
+      WeakCell* cell = WeakCell::cast(result);
+      if (!cell->cleared()) {
+        return handle(Map::cast(cell->value()), isolate());
+      }
+    }
+  }
+  // Create a new map and add it to the cache.
+  Handle<Map> map = Map::Create(isolate(), number_of_properties);
+  Handle<WeakCell> cell = NewWeakCell(map);
+  cache->set(cache_index, *cell);
   return map;
 }
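
The object-literal map cache is now a 128-entry FixedArray of WeakCells
indexed by property count, replacing the MapCache keyed by the ordered set
of names: literals with the same number of predeclared properties start
from one shared cached map and then diverge through transitions. Roughly
(illustrative):

    var p1 = { x: 1, y: 2 };  // two properties: slot 1 holds the start map
    var p2 = { a: 3, b: 4 };  // same slot, same start map; keys then diverge
    var empty = {};           // reuses the Object function's initial map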
 
@@ -2454,6 +2478,7 @@ void Factory::SetRegExpAtomData(Handle<JSRegExp> regexp,
   regexp->set_data(*store);
 }
 
+
 void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
                                     JSRegExp::Type type,
                                     Handle<String> source,
@@ -2475,7 +2500,6 @@ void Factory::SetRegExpIrregexpData(Handle<JSRegExp> regexp,
 }
 
 
-
 MaybeHandle<FunctionTemplateInfo> Factory::ConfigureInstance(
     Handle<FunctionTemplateInfo> desc, Handle<JSObject> instance) {
   // Configure the instance by adding the properties specified by the
index 9f9813c..24a6647 100644 (file)
@@ -10,8 +10,9 @@
 namespace v8 {
 namespace internal {
 
-// Interface for handle based allocation.
+class FeedbackVectorSpec;
 
+// Interface for handle based allocation.
 class Factory FINAL {
  public:
   Handle<Oddball> NewOddball(Handle<Map> map,
@@ -225,10 +226,13 @@ class Factory FINAL {
   // Create a global (but otherwise uninitialized) context.
   Handle<Context> NewNativeContext();
 
-  // Create a global context.
-  Handle<Context> NewGlobalContext(Handle<JSFunction> function,
+  // Create a script context.
+  Handle<Context> NewScriptContext(Handle<JSFunction> function,
                                    Handle<ScopeInfo> scope_info);
 
+  // Create an empty script context table.
+  Handle<ScriptContextTable> NewScriptContextTable();
+
   // Create a module context.
   Handle<Context> NewModuleContext(Handle<ScopeInfo> scope_info);
 
@@ -482,12 +486,11 @@ class Factory FINAL {
       Handle<Context> context,
       PretenureFlag pretenure = TENURED);
 
-  Handle<JSFunction> NewFunction(Handle<String> name,
-                                 Handle<Code> code,
-                                 Handle<Object> prototype,
-                                 InstanceType type,
+  Handle<JSFunction> NewFunction(Handle<String> name, Handle<Code> code,
+                                 Handle<Object> prototype, InstanceType type,
                                  int instance_size,
-                                 bool read_only_prototype = false);
+                                 bool read_only_prototype = false,
+                                 bool install_constructor = false);
   Handle<JSFunction> NewFunction(Handle<String> name,
                                  Handle<Code> code,
                                  InstanceType type,
@@ -609,6 +612,14 @@ class Factory FINAL {
   PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
 #undef SYMBOL_ACCESSOR
 
+#define SYMBOL_ACCESSOR(name, varname, description)             \
+  inline Handle<Symbol> name() {                                \
+    return Handle<Symbol>(bit_cast<Symbol**>(                   \
+        &isolate()->heap()->roots_[Heap::k##name##RootIndex])); \
+  }
+  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
   inline void set_string_table(Handle<StringTable> table) {
     isolate()->heap()->set_string_table(*table);
   }
@@ -626,8 +637,8 @@ class Factory FINAL {
                                                    MaybeHandle<Code> code);
 
   // Allocate a new type feedback vector
-  Handle<TypeFeedbackVector> NewTypeFeedbackVector(int slot_count,
-                                                   int ic_slot_count);
+  Handle<TypeFeedbackVector> NewTypeFeedbackVector(
+      const FeedbackVectorSpec& spec);
 
   // Allocates a new JSMessageObject object.
   Handle<JSMessageObject> NewJSMessageObject(
@@ -640,10 +651,11 @@ class Factory FINAL {
 
   Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
 
-  // Return a map using the map cache in the native context.
-  // The key the an ordered set of property names.
+  // Return a map for a given number of properties using the map cache in the
+  // native context.
   Handle<Map> ObjectLiteralMapFromCache(Handle<Context> context,
-                                        Handle<FixedArray> keys);
+                                        int number_of_properties,
+                                        bool* is_result_from_cache);
 
   // Creates a new FixedArray that holds the data associated with the
   // atom regexp and stores it in the regexp.
@@ -686,14 +698,6 @@ class Factory FINAL {
   // Creates a code object that is not yet fully initialized yet.
   inline Handle<Code> NewCodeRaw(int object_size, bool immovable);
 
-  // Create a new map cache.
-  Handle<MapCache> NewMapCache(int at_least_space_for);
-
-  // Update the map cache in the native context with (keys, map)
-  Handle<MapCache> AddToMapCache(Handle<Context> context,
-                                 Handle<FixedArray> keys,
-                                 Handle<Map> map);
-
   // Attempt to find the number in a small cache.  If we find it, return
   // the string representation of the number.  Otherwise return undefined.
   Handle<Object> GetNumberStringCache(Handle<Object> number);
index 2558529..76b1116 100644 (file)
@@ -34,6 +34,8 @@ class FieldIndex FINAL {
     return IsInObjectBits::decode(bit_field_);
   }
 
+  bool is_hidden_field() const { return IsHiddenField::decode(bit_field_); }
+
   bool is_double() const {
     return IsDoubleBits::decode(bit_field_);
   }
@@ -55,7 +57,7 @@ class FieldIndex FINAL {
   // Zero-based from the first inobject property. Overflows to out-of-object
   // properties.
   int property_index() const {
-    DCHECK(!IsHiddenField::decode(bit_field_));
+    DCHECK(!is_hidden_field());
     int result = index() - first_inobject_property_offset() / kPointerSize;
     if (!is_inobject()) {
       result += InObjectPropertyBits::decode(bit_field_);
@@ -86,7 +88,7 @@ class FieldIndex FINAL {
   explicit FieldIndex(int bit_field) : bit_field_(bit_field) {}
 
   int first_inobject_property_offset() const {
-    DCHECK(!IsHiddenField::decode(bit_field_));
+    DCHECK(!is_hidden_field());
     return FirstInobjectPropertyOffsetBits::decode(bit_field_);
   }
 
index ca82ce7..6f36199 100644 (file)
@@ -16,6 +16,9 @@
 #define DEFINE_NEG_IMPLICATION(whenflag, thenflag)          \
   DEFINE_VALUE_IMPLICATION(whenflag, thenflag, false)
 
+#define DEFINE_NEG_NEG_IMPLICATION(whenflag, thenflag) \
+  DEFINE_NEG_VALUE_IMPLICATION(whenflag, thenflag, false)
+
 // We want to declare the names of the variables for the header file.  Normally
 // this will just be an extern declaration, but for a readonly flag we let the
 // compiler make better optimizations by giving it the value.
@@ -54,6 +57,9 @@
 #define DEFINE_VALUE_IMPLICATION(whenflag, thenflag, value) \
   if (FLAG_##whenflag) FLAG_##thenflag = value;
 
+#define DEFINE_NEG_VALUE_IMPLICATION(whenflag, thenflag, value) \
+  if (!FLAG_##whenflag) FLAG_##thenflag = value;
+
 #else
 #error No mode supplied when including flags.defs
 #endif
 #define DEFINE_VALUE_IMPLICATION(whenflag, thenflag, value)
 #endif
 
+#ifndef DEFINE_NEG_VALUE_IMPLICATION
+#define DEFINE_NEG_VALUE_IMPLICATION(whenflag, thenflag, value)
+#endif
+
 #define COMMA ,
 
 #ifdef FLAG_MODE_DECLARE
@@ -157,29 +167,33 @@ DEFINE_BOOL(use_strict, false, "enforce strict mode")
 
 DEFINE_BOOL(es_staging, false, "enable all completed harmony features")
 DEFINE_BOOL(harmony, false, "enable all completed harmony features")
+DEFINE_BOOL(harmony_shipping, true, "enable all shipped harmony features")
 DEFINE_IMPLICATION(harmony, es_staging)
-// TODO(rossberg): activate once we have staged scoping:
-// DEFINE_IMPLICATION(es_staging, harmony)
+DEFINE_IMPLICATION(es_staging, harmony)
 
 // Features that are still work in progress (behind individual flags).
-#define HARMONY_INPROGRESS(V)                                     \
-  V(harmony_scoping, "harmony block scoping")                     \
-  V(harmony_modules, "harmony modules (implies block scoping)")   \
-  V(harmony_arrays, "harmony array methods")                      \
-  V(harmony_classes,                                              \
-    "harmony classes (implies block scoping & object literal extension)") \
-  V(harmony_object_literals, "harmony object literal extensions") \
-  V(harmony_regexps, "harmony regular expression extensions")     \
-  V(harmony_arrow_functions, "harmony arrow functions")           \
-  V(harmony_tostring, "harmony toString")                         \
-  V(harmony_proxies, "harmony proxies")
+#define HARMONY_INPROGRESS(V)                                             \
+  V(harmony_modules, "harmony modules (implies block scoping)")           \
+  V(harmony_arrays, "harmony array methods")                              \
+  V(harmony_array_includes, "harmony Array.prototype.includes")           \
+  V(harmony_regexps, "harmony regular expression extensions")             \
+  V(harmony_arrow_functions, "harmony arrow functions")                   \
+  V(harmony_proxies, "harmony proxies")                                   \
+  V(harmony_sloppy, "harmony features in sloppy mode")                    \
+  V(harmony_unicode, "harmony unicode escapes")
 
 // Features that are complete (but still behind --harmony/es-staging flag).
-#define HARMONY_STAGED(V) V(harmony_strings, "harmony string methods")
+#define HARMONY_STAGED(V) V(harmony_tostring, "harmony toString")
 
 // Features that are shipping (turned on by default, but internal flag remains).
-#define HARMONY_SHIPPING(V) \
-  V(harmony_numeric_literals, "harmony numeric literals")
+#define HARMONY_SHIPPING(V)                                               \
+  V(harmony_numeric_literals, "harmony numeric literals")                 \
+  V(harmony_strings, "harmony string methods")                            \
+  V(harmony_scoping, "harmony block scoping")                             \
+  V(harmony_classes,                                                      \
+    "harmony classes (implies block scoping & object literal extension)") \
+  V(harmony_object_literals, "harmony object literal extensions")         \
+  V(harmony_templates, "harmony template literals")
 
 // Once a shipping feature has proved stable in the wild, it will be dropped
 // from HARMONY_SHIPPING, all occurrences of the FLAG_ variable are removed,
@@ -192,10 +206,6 @@ DEFINE_IMPLICATION(harmony, es_staging)
 HARMONY_INPROGRESS(FLAG_INPROGRESS_FEATURES)
 #undef FLAG_INPROGRESS_FEATURES
 
-// TODO(rossberg): temporary, remove once we have staged scoping.
-// After that, --harmony will be synonymous to --es-staging.
-DEFINE_IMPLICATION(harmony, harmony_scoping)
-
 #define FLAG_STAGED_FEATURES(id, description) \
   DEFINE_BOOL(id, false, "enable " #description) \
   DEFINE_IMPLICATION(es_staging, id)
@@ -203,7 +213,8 @@ HARMONY_STAGED(FLAG_STAGED_FEATURES)
 #undef FLAG_STAGED_FEATURES
 
 #define FLAG_SHIPPING_FEATURES(id, description) \
-  DEFINE_BOOL_READONLY(id, true, "enable " #description)
+  DEFINE_BOOL(id, true, "enable " #description) \
+  DEFINE_NEG_NEG_IMPLICATION(harmony_shipping, id)
 HARMONY_SHIPPING(FLAG_SHIPPING_FEATURES)
 #undef FLAG_SHIPPING_FEATURES
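
With DEFINE_NEG_NEG_IMPLICATION, each shipped feature stays an ordinary
flag that --no-harmony-shipping can turn off again, and the shipping set
now includes block scoping, classes, object literal extensions, and
template literals. Illustrative snippets of the newly shipped syntax:

    "use strict";
    let counter = 0;                                // harmony_scoping
    class Point { constructor(x) { this.x = x; } }  // harmony_classes
    var greeting = `count: ${counter}`;             // harmony_templates
    var shorthand = { counter };                    // harmony_object_literals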
 
@@ -217,8 +228,6 @@ DEFINE_IMPLICATION(harmony_classes, harmony_object_literals)
 // Flags for experimental implementation features.
 DEFINE_BOOL(compiled_keyed_generic_loads, false,
             "use optimizing compiler to generate keyed generic load stubs")
-DEFINE_BOOL(clever_optimizations, true,
-            "Optimize object size, Array shift, DOM strings and string +")
 // TODO(hpayer): We will remove this flag as soon as we have pretenuring
 // support for specific allocation sites.
 DEFINE_BOOL(pretenuring_call_new, false, "pretenure call new")
@@ -347,7 +356,6 @@ DEFINE_BOOL(job_based_recompilation, false,
             "post tasks to v8::Platform instead of using a thread for "
             "concurrent recompilation")
 DEFINE_IMPLICATION(job_based_recompilation, concurrent_recompilation)
-DEFINE_NEG_IMPLICATION(job_based_recompilation, block_concurrent_recompilation)
 DEFINE_BOOL(trace_concurrent_recompilation, false,
             "track concurrent recompilation")
 DEFINE_INT(concurrent_recompilation_queue_length, 8,
@@ -366,12 +374,15 @@ DEFINE_BOOL(omit_map_checks_for_leaf_maps, true,
 // Flags for TurboFan.
 DEFINE_STRING(turbo_filter, "~", "optimization filter for TurboFan compiler")
 DEFINE_BOOL(trace_turbo, false, "trace generated TurboFan IR")
+DEFINE_BOOL(trace_turbo_graph, false, "trace generated TurboFan graphs")
+DEFINE_IMPLICATION(trace_turbo_graph, trace_turbo)
 DEFINE_STRING(trace_turbo_cfg_file, NULL,
               "trace turbo cfg graph (for C1 visualizer) to a given file name")
 DEFINE_BOOL(trace_turbo_types, true, "trace TurboFan's types")
 DEFINE_BOOL(trace_turbo_scheduler, false, "trace TurboFan's scheduler")
 DEFINE_BOOL(trace_turbo_reduction, false, "trace TurboFan's various reducers")
-DEFINE_BOOL(turbo_asm, false, "enable TurboFan for asm.js code")
+DEFINE_BOOL(trace_turbo_jt, false, "trace TurboFan's jump threading")
+DEFINE_BOOL(turbo_asm, true, "enable TurboFan for asm.js code")
 DEFINE_BOOL(turbo_verify, false, "verify TurboFan graphs at each phase")
 DEFINE_BOOL(turbo_stats, false, "print TurboFan statistics")
 DEFINE_BOOL(turbo_types, true, "use typed lowering in TurboFan")
@@ -388,6 +399,14 @@ DEFINE_BOOL(loop_assignment_analysis, true, "perform loop assignment analysis")
 DEFINE_IMPLICATION(turbo_inlining_intrinsics, turbo_inlining)
 DEFINE_IMPLICATION(turbo_inlining, turbo_types)
 DEFINE_BOOL(turbo_profiling, false, "enable profiling in TurboFan")
+// TODO(dcarney): this is just for experimentation, remove when default.
+DEFINE_BOOL(turbo_reuse_spill_slots, true, "reuse spill slots in TurboFan")
+// TODO(dcarney): this is just for experimentation, remove when default.
+DEFINE_BOOL(turbo_delay_ssa_decon, false,
+            "delay ssa deconstruction in TurboFan register allocator")
+// TODO(dcarney): this is just for debugging, remove eventually.
+DEFINE_BOOL(turbo_move_optimization, true, "optimize gap moves in TurboFan")
+DEFINE_BOOL(turbo_jt, true, "enable jump threading")
 
 DEFINE_INT(typed_array_max_size_in_heap, 64,
            "threshold for in-heap typed array")
@@ -414,6 +433,8 @@ DEFINE_BOOL(enable_sse4_1, true,
             "enable use of SSE4.1 instructions if available")
 DEFINE_BOOL(enable_sahf, true,
             "enable use of SAHF instruction if available (X64 only)")
+DEFINE_BOOL(enable_avx, true, "enable use of AVX instructions if available")
+DEFINE_BOOL(enable_fma3, true, "enable use of FMA3 instructions if available")
 DEFINE_BOOL(enable_vfp3, ENABLE_VFP3_DEFAULT,
             "enable use of VFP3 instructions if available")
 DEFINE_BOOL(enable_armv7, ENABLE_ARMV7_DEFAULT,
@@ -438,11 +459,6 @@ DEFINE_BOOL(enable_vldr_imm, false,
 DEFINE_BOOL(force_long_branches, false,
             "force all emitted branches to be in long mode (MIPS only)")
 
-// cpu-arm64.cc
-DEFINE_BOOL(enable_always_align_csp, true,
-            "enable alignment of csp to 16 bytes on platforms which prefer "
-            "the register to always be aligned (ARM64 only)")
-
 // bootstrapper.cc
 DEFINE_STRING(expose_natives_as, NULL, "expose natives in global object")
 DEFINE_STRING(expose_debug_as, NULL, "expose debug in global object")
@@ -482,7 +498,8 @@ DEFINE_BOOL(trace_stub_failures, false,
             "trace deoptimization of generated code stubs")
 
 DEFINE_BOOL(serialize_toplevel, true, "enable caching of toplevel scripts")
-DEFINE_BOOL(trace_code_serializer, false, "print code serializer trace")
+DEFINE_BOOL(serialize_inner, false, "enable caching of inner functions")
+DEFINE_BOOL(trace_serializer, false, "print code serializer trace")
 
 // compiler.cc
 DEFINE_INT(min_preparse_length, 1024,
@@ -533,7 +550,12 @@ DEFINE_INT(target_semi_space_size, 0,
 DEFINE_INT(max_semi_space_size, 0,
            "max size of a semi-space (in MBytes), the new space consists of two"
            "semi-spaces")
+DEFINE_INT(semi_space_growth_factor, 2, "factor by which to grow the new space")
+DEFINE_BOOL(experimental_new_space_growth_heuristic, false,
+            "Grow the new space based on the percentage of survivors instead "
+            "of their absolute value.")
 DEFINE_INT(max_old_space_size, 0, "max size of the old space (in Mbytes)")
+DEFINE_INT(initial_old_space_size, 0, "initial old space size (in Mbytes)")
 DEFINE_INT(max_executable_size, 0, "max size of executable memory (in Mbytes)")
 DEFINE_BOOL(gc_global, false, "always perform global GCs")
 DEFINE_INT(gc_interval, -1, "garbage collect after <n> allocations")
@@ -546,6 +568,8 @@ DEFINE_BOOL(trace_gc_ignore_scavenger, false,
             "do not print trace line after scavenger collection")
 DEFINE_BOOL(trace_idle_notification, false,
             "print one trace line following each idle notification")
+DEFINE_BOOL(trace_idle_notification_verbose, false,
+            "prints the heap state used by the idle notification")
 DEFINE_BOOL(print_cumulative_gc_stat, false,
             "print cumulative GC statistics in name=value format on exit")
 DEFINE_BOOL(print_max_heap_committed, false,
@@ -557,8 +581,6 @@ DEFINE_BOOL(trace_fragmentation, false,
             "report fragmentation for old pointer and data pages")
 DEFINE_BOOL(collect_maps, true,
             "garbage collect maps from which no objects can be reached")
-DEFINE_BOOL(weak_embedded_maps_in_ic, true,
-            "make maps embedded in inline cache stubs")
 DEFINE_BOOL(weak_embedded_maps_in_optimized_code, true,
             "make maps embedded in optimized code weak")
 DEFINE_BOOL(weak_embedded_objects_in_optimized_code, true,
@@ -573,6 +595,7 @@ DEFINE_BOOL(age_code, true,
             "old code (required for code flushing)")
 DEFINE_BOOL(incremental_marking, true, "use incremental marking")
 DEFINE_BOOL(incremental_marking_steps, true, "do incremental marking steps")
+DEFINE_BOOL(concurrent_sweeping, true, "use concurrent sweeping")
 DEFINE_BOOL(trace_incremental_marking, false,
             "trace progress of the incremental marking")
 DEFINE_BOOL(track_gc_object_stats, false,
@@ -619,7 +642,13 @@ DEFINE_INT(random_seed, 0,
            "(0, the default, means to use system random).")
 
 // objects.cc
+DEFINE_BOOL(trace_weak_arrays, false, "trace WeakFixedArray usage")
+DEFINE_BOOL(track_prototype_users, false,
+            "keep track of which maps refer to a given prototype object")
 DEFINE_BOOL(use_verbose_printer, true, "allows verbose printing")
+#if TRACE_MAPS
+DEFINE_BOOL(trace_maps, false, "trace map creation")
+#endif
 
 // parser.cc
 DEFINE_BOOL(allow_natives_syntax, false, "allow natives syntax")
@@ -685,18 +714,8 @@ DEFINE_STRING(testing_serialization_file, "/tmp/serdes",
 #endif
 
 // mksnapshot.cc
-DEFINE_STRING(extra_code, NULL,
-              "A filename with extra code to be included in"
-              " the snapshot (mksnapshot only)")
-DEFINE_STRING(raw_file, NULL,
-              "A file to write the raw snapshot bytes to. "
-              "(mksnapshot only)")
-DEFINE_STRING(raw_context_file, NULL,
-              "A file to write the raw context "
-              "snapshot bytes to. (mksnapshot only)")
 DEFINE_STRING(startup_blob, NULL,
-              "Write V8 startup blob file. "
-              "(mksnapshot only)")
+              "Write V8 startup blob file. (mksnapshot only)")
 
 // code-stubs-hydrogen.cc
 DEFINE_BOOL(profile_hydrogen_code_stub_compilation, false,
@@ -705,6 +724,7 @@ DEFINE_BOOL(profile_hydrogen_code_stub_compilation, false,
 DEFINE_BOOL(predictable, false, "enable predictable mode")
 DEFINE_NEG_IMPLICATION(predictable, concurrent_recompilation)
 DEFINE_NEG_IMPLICATION(predictable, concurrent_osr)
+DEFINE_NEG_IMPLICATION(predictable, concurrent_sweeping)
 
 
 //
@@ -949,6 +969,11 @@ DEFINE_INT(dump_allocations_digest_at_alloc, 0,
 DEFINE_BOOL(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL,
             "enable use of out-of-line constant pools (ARM only)")
 
+DEFINE_BOOL(unbox_double_fields, V8_DOUBLE_FIELDS_UNBOXING,
+            "enable in-object double fields unboxing (64-bit only)")
+DEFINE_IMPLICATION(unbox_double_fields, track_double_fields)
+
+
 // Cleanup...
 #undef FLAG_FULL
 #undef FLAG_READONLY
@@ -963,6 +988,7 @@ DEFINE_BOOL(enable_ool_constant_pool, V8_OOL_CONSTANT_POOL,
 #undef DEFINE_ARGS
 #undef DEFINE_IMPLICATION
 #undef DEFINE_NEG_IMPLICATION
+#undef DEFINE_NEG_VALUE_IMPLICATION
 #undef DEFINE_VALUE_IMPLICATION
 #undef DEFINE_ALIAS_BOOL
 #undef DEFINE_ALIAS_INT
index e53c45e..2aa4e6b 100644 (file)
@@ -9,6 +9,7 @@
 #include "src/v8.h"
 
 #include "src/assembler.h"
+#include "src/base/functional.h"
 #include "src/base/platform/platform.h"
 #include "src/ostreams.h"
 
@@ -549,4 +550,17 @@ void FlagList::EnforceFlagImplications() {
 #undef FLAG_MODE_DEFINE_IMPLICATIONS
 }
 
+
+uint32_t FlagList::Hash() {
+  std::ostringstream modified_args_as_string;
+  for (size_t i = 0; i < num_flags; ++i) {
+    Flag* current = &flags[i];
+    if (!current->IsDefault()) {
+      modified_args_as_string << *current;
+    }
+  }
+  std::string args(modified_args_as_string.str());
+  return static_cast<uint32_t>(
+      base::hash_range(args.c_str(), args.c_str() + args.length()));
+}
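
Editor's note: FlagList::Hash() folds every non-default flag into a single 32-bit value, so a cached artifact (for example a serialized script) can be rejected quickly when V8 is relaunched with different flags. A rough JavaScript analogue of the idea — an illustrative sketch only, not the actual base::hash_range implementation:

    // Hash only the flags that differ from their defaults, so two runs
    // with identical effective flags produce identical hashes.
    function flagsHash(flags) {
      var s = "";
      for (var name in flags) {
        if (flags[name].value !== flags[name].defaultValue) {
          s += "--" + name + "=" + flags[name].value + " ";
        }
      }
      var h = 0;
      for (var i = 0; i < s.length; i++) {
        h = (h * 31 + s.charCodeAt(i)) >>> 0;  // simple 32-bit rolling hash
      }
      return h;
    }
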
 } }  // namespace v8::internal
index 78522ff..9ec5d30 100644 (file)
@@ -57,6 +57,9 @@ class FlagList {
 
   // Set flags as consequence of being implied by another flag.
   static void EnforceFlagImplications();
+
+  // Hash of current flags (to quickly determine flag changes).
+  static uint32_t Hash();
 };
 
 } }  // namespace v8::internal
index 58e5e97..cb8f4aa 100644 (file)
@@ -136,21 +136,19 @@ void BreakableStatementChecker::VisitWhileStatement(WhileStatement* stmt) {
 
 
 void BreakableStatementChecker::VisitForStatement(ForStatement* stmt) {
-  // Mark for statements breakable if the condition expression is.
-  if (stmt->cond() != NULL) {
-    Visit(stmt->cond());
-  }
+  // We set positions for both init and condition, if they exist.
+  if (stmt->cond() != NULL || stmt->init() != NULL) is_breakable_ = true;
 }
 
 
 void BreakableStatementChecker::VisitForInStatement(ForInStatement* stmt) {
-  // Mark for in statements breakable if the enumerable expression is.
-  Visit(stmt->enumerable());
+  // For-in is breakable because we set the position for the enumerable.
+  is_breakable_ = true;
 }
 
 
 void BreakableStatementChecker::VisitForOfStatement(ForOfStatement* stmt) {
-  // For-of is breakable because of the next() call.
+  // For-of is breakable because we set the position for the next() call.
   is_breakable_ = true;
 }
 
@@ -308,6 +306,9 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
 
   TimerEventScope<TimerEventCompileFullCode> timer(info->isolate());
 
+  // Ensure that the feedback vector is large enough.
+  info->EnsureFeedbackVector();
+
   Handle<Script> script = info->script();
   if (!script->IsUndefined() && !script->source()->IsUndefined()) {
     int len = String::cast(script->source())->length();
@@ -337,6 +338,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
   cgen.PopulateDeoptimizationData(code);
   cgen.PopulateTypeFeedbackInfo(code);
   code->set_has_deoptimization_support(info->HasDeoptimizationSupport());
+  code->set_has_reloc_info_for_serialization(info->will_serialize());
   code->set_handler_table(*cgen.handler_table());
   code->set_compiled_optimizable(info->IsOptimizable());
   code->set_allow_osr_at_loop_nesting_level(0);
@@ -603,7 +605,7 @@ void FullCodeGenerator::DoTest(const TestContext* context) {
 
 
 void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
-  DCHECK(scope_->is_global_scope());
+  DCHECK(scope_->is_script_scope());
 
   for (int i = 0; i < declarations->length(); i++) {
     ModuleDeclaration* declaration = declarations->at(i)->AsModuleDeclaration();
@@ -644,8 +646,8 @@ void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
 // modules themselves, however, are simple data properties.)
 //
 // All modules have a _hosting_ scope/context, which (currently) is the
-// (innermost) enclosing global scope. To deal with recursion, nested modules
-// are hosted by the same scope as global ones.
+// enclosing script scope. To deal with recursion, nested modules are hosted
+// by the same scope as global ones.
 //
 // For every (global or nested) module literal, the hosting context has an
 // internal slot that points directly to the respective module context. This
@@ -675,7 +677,7 @@ void FullCodeGenerator::AllocateModules(ZoneList<Declaration*>* declarations) {
 //
 // To deal with arbitrary recursion and aliases between modules,
 // they are created and initialized in several stages. Each stage applies to
-// all modules in the hosting global scope, including nested ones.
+// all modules in the hosting script scope, including nested ones.
 //
 // 1. Allocate: for each module _literal_, allocate the module contexts and
 //    respective instance object and wire them up. This happens in the
@@ -710,7 +712,7 @@ void FullCodeGenerator::VisitDeclarations(
     // This is a scope hosting modules. Allocate a descriptor array to pass
     // to the runtime for initialization.
     Comment cmnt(masm_, "[ Allocate modules");
-    DCHECK(scope_->is_global_scope());
+    DCHECK(scope_->is_script_scope());
     modules_ =
         isolate()->factory()->NewFixedArray(scope_->num_modules(), TENURED);
     module_index_ = 0;
@@ -1072,41 +1074,12 @@ void FullCodeGenerator::VisitBlock(Block* stmt) {
   NestedBlock nested_block(this, stmt);
   SetStatementPosition(stmt);
 
-  Scope* saved_scope = scope();
-  // Push a block context when entering a block with block scoped variables.
-  if (stmt->scope() == NULL) {
-    PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-  } else {
-    scope_ = stmt->scope();
-    DCHECK(!scope_->is_module_scope());
-    { Comment cmnt(masm_, "[ Extend block context");
-      __ Push(scope_->GetScopeInfo());
-      PushFunctionArgumentForContextAllocation();
-      __ CallRuntime(Runtime::kPushBlockContext, 2);
-
-      // Replace the context stored in the frame.
-      StoreToFrameField(StandardFrameConstants::kContextOffset,
-                        context_register());
-      PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
-    }
-    { Comment cmnt(masm_, "[ Declarations");
-      VisitDeclarations(scope_->declarations());
-      PrepareForBailoutForId(stmt->DeclsId(), NO_REGISTERS);
-    }
-  }
-
-  VisitStatements(stmt->statements());
-  scope_ = saved_scope;
-  __ bind(nested_block.break_label());
-
-  // Pop block context if necessary.
-  if (stmt->scope() != NULL) {
-    LoadContextField(context_register(), Context::PREVIOUS_INDEX);
-    // Update local stack frame context field.
-    StoreToFrameField(StandardFrameConstants::kContextOffset,
-                      context_register());
+  {
+    EnterBlockScopeIfNeeded block_scope_state(
+        this, stmt->scope(), stmt->EntryId(), stmt->DeclsId(), stmt->ExitId());
+    VisitStatements(stmt->statements());
+    __ bind(nested_block.break_label());
   }
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
 }
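
Editor's note: the block-context push/pop that used to be written out inline here is now handled by the EnterBlockScopeIfNeeded RAII helper (defined further down in this diff). A context is only needed when the block actually declares block-scoped variables; a sketch of the two cases:

    // No block-scoped declarations: stmt->scope() is NULL and no
    // block context is pushed.
    {
      var a = 1;  // 'var' hoists to the enclosing function scope
    }

    // 'let' gives the block its own scope, so the codegen pushes a
    // block context that the closure below can capture.
    {
      let b = 2;
      var f = function() { return b; };
    }
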
 
 
@@ -1345,6 +1318,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   SetStatementPosition(stmt);
 
   if (stmt->init() != NULL) {
+    SetStatementPosition(stmt->init());
     Visit(stmt->init());
   }
 
@@ -1359,6 +1333,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
   PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
   __ bind(loop_statement.continue_label());
   if (stmt->next() != NULL) {
+    SetStatementPosition(stmt->next());
     Visit(stmt->next());
   }
 
@@ -1371,6 +1346,7 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
 
   __ bind(&test);
   if (stmt->cond() != NULL) {
+    SetExpressionPosition(stmt->cond());
     VisitForControl(stmt->cond(),
                     &body,
                     loop_statement.break_label(),
@@ -1385,6 +1361,47 @@ void FullCodeGenerator::VisitForStatement(ForStatement* stmt) {
 }
 
 
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+  Comment cmnt(masm_, "[ ForOfStatement");
+  SetStatementPosition(stmt);
+
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // var iterator = iterable[Symbol.iterator]();
+  VisitForEffect(stmt->assign_iterator());
+
+  // Loop entry.
+  __ bind(loop_statement.continue_label());
+
+  // result = iterator.next()
+  SetExpressionPosition(stmt->next_result());
+  VisitForEffect(stmt->next_result());
+
+  // if (result.done) break;
+  Label result_not_done;
+  VisitForControl(stmt->result_done(), loop_statement.break_label(),
+                  &result_not_done, &result_not_done);
+  __ bind(&result_not_done);
+
+  // each = result.value
+  VisitForEffect(stmt->assign_each());
+
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Check stack before looping.
+  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+  __ jmp(loop_statement.continue_label());
+
+  // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ bind(loop_statement.break_label());
+  decrement_loop_depth();
+}
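
Editor's note: the new VisitForOfStatement walks the desugared form the parser builds (the assign_iterator/next_result/result_done/assign_each accessors mirrored in the comments above). Roughly, `for (each of iterable) body` compiles as:

    var iterator = iterable[Symbol.iterator]();  // assign_iterator
    var result;
    while (true) {
      result = iterator.next();                  // next_result
      if (result.done) break;                    // result_done
      each = result.value;                       // assign_each
      body;
    }
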
+
+
 void FullCodeGenerator::VisitTryCatchStatement(TryCatchStatement* stmt) {
   Comment cmnt(masm_, "[ TryCatchStatement");
   SetStatementPosition(stmt);
@@ -1563,30 +1580,38 @@ void FullCodeGenerator::VisitFunctionLiteral(FunctionLiteral* expr) {
 void FullCodeGenerator::VisitClassLiteral(ClassLiteral* lit) {
   Comment cmnt(masm_, "[ ClassLiteral");
 
-  if (lit->raw_name() != NULL) {
-    __ Push(lit->name());
-  } else {
-    __ Push(isolate()->factory()->undefined_value());
-  }
+  {
+    EnterBlockScopeIfNeeded block_scope_state(
+        this, lit->scope(), BailoutId::None(), BailoutId::None(),
+        BailoutId::None());
 
-  if (lit->extends() != NULL) {
-    VisitForStackValue(lit->extends());
-  } else {
-    __ Push(isolate()->factory()->the_hole_value());
-  }
+    if (lit->raw_name() != NULL) {
+      __ Push(lit->name());
+    } else {
+      __ Push(isolate()->factory()->undefined_value());
+    }
+
+    if (lit->extends() != NULL) {
+      VisitForStackValue(lit->extends());
+    } else {
+      __ Push(isolate()->factory()->the_hole_value());
+    }
 
-  if (lit->constructor() != NULL) {
     VisitForStackValue(lit->constructor());
-  } else {
-    __ Push(isolate()->factory()->undefined_value());
-  }
 
-  __ Push(script());
-  __ Push(Smi::FromInt(lit->start_position()));
-  __ Push(Smi::FromInt(lit->end_position()));
+    __ Push(script());
+    __ Push(Smi::FromInt(lit->start_position()));
+    __ Push(Smi::FromInt(lit->end_position()));
+
+    __ CallRuntime(Runtime::kDefineClass, 6);
+    EmitClassDefineProperties(lit);
 
-  __ CallRuntime(Runtime::kDefineClass, 6);
-  EmitClassDefineProperties(lit);
+    if (lit->scope() != NULL) {
+      DCHECK_NOT_NULL(lit->class_variable_proxy());
+      EmitVariableAssignment(lit->class_variable_proxy()->var(),
+                             Token::INIT_CONST);
+    }
+  }
 
   context()->Plug(result_register());
 }
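
Editor's note: the class literal now evaluates inside its own block scope (when lit->scope() is non-NULL), and the final EmitVariableAssignment gives the class name a const binding visible throughout the class body. The observable ES6 behavior:

    class C {
      m() { return C; }  // 'C' resolves through the class's own scope
    }
    new C().m() === C;   // true

    class D {
      m() { D = null; }  // the inner binding is const; assigning throws
    }
    // new D().m() throws TypeError: Assignment to constant variable.
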
@@ -1754,6 +1779,49 @@ bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) {
 #endif  // DEBUG
 
 
+FullCodeGenerator::EnterBlockScopeIfNeeded::EnterBlockScopeIfNeeded(
+    FullCodeGenerator* codegen, Scope* scope, BailoutId entry_id,
+    BailoutId declarations_id, BailoutId exit_id)
+    : codegen_(codegen), scope_(scope), exit_id_(exit_id) {
+  saved_scope_ = codegen_->scope();
+
+  if (scope == NULL) {
+    codegen_->PrepareForBailoutForId(entry_id, NO_REGISTERS);
+  } else {
+    codegen_->scope_ = scope;
+    {
+      Comment cmnt(masm(), "[ Extend block context");
+      __ Push(scope->GetScopeInfo());
+      codegen_->PushFunctionArgumentForContextAllocation();
+      __ CallRuntime(Runtime::kPushBlockContext, 2);
+
+      // Replace the context stored in the frame.
+      codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
+                                  codegen_->context_register());
+      codegen_->PrepareForBailoutForId(entry_id, NO_REGISTERS);
+    }
+    {
+      Comment cmnt(masm(), "[ Declarations");
+      codegen_->VisitDeclarations(scope->declarations());
+      codegen_->PrepareForBailoutForId(declarations_id, NO_REGISTERS);
+    }
+  }
+}
+
+
+FullCodeGenerator::EnterBlockScopeIfNeeded::~EnterBlockScopeIfNeeded() {
+  if (scope_ != NULL) {
+    codegen_->LoadContextField(codegen_->context_register(),
+                               Context::PREVIOUS_INDEX);
+    // Update local stack frame context field.
+    codegen_->StoreToFrameField(StandardFrameConstants::kContextOffset,
+                                codegen_->context_register());
+  }
+  codegen_->PrepareForBailoutForId(exit_id_, NO_REGISTERS);
+  codegen_->scope_ = saved_scope_;
+}
+
+
 #undef __
 
 
index f032854..1439942 100644 (file)
@@ -586,6 +586,19 @@ class FullCodeGenerator: public AstVisitor {
   // is expected in the accumulator.
   void EmitAssignment(Expression* expr);
 
+  // Should an error be thrown if an assignment with operation 'op' is
+  // performed on this variable in the given language mode?
+  static bool IsSignallingAssignmentToConst(Variable* var, Token::Value op,
+                                            StrictMode strict_mode) {
+    if (var->mode() == CONST) return op != Token::INIT_CONST;
+
+    if (var->mode() == CONST_LEGACY) {
+      return strict_mode == STRICT && op != Token::INIT_CONST_LEGACY;
+    }
+
+    return false;
+  }
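
Editor's note: in terms of observable behavior (under the ES6-style semantics of the --harmony-scoping era), the predicate distinguishes `const` bindings, where any non-initializing assignment throws, from legacy sloppy-mode `const`, where the assignment is silently dropped:

    "use strict";
    const x = 1;
    x = 2;  // CONST with op != INIT_CONST: throws TypeError

    // In sloppy mode (CONST_LEGACY) the same assignment is ignored:
    // const y = 1; y = 2;  // no error; y is still 1
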
+
   // Complete a variable assignment.  The right-hand-side value is expected
   // in the accumulator.
   void EmitVariableAssignment(Variable* var,
@@ -614,6 +627,15 @@ class FullCodeGenerator: public AstVisitor {
 
   void EmitLoadHomeObject(SuperReference* expr);
 
+  static bool NeedsHomeObject(Expression* expr) {
+    return FunctionLiteral::NeedsHomeObject(expr);
+  }
+
+  // Adds the [[HomeObject]] to |initializer| if it is a FunctionLiteral.
+  // The value of the initializer is expected to be at the top of the stack.
+  // |offset| is the offset in the stack where the home object can be found.
+  void EmitSetHomeObjectIfNeeded(Expression* initializer, int offset);
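
Editor's note: [[HomeObject]] is what makes `super` property lookups work — a method must remember the object it was defined on so `super.x` can start the lookup at that object's prototype. A small example of the ES6 behavior these helpers support:

    var base  = { greet() { return "hi"; } };
    var child = {
      __proto__: base,
      greet() {
        // super.greet needs [[HomeObject]] === child to find base.greet
        return super.greet() + " there";
      }
    };
    child.greet();  // "hi there"
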
+
   void EmitLoadSuperConstructor(SuperReference* expr);
 
   void CallIC(Handle<Code> code,
@@ -873,6 +895,22 @@ class FullCodeGenerator: public AstVisitor {
     virtual bool IsEffect() const { return true; }
   };
 
+  class EnterBlockScopeIfNeeded {
+   public:
+    EnterBlockScopeIfNeeded(FullCodeGenerator* codegen, Scope* scope,
+                            BailoutId entry_id, BailoutId declarations_id,
+                            BailoutId exit_id);
+    ~EnterBlockScopeIfNeeded();
+
+   private:
+    MacroAssembler* masm() const { return codegen_->masm(); }
+
+    FullCodeGenerator* codegen_;
+    Scope* scope_;
+    Scope* saved_scope_;
+    BailoutId exit_id_;
+  };
+
   MacroAssembler* masm_;
   CompilationInfo* info_;
   Scope* scope_;
index b35744a..8e9ed8f 100644 (file)
@@ -20,8 +20,23 @@ function GeneratorObjectNext(value) {
                         ['[Generator].prototype.next', this]);
   }
 
-  if (DEBUG_IS_ACTIVE) %DebugPrepareStepInIfStepping(this);
-  return %_GeneratorNext(this, value);
+  var continuation = %GeneratorGetContinuation(this);
+  if (continuation > 0) {
+    // Generator is suspended.
+    if (DEBUG_IS_ACTIVE) %DebugPrepareStepInIfStepping(this);
+    try {
+      return %_GeneratorNext(this, value);
+    } catch (e) {
+      %GeneratorClose(this);
+      throw e;
+    }
+  } else if (continuation == 0) {
+    // Generator is already closed.
+    return { value: void 0, done: true };
+  } else {
+    // Generator is running.
+    throw MakeTypeError('generator_running', []);
+  }
 }
 
 function GeneratorObjectThrow(exn) {
@@ -30,7 +45,22 @@ function GeneratorObjectThrow(exn) {
                         ['[Generator].prototype.throw', this]);
   }
 
-  return %_GeneratorThrow(this, exn);
+  var continuation = %GeneratorGetContinuation(this);
+  if (continuation > 0) {
+    // Generator is suspended.
+    try {
+      return %_GeneratorThrow(this, exn);
+    } catch (e) {
+      %GeneratorClose(this);
+      throw e;
+    }
+  } else if (continuation == 0) {
+    // Generator is already closed.
+    throw exn;
+  } else {
+    // Generator is running.
+    throw MakeTypeError('generator_running', []);
+  }
 }
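
Editor's note: both entry points now branch on the generator's continuation — positive means suspended, zero means closed, negative means currently running. The resulting behavior:

    function* g() { yield 1; }
    var it = g();
    it.next();  // { value: 1, done: false }         (was suspended)
    it.next();  // { value: undefined, done: true }  (ran to completion)
    it.next();  // { value: undefined, done: true }  (already closed)
    // it.throw(new Error("x")) on the closed generator rethrows the Error.

    // An exception closes the generator via the catch blocks above:
    function* h() { yield missing_binding; }
    var it2 = h();
    try { it2.next(); } catch (e) { /* ReferenceError */ }
    it2.next();  // { value: undefined, done: true }

    // Calling next() from inside the running generator itself hits the
    // third state and throws TypeError('generator_running').
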
 
 function GeneratorObjectIterator() {
@@ -44,13 +74,7 @@ function GeneratorFunctionPrototypeConstructor(x) {
 }
 
 function GeneratorFunctionConstructor(arg1) {  // length == 1
-  var source = NewFunctionString(arguments, 'function*');
-  var global_proxy = %GlobalProxy(global);
-  // Compile the string in the constructor and not a helper so that errors
-  // appear to come from here.
-  var f = %_CallFunction(global_proxy, %CompileString(source, true));
-  %FunctionMarkNameShouldPrintAsAnonymous(f);
-  return f;
+  return NewFunctionFromString(arguments, 'function*');
 }
 
 
@@ -77,6 +101,8 @@ function SetUpGenerators() {
   %AddNamedProperty(GeneratorObjectPrototype,
       symbolToStringTag, "Generator", DONT_ENUM | READ_ONLY);
   %InternalSetPrototype(GeneratorFunctionPrototype, $Function.prototype);
+  %AddNamedProperty(GeneratorFunctionPrototype,
+      symbolToStringTag, "GeneratorFunction", DONT_ENUM | READ_ONLY);
   %SetCode(GeneratorFunctionPrototype, GeneratorFunctionPrototypeConstructor);
   %AddNamedProperty(GeneratorFunctionPrototype, "constructor",
       GeneratorFunction, DONT_ENUM | DONT_DELETE | READ_ONLY);
index 574b248..4f744d6 100644 (file)
@@ -30,10 +30,11 @@ class GlobalHandles::Node {
   // FREE -> NORMAL <-> WEAK -> PENDING -> NEAR_DEATH -> { NORMAL, WEAK, FREE }
   enum State {
     FREE = 0,
-    NORMAL,     // Normal global handle.
-    WEAK,       // Flagged as weak but not yet finalized.
-    PENDING,    // Has been recognized as only reachable by weak handles.
-    NEAR_DEATH  // Callback has informed the handle is near death.
+    NORMAL,      // Normal global handle.
+    WEAK,        // Flagged as weak but not yet finalized.
+    PENDING,     // Has been recognized as only reachable by weak handles.
+    NEAR_DEATH,  // Callback has informed the handle is near death.
+    NUMBER_OF_NODE_STATES
   };
 
   // Maps handle location (slot) to the containing node.
@@ -92,8 +93,14 @@ class GlobalHandles::Node {
     IncreaseBlockUses();
   }
 
+  void Zap() {
+    DCHECK(IsInUse());
+    // Zap the values for eager trapping.
+    object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
+  }
+
   void Release() {
-    DCHECK(state() != FREE);
+    DCHECK(IsInUse());
     set_state(FREE);
     // Zap the values for eager trapping.
     object_ = reinterpret_cast<Object*>(kGlobalHandleZapValue);
@@ -146,11 +153,11 @@ class GlobalHandles::Node {
     flags_ = IsInNewSpaceList::update(flags_, v);
   }
 
-  bool is_zapped_during_weak_callback() {
-    return IsZappedDuringWeakCallback::decode(flags_);
+  WeaknessType weakness_type() const {
+    return NodeWeaknessType::decode(flags_);
   }
-  void set_is_zapped_during_weak_callback(bool v) {
-    flags_ = IsZappedDuringWeakCallback::update(flags_, v);
+  void set_weakness_type(WeaknessType weakness_type) {
+    flags_ = NodeWeaknessType::update(flags_, weakness_type);
   }
 
   bool IsNearDeath() const {
@@ -160,6 +167,8 @@ class GlobalHandles::Node {
 
   bool IsWeak() const { return state() == WEAK; }
 
+  bool IsInUse() const { return state() != FREE; }
+
   bool IsRetainer() const { return state() != FREE; }
 
   bool IsStrongRetainer() const { return state() == NORMAL; }
@@ -175,12 +184,12 @@ class GlobalHandles::Node {
 
   // Independent flag accessors.
   void MarkIndependent() {
-    DCHECK(state() != FREE);
+    DCHECK(IsInUse());
     set_independent(true);
   }
 
   void MarkPartiallyDependent() {
-    DCHECK(state() != FREE);
+    DCHECK(IsInUse());
     if (GetGlobalHandles()->isolate()->heap()->InNewSpace(object_)) {
       set_partially_dependent(true);
     }
@@ -193,14 +202,39 @@ class GlobalHandles::Node {
 
   // Callback parameter accessors.
   void set_parameter(void* parameter) {
-    DCHECK(state() != FREE);
+    DCHECK(IsInUse());
+    DCHECK(weakness_type() == NORMAL_WEAK || weakness_type() == PHANTOM_WEAK);
     parameter_or_next_free_.parameter = parameter;
   }
   void* parameter() const {
-    DCHECK(state() != FREE);
+    DCHECK(IsInUse());
     return parameter_or_next_free_.parameter;
   }
 
+  void set_internal_fields(int internal_field_index1,
+                           int internal_field_index2) {
+    DCHECK(weakness_type() == INTERNAL_FIELDS_WEAK);
+    // These are stored in an int16_t.
+    DCHECK(internal_field_index1 < 1 << 16);
+    DCHECK(internal_field_index1 >= -(1 << 16));
+    DCHECK(internal_field_index2 < 1 << 16);
+    DCHECK(internal_field_index2 >= -(1 << 16));
+    parameter_or_next_free_.internal_field_indeces.internal_field1 =
+        static_cast<int16_t>(internal_field_index1);
+    parameter_or_next_free_.internal_field_indeces.internal_field2 =
+        static_cast<int16_t>(internal_field_index2);
+  }
+
+  int internal_field1() const {
+    DCHECK(weakness_type() == INTERNAL_FIELDS_WEAK);
+    return parameter_or_next_free_.internal_field_indeces.internal_field1;
+  }
+
+  int internal_field2() const {
+    DCHECK(weakness_type() == INTERNAL_FIELDS_WEAK);
+    return parameter_or_next_free_.internal_field_indeces.internal_field2;
+  }
+
   // Accessors for next free node in the free list.
   Node* next_free() {
     DCHECK(state() == FREE);
@@ -211,67 +245,128 @@ class GlobalHandles::Node {
     parameter_or_next_free_.next_free = value;
   }
 
-  void MakeWeak(void* parameter, WeakCallback weak_callback,
-                bool is_zapped_during_weak_callback = false) {
+  void MakeWeak(void* parameter, WeakCallback weak_callback) {
     DCHECK(weak_callback != NULL);
-    DCHECK(state() != FREE);
+    DCHECK(IsInUse());
     CHECK(object_ != NULL);
     set_state(WEAK);
+    set_weakness_type(NORMAL_WEAK);
     set_parameter(parameter);
-    set_is_zapped_during_weak_callback(is_zapped_during_weak_callback);
     weak_callback_ = weak_callback;
   }
 
+  void MakePhantom(void* parameter,
+                   PhantomCallbackData<void>::Callback phantom_callback,
+                   int16_t internal_field_index1,
+                   int16_t internal_field_index2) {
+    DCHECK(phantom_callback != NULL);
+    DCHECK(IsInUse());
+    CHECK(object_ != NULL);
+    set_state(WEAK);
+    if (parameter == NULL) {
+      set_weakness_type(INTERNAL_FIELDS_WEAK);
+      set_internal_fields(internal_field_index1, internal_field_index2);
+    } else {
+      DCHECK(internal_field_index1 == v8::Object::kNoInternalFieldIndex);
+      DCHECK(internal_field_index2 == v8::Object::kNoInternalFieldIndex);
+      set_weakness_type(PHANTOM_WEAK);
+      set_parameter(parameter);
+    }
+    weak_callback_ = reinterpret_cast<WeakCallback>(phantom_callback);
+  }
+
   void* ClearWeakness() {
-    DCHECK(state() != FREE);
+    DCHECK(IsInUse());
     void* p = parameter();
     set_state(NORMAL);
     set_parameter(NULL);
     return p;
   }
 
+  void CollectPhantomCallbackData(
+      Isolate* isolate, List<PendingPhantomCallback>* pending_phantom_callbacks,
+      List<PendingInternalFieldsCallback>* pending_internal_fields_callbacks) {
+    if (state() != Node::PENDING) return;
+    bool do_release = true;
+    if (weak_callback_ != NULL) {
+      if (weakness_type() == NORMAL_WEAK) return;
+
+      v8::Isolate* api_isolate = reinterpret_cast<v8::Isolate*>(isolate);
+
+      if (weakness_type() == PHANTOM_WEAK) {
+        // Phantom weak pointer case. Zap with harmless value.
+        DCHECK(*location() == Smi::FromInt(0));
+        typedef PhantomCallbackData<void> Data;
+
+        Data data(api_isolate, parameter());
+        Data::Callback callback =
+            reinterpret_cast<Data::Callback>(weak_callback_);
+
+        pending_phantom_callbacks->Add(
+            PendingPhantomCallback(this, data, callback));
+
+        // Postpone the release of the handle. The embedder can't use the
+        // handle (it's zapped), but it may be using the location, and we
+        // don't want to confuse things by reusing that.
+        do_release = false;
+      } else {
+        DCHECK(weakness_type() == INTERNAL_FIELDS_WEAK);
+        typedef InternalFieldsCallbackData<void, void> Data;
+
+        // Phantom weak pointer case, passing internal fields instead of
+        // parameter. Don't use a handle here during GC, because it will
+        // create a handle pointing to a dying object, which can confuse
+        // the next GC.
+        JSObject* jsobject = reinterpret_cast<JSObject*>(object());
+        DCHECK(jsobject->IsJSObject());
+        Data data(api_isolate, jsobject->GetInternalField(internal_field1()),
+                  jsobject->GetInternalField(internal_field2()));
+        Data::Callback callback =
+            reinterpret_cast<Data::Callback>(weak_callback_);
+
+        // In the future, we want to delay the callback. In that case we will
+        // zap when we queue up, to stop the C++ side accessing the dead V8
+        // object, but we will call Release only after the callback (allowing
+        // the node to be reused).
+        pending_internal_fields_callbacks->Add(
+            PendingInternalFieldsCallback(data, callback));
+      }
+    }
+    // TODO(erikcorry): At the moment the callbacks are not postponed much,
+    // but if we really postpone them until after the mutator has run, we
+    // need to divide things up, so that an early callback clears the handle,
+    // while a later one destroys the objects involved, possibly triggering
+    // some work when decremented ref counts hit zero.
+    if (do_release) Release();
+  }
+
   bool PostGarbageCollectionProcessing(Isolate* isolate) {
     if (state() != Node::PENDING) return false;
     if (weak_callback_ == NULL) {
       Release();
       return false;
     }
-    void* param = parameter();
     set_state(NEAR_DEATH);
-    set_parameter(NULL);
 
+    // Check that we are not passing a finalized external string to
+    // the callback.
+    DCHECK(!object_->IsExternalOneByteString() ||
+           ExternalOneByteString::cast(object_)->resource() != NULL);
+    DCHECK(!object_->IsExternalTwoByteString() ||
+           ExternalTwoByteString::cast(object_)->resource() != NULL);
+    // Leaving V8.
+    VMState<EXTERNAL> vmstate(isolate);
+    HandleScope handle_scope(isolate);
+    if (weakness_type() == PHANTOM_WEAK) return false;
+    DCHECK(weakness_type() == NORMAL_WEAK);
     Object** object = location();
-    {
-      // Check that we are not passing a finalized external string to
-      // the callback.
-      DCHECK(!object_->IsExternalOneByteString() ||
-             ExternalOneByteString::cast(object_)->resource() != NULL);
-      DCHECK(!object_->IsExternalTwoByteString() ||
-             ExternalTwoByteString::cast(object_)->resource() != NULL);
-      // Leaving V8.
-      VMState<EXTERNAL> vmstate(isolate);
-      HandleScope handle_scope(isolate);
-      if (is_zapped_during_weak_callback()) {
-        // Phantom weak pointer case.
-        DCHECK(*object == Smi::FromInt(kPhantomReferenceZap));
-        // Make data with a null handle.
-        v8::WeakCallbackData<v8::Value, void> data(
-            reinterpret_cast<v8::Isolate*>(isolate), v8::Local<v8::Object>(),
-            param);
-        weak_callback_(data);
-        if (state() != FREE) {
-          // Callback does not have to clear the global handle if it is a
-          // phantom handle.
-          Release();
-        }
-      } else {
-        Handle<Object> handle(*object, isolate);
-        v8::WeakCallbackData<v8::Value, void> data(
-            reinterpret_cast<v8::Isolate*>(isolate), v8::Utils::ToLocal(handle),
-            param);
-        weak_callback_(data);
-      }
-    }
+    Handle<Object> handle(*object, isolate);
+    v8::WeakCallbackData<v8::Value, void> data(
+        reinterpret_cast<v8::Isolate*>(isolate), parameter(),
+        v8::Utils::ToLocal(handle));
+    set_parameter(NULL);
+    weak_callback_(data);
+
     // Absence of explicit cleanup or revival of weak handle
     // in most of the cases would lead to memory leak.
     CHECK(state() != NEAR_DEATH);
@@ -300,11 +395,11 @@ class GlobalHandles::Node {
 
   // This stores three flags (independent, partially_dependent and
   // in_new_space_list), a State and a WeaknessType.
-  class NodeState : public BitField<State, 0, 4> {};
-  class IsIndependent : public BitField<bool, 4, 1> {};
-  class IsPartiallyDependent : public BitField<bool, 5, 1> {};
-  class IsInNewSpaceList : public BitField<bool, 6, 1> {};
-  class IsZappedDuringWeakCallback : public BitField<bool, 7, 1> {};
+  class NodeState : public BitField<State, 0, 3> {};
+  class IsIndependent : public BitField<bool, 3, 1> {};
+  class IsPartiallyDependent : public BitField<bool, 4, 1> {};
+  class IsInNewSpaceList : public BitField<bool, 5, 1> {};
+  class NodeWeaknessType : public BitField<WeaknessType, 6, 2> {};
 
   uint8_t flags_;
 
@@ -315,6 +410,10 @@ class GlobalHandles::Node {
   // the free list link.
   union {
     void* parameter;
+    struct {
+      int16_t internal_field1;
+      int16_t internal_field2;
+    } internal_field_indeces;
     Node* next_free;
   } parameter_or_next_free_;
 
@@ -500,9 +599,38 @@ void GlobalHandles::Destroy(Object** location) {
 
 
 void GlobalHandles::MakeWeak(Object** location, void* parameter,
-                             WeakCallback weak_callback, PhantomState phantom) {
+                             WeakCallback weak_callback) {
+  Node::FromLocation(location)->MakeWeak(parameter, weak_callback);
+}
+
+
+typedef PhantomCallbackData<void>::Callback GenericCallback;
+
+
+void GlobalHandles::MakePhantom(
+    Object** location,
+    v8::InternalFieldsCallbackData<void, void>::Callback phantom_callback,
+    int16_t internal_field_index1, int16_t internal_field_index2) {
   Node::FromLocation(location)
-      ->MakeWeak(parameter, weak_callback, phantom == Phantom);
+      ->MakePhantom(NULL, reinterpret_cast<GenericCallback>(phantom_callback),
+                    internal_field_index1, internal_field_index2);
+}
+
+
+void GlobalHandles::MakePhantom(Object** location, void* parameter,
+                                GenericCallback phantom_callback) {
+  Node::FromLocation(location)->MakePhantom(parameter, phantom_callback,
+                                            v8::Object::kNoInternalFieldIndex,
+                                            v8::Object::kNoInternalFieldIndex);
+}
+
+
+void GlobalHandles::CollectPhantomCallbackData() {
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
+    Node* node = it.node();
+    node->CollectPhantomCallbackData(isolate(), &pending_phantom_callbacks_,
+                                     &pending_internal_fields_callbacks_);
+  }
 }
 
 
@@ -540,10 +668,27 @@ void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
   for (NodeIterator it(this); !it.done(); it.Advance()) {
     Node* node = it.node();
     if (node->IsWeakRetainer()) {
-      if (node->state() == Node::PENDING &&
-          node->is_zapped_during_weak_callback()) {
-        *(node->location()) = Smi::FromInt(kPhantomReferenceZap);
+      // Weakness type can be normal, phantom or internal fields.
+      // For normal weakness we mark through the handle so that
+      // the object and things reachable from it are available
+      // to the callback.
+      // In the case of phantom we can zap the object handle now
+      // and we won't need it, so we don't need to mark through it.
+      // In the internal fields case we will need the internal
+      // fields, so we can't zap the handle, but we don't need to
+      // mark through it, because it will die in this GC round.
+      if (node->state() == Node::PENDING) {
+        if (node->weakness_type() == PHANTOM_WEAK) {
+          *(node->location()) = Smi::FromInt(0);
+        } else if (node->weakness_type() == NORMAL_WEAK) {
+          v->VisitPointer(node->location());
+        } else {
+          DCHECK(node->weakness_type() == INTERNAL_FIELDS_WEAK);
+        }
       } else {
+        // Node is not pending, so that means the object survived.  We still
+        // need to visit the pointer in case the object moved, e.g. because of
+        // compaction.
         v->VisitPointer(node->location());
       }
     }
@@ -591,10 +736,19 @@ void GlobalHandles::IterateNewSpaceWeakIndependentRoots(ObjectVisitor* v) {
     DCHECK(node->is_in_new_space_list());
     if ((node->is_independent() || node->is_partially_dependent()) &&
         node->IsWeakRetainer()) {
-      if (node->is_zapped_during_weak_callback()) {
-        *(node->location()) = Smi::FromInt(kPhantomReferenceZap);
-      } else {
+      if (node->weakness_type() == PHANTOM_WEAK) {
+        *(node->location()) = Smi::FromInt(0);
+      } else if (node->weakness_type() == NORMAL_WEAK) {
         v->VisitPointer(node->location());
+      } else {
+        DCHECK(node->weakness_type() == INTERNAL_FIELDS_WEAK);
+        // For this case we only need to trace if it's alive: the tracing of
+        // something that is already alive is just to get the pointer updated
+        // to the new location of the object.
+        DCHECK(node->state() != Node::NEAR_DEATH);
+        if (node->state() != Node::PENDING) {
+          v->VisitPointer(node->location());
+        }
       }
     }
   }
@@ -647,63 +801,66 @@ bool GlobalHandles::IterateObjectGroups(ObjectVisitor* v,
 }
 
 
-int GlobalHandles::PostGarbageCollectionProcessing(
-    GarbageCollector collector) {
-  // Process weak global handle callbacks. This must be done after the
-  // GC is completely done, because the callbacks may invoke arbitrary
-  // API functions.
-  DCHECK(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
-  const int initial_post_gc_processing_count = ++post_gc_processing_count_;
+int GlobalHandles::PostScavengeProcessing(
+    const int initial_post_gc_processing_count) {
   int freed_nodes = 0;
-  if (collector == SCAVENGER) {
-    for (int i = 0; i < new_space_nodes_.length(); ++i) {
-      Node* node = new_space_nodes_[i];
-      DCHECK(node->is_in_new_space_list());
-      if (!node->IsRetainer()) {
-        // Free nodes do not have weak callbacks. Do not use them to compute
-        // the freed_nodes.
-        continue;
-      }
-      // Skip dependent handles. Their weak callbacks might expect to be
-      // called between two global garbage collection callbacks which
-      // are not called for minor collections.
-      if (!node->is_independent() && !node->is_partially_dependent()) {
-        continue;
-      }
-      node->clear_partially_dependent();
-      if (node->PostGarbageCollectionProcessing(isolate_)) {
-        if (initial_post_gc_processing_count != post_gc_processing_count_) {
-          // Weak callback triggered another GC and another round of
-          // PostGarbageCollection processing.  The current node might
-          // have been deleted in that round, so we need to bail out (or
-          // restart the processing).
-          return freed_nodes;
-        }
-      }
-      if (!node->IsRetainer()) {
-        freed_nodes++;
-      }
+  for (int i = 0; i < new_space_nodes_.length(); ++i) {
+    Node* node = new_space_nodes_[i];
+    DCHECK(node->is_in_new_space_list());
+    if (!node->IsRetainer()) {
+      // Free nodes do not have weak callbacks. Do not use them to compute
+      // the freed_nodes.
+      continue;
     }
-  } else {
-    for (NodeIterator it(this); !it.done(); it.Advance()) {
-      if (!it.node()->IsRetainer()) {
-        // Free nodes do not have weak callbacks. Do not use them to compute
-        // the freed_nodes.
-        continue;
-      }
-      it.node()->clear_partially_dependent();
-      if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
-        if (initial_post_gc_processing_count != post_gc_processing_count_) {
-          // See the comment above.
-          return freed_nodes;
-        }
+    // Skip dependent handles. Their weak callbacks might expect to be
+    // called between two global garbage collection callbacks which
+    // are not called for minor collections.
+    if (!node->is_independent() && !node->is_partially_dependent()) {
+      continue;
+    }
+    node->clear_partially_dependent();
+    if (node->PostGarbageCollectionProcessing(isolate_)) {
+      if (initial_post_gc_processing_count != post_gc_processing_count_) {
+        // Weak callback triggered another GC and another round of
+        // PostGarbageCollection processing.  The current node might
+        // have been deleted in that round, so we need to bail out (or
+        // restart the processing).
+        return freed_nodes;
       }
-      if (!it.node()->IsRetainer()) {
-        freed_nodes++;
+    }
+    if (!node->IsRetainer()) {
+      freed_nodes++;
+    }
+  }
+  return freed_nodes;
+}
+
+
+int GlobalHandles::PostMarkSweepProcessing(
+    const int initial_post_gc_processing_count) {
+  int freed_nodes = 0;
+  for (NodeIterator it(this); !it.done(); it.Advance()) {
+    if (!it.node()->IsRetainer()) {
+      // Free nodes do not have weak callbacks. Do not use them to compute
+      // the freed_nodes.
+      continue;
+    }
+    it.node()->clear_partially_dependent();
+    if (it.node()->PostGarbageCollectionProcessing(isolate_)) {
+      if (initial_post_gc_processing_count != post_gc_processing_count_) {
+        // See the comment above.
+        return freed_nodes;
       }
     }
+    if (!it.node()->IsRetainer()) {
+      freed_nodes++;
+    }
   }
-  // Update the list of new space nodes.
+  return freed_nodes;
+}
+
+
+void GlobalHandles::UpdateListOfNewSpaceNodes() {
   int last = 0;
   for (int i = 0; i < new_space_nodes_.length(); ++i) {
     Node* node = new_space_nodes_[i];
@@ -722,10 +879,59 @@ int GlobalHandles::PostGarbageCollectionProcessing(
     }
   }
   new_space_nodes_.Rewind(last);
+}
+
+
+int GlobalHandles::DispatchPendingPhantomCallbacks() {
+  int freed_nodes = 0;
+  while (pending_phantom_callbacks_.length() != 0) {
+    PendingPhantomCallback callback = pending_phantom_callbacks_.RemoveLast();
+    callback.invoke();
+    freed_nodes++;
+  }
+  while (pending_internal_fields_callbacks_.length() != 0) {
+    PendingInternalFieldsCallback callback =
+        pending_internal_fields_callbacks_.RemoveLast();
+    callback.invoke();
+    freed_nodes++;
+  }
+  return freed_nodes;
+}
+
+
+int GlobalHandles::PostGarbageCollectionProcessing(GarbageCollector collector) {
+  // Process weak global handle callbacks. This must be done after the
+  // GC is completely done, because the callbacks may invoke arbitrary
+  // API functions.
+  DCHECK(isolate_->heap()->gc_state() == Heap::NOT_IN_GC);
+  const int initial_post_gc_processing_count = ++post_gc_processing_count_;
+  int freed_nodes = 0;
+  if (collector == SCAVENGER) {
+    freed_nodes = PostScavengeProcessing(initial_post_gc_processing_count);
+  } else {
+    freed_nodes = PostMarkSweepProcessing(initial_post_gc_processing_count);
+  }
+  if (initial_post_gc_processing_count != post_gc_processing_count_) {
+    // If the callbacks caused a nested GC, then return.  See comment in
+    // PostScavengeProcessing.
+    return freed_nodes;
+  }
+  freed_nodes += DispatchPendingPhantomCallbacks();
+  if (initial_post_gc_processing_count == post_gc_processing_count_) {
+    UpdateListOfNewSpaceNodes();
+  }
   return freed_nodes;
 }
 
 
+void GlobalHandles::PendingPhantomCallback::invoke() {
+  if (node_->state() == Node::FREE) return;
+  DCHECK(node_->state() == Node::NEAR_DEATH);
+  callback_(data_);
+  if (node_->state() != Node::FREE) node_->Release();
+}
+
+
 void GlobalHandles::IterateStrongRoots(ObjectVisitor* v) {
   for (NodeIterator it(this); !it.done(); it.Advance()) {
     if (it.node()->IsStrongRetainer()) {
index aacdcbc..b3756d0 100644 (file)
@@ -97,6 +97,13 @@ struct ObjectGroupRetainerInfo {
 };
 
 
+enum WeaknessType {
+  NORMAL_WEAK,          // Embedder gets a handle to the dying object.
+  PHANTOM_WEAK,         // Embedder gets the parameter they passed in earlier.
+  INTERNAL_FIELDS_WEAK  // Embedder gets 2 internal fields from dying object.
+};
+
+
 class GlobalHandles {
  public:
   ~GlobalHandles();
@@ -128,8 +135,18 @@ class GlobalHandles {
   // before the callback is invoked, but the handle can still be identified
   // in the callback by using the location() of the handle.
   static void MakeWeak(Object** location, void* parameter,
-                       WeakCallback weak_callback,
-                       PhantomState phantom = Nonphantom);
+                       WeakCallback weak_callback);
+
+  // It would be nice to template this one, but it's really hard to get
+  // the template instantiator to work right if you do.
+  static void MakePhantom(Object** location, void* parameter,
+                          PhantomCallbackData<void>::Callback weak_callback);
+
+  static void MakePhantom(
+      Object** location,
+      v8::InternalFieldsCallbackData<void, void>::Callback weak_callback,
+      int16_t internal_field_index1,
+      int16_t internal_field_index2 = v8::Object::kNoInternalFieldIndex);
 
   void RecordStats(HeapStats* stats);
 
@@ -145,6 +162,10 @@ class GlobalHandles {
     return number_of_global_handles_;
   }
 
+  // Collect up data for the weak handle callbacks after GC has completed, but
+  // before memory is reclaimed.
+  void CollectPhantomCallbackData();
+
   // Clear the weakness of a global handle.
   static void* ClearWeakness(Object** location);
 
@@ -270,10 +291,18 @@ class GlobalHandles {
   // don't assign any initial capacity.
   static const int kObjectGroupConnectionsCapacity = 20;
 
+  // Helpers for PostGarbageCollectionProcessing.
+  int PostScavengeProcessing(int initial_post_gc_processing_count);
+  int PostMarkSweepProcessing(int initial_post_gc_processing_count);
+  int DispatchPendingPhantomCallbacks();
+  void UpdateListOfNewSpaceNodes();
+
   // Internal node structures.
   class Node;
   class NodeBlock;
   class NodeIterator;
+  class PendingPhantomCallback;
+  class PendingInternalFieldsCallback;
 
   Isolate* isolate_;
 
@@ -306,12 +335,46 @@ class GlobalHandles {
   List<ObjectGroupRetainerInfo> retainer_infos_;
   List<ObjectGroupConnection> implicit_ref_connections_;
 
+  List<PendingPhantomCallback> pending_phantom_callbacks_;
+  List<PendingInternalFieldsCallback> pending_internal_fields_callbacks_;
+
   friend class Isolate;
 
   DISALLOW_COPY_AND_ASSIGN(GlobalHandles);
 };
 
 
+class GlobalHandles::PendingPhantomCallback {
+ public:
+  typedef PhantomCallbackData<void> Data;
+  PendingPhantomCallback(Node* node, Data data, Data::Callback callback)
+      : node_(node), data_(data), callback_(callback) {}
+
+  void invoke();
+
+  Node* node() { return node_; }
+
+ private:
+  Node* node_;
+  Data data_;
+  Data::Callback callback_;
+};
+
+
+class GlobalHandles::PendingInternalFieldsCallback {
+ public:
+  typedef InternalFieldsCallbackData<void, void> Data;
+  PendingInternalFieldsCallback(Data data, Data::Callback callback)
+      : data_(data), callback_(callback) {}
+
+  void invoke() { callback_(data_); }
+
+ private:
+  Data data_;
+  Data::Callback callback_;
+};
+
+
 class EternalHandles {
  public:
   enum SingletonHandle {
index c6ba010..48bb030 100644 (file)
 #endif
 
 #if V8_TARGET_ARCH_IA32 || (V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_32_BIT) || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_MIPS ||     \
+    V8_TARGET_ARCH_MIPS64
 #define V8_TURBOFAN_BACKEND 1
 #else
 #define V8_TURBOFAN_BACKEND 0
 #endif
-#if V8_TURBOFAN_BACKEND && !(V8_OS_WIN && V8_TARGET_ARCH_X64)
+#if V8_TURBOFAN_BACKEND
 #define V8_TURBOFAN_TARGET 1
 #else
 #define V8_TURBOFAN_TARGET 0
@@ -81,23 +82,14 @@ namespace internal {
 #endif
 
 
-// Support for alternative bool type. This is only enabled if the code is
-// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
-// For instance, 'bool b = "false";' results in b == true! This is a hidden
-// source of bugs.
-// However, redefining the bool type does have some negative impact on some
-// platforms. It gives rise to compiler warnings (i.e. with
-// MSVC) in the API header files when mixing code that uses the standard
-// bool with code that uses the redefined version.
-// This does not actually belong in the platform code, but needs to be
-// defined here because the platform code uses bool, and platform.h is
-// include very early in the main include file.
-
-#ifdef USE_MYBOOL
-typedef unsigned int __my_bool__;
-#define bool __my_bool__  // use 'indirection' to avoid name clashes
+// Determine whether double field unboxing feature is enabled.
+#if (V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64)
+#define V8_DOUBLE_FIELDS_UNBOXING 1
+#else
+#define V8_DOUBLE_FIELDS_UNBOXING 0
 #endif
 
+
 typedef uint8_t byte;
 typedef byte* Address;
 
@@ -352,6 +344,7 @@ class Smi;
 template <typename Config, class Allocator = FreeStoreAllocationPolicy>
     class SplayTree;
 class String;
+class Symbol;
 class Name;
 class Struct;
 class Symbol;
@@ -369,6 +362,7 @@ typedef bool (*WeakSlotCallbackWithHeap)(Heap* heap, Object** pointer);
 
 // NOTE: SpaceIterator depends on AllocationSpace enumeration values being
 // consecutive.
+// Keep this enum in sync with the ObjectSpace enum in v8.h
 enum AllocationSpace {
   NEW_SPACE,            // Semispaces collected with copying collector.
   OLD_POINTER_SPACE,    // May contain pointers to new space.
@@ -610,6 +604,8 @@ enum CpuFeature {
   SSE4_1,
   SSE3,
   SAHF,
+  AVX,
+  FMA3,
   // ARM
   VFP3,
   ARMv7,
@@ -628,6 +624,7 @@ enum CpuFeature {
   MIPSr6,
   // ARM64
   ALWAYS_ALIGN_CSP,
+  COHERENT_CACHE,
   NUMBER_OF_CPU_FEATURES
 };
 
@@ -644,7 +641,7 @@ enum ScopeType {
   EVAL_SCOPE,      // The top-level scope for an eval source.
   FUNCTION_SCOPE,  // The top-level scope for a function.
   MODULE_SCOPE,    // The scope introduced by a module literal
-  GLOBAL_SCOPE,    // The top-level scope for a program or a top-level eval.
+  SCRIPT_SCOPE,    // The top-level scope for a script or a top-level eval.
   CATCH_SCOPE,     // The scope introduced by catch.
   BLOCK_SCOPE,     // The scope introduced by a new block.
   WITH_SCOPE,      // The scope introduced by with.
@@ -776,7 +773,8 @@ enum FunctionKind {
   kArrowFunction = 1,
   kGeneratorFunction = 2,
   kConciseMethod = 4,
-  kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod
+  kConciseGeneratorMethod = kGeneratorFunction | kConciseMethod,
+  kDefaultConstructor = 8
 };
 
 
@@ -785,7 +783,8 @@ inline bool IsValidFunctionKind(FunctionKind kind) {
          kind == FunctionKind::kArrowFunction ||
          kind == FunctionKind::kGeneratorFunction ||
          kind == FunctionKind::kConciseMethod ||
-         kind == FunctionKind::kConciseGeneratorMethod;
+         kind == FunctionKind::kConciseGeneratorMethod ||
+         kind == FunctionKind::kDefaultConstructor;
 }
 
 
@@ -805,6 +804,14 @@ inline bool IsConciseMethod(FunctionKind kind) {
   DCHECK(IsValidFunctionKind(kind));
   return kind & FunctionKind::kConciseMethod;
 }
+
+
+inline bool IsDefaultConstructor(FunctionKind kind) {
+  DCHECK(IsValidFunctionKind(kind));
+  return kind & FunctionKind::kDefaultConstructor;
+}
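
Editor's note: kDefaultConstructor marks the constructor V8 synthesizes when a class body does not declare one. Conceptually:

    class A {
      constructor() { this.tag = "A"; }
    }
    class B extends A {}  // gets a synthesized default constructor,
                          // roughly: constructor(...args) { super(...args); }
    new B().tag;          // "A"
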
+
+
 } }  // namespace v8::internal
 
 namespace i = v8::internal;
diff --git a/deps/v8/src/harmony-array-includes.js b/deps/v8/src/harmony-array-includes.js
new file mode 100644 (file)
index 0000000..d1a7790
--- /dev/null
@@ -0,0 +1,61 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+// This file relies on the fact that the following declaration has been made
+// in runtime.js:
+// var $Array = global.Array;
+
+// -------------------------------------------------------------------
+
+// Proposed for ES7
+// https://github.com/tc39/Array.prototype.includes
+// 6e3b78c927aeda20b9d40e81303f9d44596cd904
+function ArrayIncludes(searchElement, fromIndex) {
+  var array = ToObject(this);
+  var len = ToLength(array.length);
+
+  if (len === 0) {
+    return false;
+  }
+
+  var n = ToInteger(fromIndex);
+
+  var k;
+  if (n >= 0) {
+    k = n;
+  } else {
+    k = len + n;
+    if (k < 0) {
+      k = 0;
+    }
+  }
+
+  while (k < len) {
+    var elementK = array[k];
+    if (SameValueZero(searchElement, elementK)) {
+      return true;
+    }
+
+    ++k;
+  }
+
+  return false;
+}
+
+// -------------------------------------------------------------------
+
+function HarmonyArrayIncludesExtendArrayPrototype() {
+  %CheckIsBootstrapping();
+
+  %FunctionSetLength(ArrayIncludes, 1);
+
+  // Set up the non-enumerable functions on the Array prototype object.
+  InstallFunctions($Array.prototype, DONT_ENUM, $Array(
+    "includes", ArrayIncludes
+  ));
+}
+
+HarmonyArrayIncludesExtendArrayPrototype();
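
Editor's note: the significant difference from Array.prototype.indexOf is the SameValueZero comparison, which treats NaN as equal to itself; a negative fromIndex counts back from the end:

    [1, 2, 3].includes(2);      // true
    [1, 2, NaN].includes(NaN);  // true  (SameValueZero finds NaN)
    [1, 2, NaN].indexOf(NaN);   // -1    (strict equality never matches NaN)
    [1, 2, 3].includes(3, -1);  // true  (fromIndex -1 starts at the last element)
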
index 06fada7..5d1262a 100644 (file)
@@ -127,6 +127,56 @@ function ArrayFill(value /* [, start [, end ] ] */) {  // length == 1
   return array;
 }
 
+// ES6, draft 10-14-14, section 22.1.2.1
+function ArrayFrom(arrayLike, mapfn, receiver) {
+  var items = ToObject(arrayLike);
+  var mapping = !IS_UNDEFINED(mapfn);
+
+  if (mapping) {
+    if (!IS_SPEC_FUNCTION(mapfn)) {
+      throw MakeTypeError('called_non_callable', [ mapfn ]);
+    } else if (IS_NULL_OR_UNDEFINED(receiver)) {
+      receiver = %GetDefaultReceiver(mapfn) || receiver;
+    } else if (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(mapfn)) {
+      receiver = ToObject(receiver);
+    }
+  }
+
+  var iterable = ToIterable(items);
+  var k;
+  var result;
+  var mappedValue;
+  var nextValue;
+
+  if (!IS_UNDEFINED(iterable)) {
+    result = IS_SPEC_FUNCTION(this) && this.prototype ? new this() : [];
+
+    k = 0;
+    for (nextValue of items) {
+      if (mapping) mappedValue = %_CallFunction(receiver, nextValue, k, mapfn);
+      else mappedValue = nextValue;
+      %AddElement(result, k++, mappedValue, NONE);
+    }
+
+    result.length = k;
+    return result;
+  } else {
+    var len = ToLength(items.length);
+    result = IS_SPEC_FUNCTION(this) && this.prototype ? new this(len) :
+        new $Array(len);
+
+    for (k = 0; k < len; ++k) {
+      nextValue = items[k];
+      if (mapping) mappedValue = %_CallFunction(receiver, nextValue, k, mapfn);
+      else mappedValue = nextValue;
+      %AddElement(result, k, mappedValue, NONE);
+    }
+
+    result.length = k;
+    return result;
+  }
+}
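
Editor's note: ArrayFrom takes the iterator path when the input is iterable and falls back to the array-like path (length plus indexed properties) otherwise; the optional mapfn is applied either way:

    Array.from("abc");                          // ["a", "b", "c"]  (iterable)
    Array.from({ length: 2, 0: "x", 1: "y" });  // ["x", "y"]       (array-like)
    Array.from([1, 2, 3], function(n) { return n * 2; });  // [2, 4, 6]
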
+
 // ES6, draft 05-22-14, section 22.1.2.3
 function ArrayOf() {
   var length = %_ArgumentsLength();
@@ -142,11 +192,25 @@ function ArrayOf() {
 
 // -------------------------------------------------------------------
 
+function HarmonyArrayExtendSymbolPrototype() {
+  %CheckIsBootstrapping();
+
+  InstallConstants($Symbol, $Array(
+    // TODO(dslomov, caitp): Move to symbol.js when shipping
+   "isConcatSpreadable", symbolIsConcatSpreadable
+  ));
+}
+
+HarmonyArrayExtendSymbolPrototype();
+
 function HarmonyArrayExtendArrayPrototype() {
   %CheckIsBootstrapping();
 
+  %FunctionSetLength(ArrayFrom, 1);
+
   // Set up non-enumerable functions on the Array object.
   InstallFunctions($Array, DONT_ENUM, $Array(
+    "from", ArrayFrom,
     "of", ArrayOf
   ));
 
index b6605a9..ac06758 100644 (file)
@@ -7,26 +7,29 @@
 // var $Function = global.Function;
 // var $Array = global.Array;
 
+"use strict";
 
-(function() {
-  function FunctionToMethod(homeObject) {
-    if (!IS_SPEC_FUNCTION(this)) {
-      throw MakeTypeError('toMethod_non_function',
-                          [%ToString(this), typeof this]);
+function FunctionToMethod(homeObject) {
+  if (!IS_SPEC_FUNCTION(this)) {
+    throw MakeTypeError('toMethod_non_function',
+                        [%ToString(this), typeof this]);
 
-    }
-
-    if (!IS_SPEC_OBJECT(homeObject)) {
-      throw MakeTypeError('toMethod_non_object',
-                          [%ToString(homeObject)]);
-    }
+  }
 
-    return %ToMethod(this, homeObject);
+  if (!IS_SPEC_OBJECT(homeObject)) {
+    throw MakeTypeError('toMethod_non_object',
+                        [%ToString(homeObject)]);
   }
 
+  return %ToMethod(this, homeObject);
+}
+
+function SetupHarmonyClasses() {
   %CheckIsBootstrapping();
 
   InstallFunctions($Function.prototype, DONT_ENUM, $Array(
       "toMethod", FunctionToMethod
   ));
-}());
+}
+
+SetupHarmonyClasses();
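
Editor's note: Function.prototype.toMethod (an experimental harmony-classes API, later withdrawn from the ES6 drafts) clones a function with its [[HomeObject]] set to the given object, which redirects `super` lookups. A sketch of the intended semantics:

    var protoA = { hello() { return "A"; } };
    var protoB = { hello() { return "B"; } };
    var obj = {
      __proto__: protoA,
      greet() { return super.hello(); }
    };
    obj.greet();                                        // "A"
    var rebound = obj.greet.toMethod({ __proto__: protoB });
    rebound();                                          // "B"
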
diff --git a/deps/v8/src/harmony-regexp.js b/deps/v8/src/harmony-regexp.js
new file mode 100644 (file)
index 0000000..a1b32ab
--- /dev/null
@@ -0,0 +1,35 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+'use strict';
+
+var $RegExp = global.RegExp;
+
+// -------------------------------------------------------------------
+
+// ES6 draft 12-06-13, section 21.2.5.3
+// + https://bugs.ecmascript.org/show_bug.cgi?id=3423
+function RegExpGetFlags() {
+  if (!IS_SPEC_OBJECT(this)) {
+    throw MakeTypeError('flags_getter_non_object',
+                        [%ToString(this)]);
+  }
+  var result = '';
+  if (this.global) result += 'g';
+  if (this.ignoreCase) result += 'i';
+  if (this.multiline) result += 'm';
+  if (this.unicode) result += 'u';
+  if (this.sticky) result += 'y';
+  return result;
+}
+
+function ExtendRegExpPrototype() {
+  %CheckIsBootstrapping();
+
+  %DefineAccessorPropertyUnchecked($RegExp.prototype, 'flags', RegExpGetFlags,
+                                   null, DONT_ENUM);
+  %SetNativeFlag(RegExpGetFlags);
+}
+
+ExtendRegExpPrototype();
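
Editor's note: the getter is generic — it builds the string from the boolean flag properties in the fixed order g, i, m, u, y — so it also works on any object carrying those properties:

    /abc/gim.flags;              // "gim"
    new RegExp("x", "m").flags;  // "m"
    Object.getOwnPropertyDescriptor(RegExp.prototype, "flags")
        .get.call({ global: true, sticky: true });  // "gy"
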
index 1c477e2..6bbe139 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-'use strict';
+"use strict";
 
 // This file relies on the fact that the following declaration has been made
 // in runtime.js:
@@ -95,14 +95,14 @@ function StringEndsWith(searchString /* position */) {  // length == 1
 
 
 // ES6 draft 04-05-14, section 21.1.3.6
-function StringContains(searchString /* position */) {  // length == 1
-  CHECK_OBJECT_COERCIBLE(this, "String.prototype.contains");
+function StringIncludes(searchString /* position */) {  // length == 1
+  CHECK_OBJECT_COERCIBLE(this, "String.prototype.includes");
 
   var s = TO_STRING_INLINE(this);
 
   if (IS_REGEXP(searchString)) {
     throw MakeTypeError("first_argument_not_regexp",
-                        ["String.prototype.contains"]);
+                        ["String.prototype.includes"]);
   }
 
   var ss = TO_STRING_INLINE(searchString);
@@ -184,7 +184,7 @@ function ExtendStringPrototype() {
   // Set up the non-enumerable functions on the String prototype object.
   InstallFunctions($String.prototype, DONT_ENUM, $Array(
     "codePointAt", StringCodePointAt,
-    "contains", StringContains,
+    "includes", StringIncludes,
     "endsWith", StringEndsWith,
     "repeat", StringRepeat,
     "startsWith", StringStartsWith
diff --git a/deps/v8/src/harmony-templates.js b/deps/v8/src/harmony-templates.js
new file mode 100644 (file)
index 0000000..254f434
--- /dev/null
@@ -0,0 +1,94 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+"use strict";
+
+var callSiteCache = new $Map;
+
+function SameCallSiteElements(rawStrings, other) {
+  var length = rawStrings.length;
+  other = other.raw;
+
+  if (length !== other.length) return false;
+
+  for (var i = 0; i < length; ++i) {
+    if (rawStrings[i] !== other[i]) return false;
+  }
+
+  return true;
+}
+
+
+function GetCachedCallSite(siteObj, hash) {
+  var obj = %MapGet(callSiteCache, hash);
+
+  if (IS_UNDEFINED(obj)) return;
+
+  var length = obj.length;
+  for (var i = 0; i < length; ++i) {
+    if (SameCallSiteElements(siteObj, obj[i])) return obj[i];
+  }
+}
+
+
+function SetCachedCallSite(siteObj, hash) {
+  var obj = %MapGet(callSiteCache, hash);
+  var array;
+
+  if (IS_UNDEFINED(obj)) {
+    array = new InternalArray(1);
+    array[0] = siteObj;
+    %MapSet(callSiteCache, hash, array);
+  } else {
+    obj.push(siteObj);
+  }
+
+  return siteObj;
+}
+
+
+function GetTemplateCallSite(siteObj, rawStrings, hash) {
+  var cached = GetCachedCallSite(rawStrings, hash);
+
+  if (!IS_UNDEFINED(cached)) return cached;
+
+  %AddNamedProperty(siteObj, "raw", %ObjectFreeze(rawStrings),
+      READ_ONLY | DONT_ENUM | DONT_DELETE);
+
+  return SetCachedCallSite(%ObjectFreeze(siteObj), hash);
+}
+
+
+// ES6 Draft 10-14-2014, section 21.1.2.4
+function StringRaw(callSite) {
+  // TODO(caitp): Use rest parameters when implemented
+  var numberOfSubstitutions = %_ArgumentsLength();
+  var cooked = ToObject(callSite);
+  var raw = ToObject(cooked.raw);
+  var literalSegments = ToLength(raw.length);
+  if (literalSegments <= 0) return "";
+
+  var result = ToString(raw[0]);
+
+  for (var i = 1; i < literalSegments; ++i) {
+    if (i < numberOfSubstitutions) {
+      result += ToString(%_Arguments(i));
+    }
+    result += ToString(raw[i]);
+  }
+
+  return result;
+}
+
+
+function ExtendStringForTemplates() {
+  %CheckIsBootstrapping();
+
+  // Set up the non-enumerable functions on the String object.
+  InstallFunctions($String, DONT_ENUM, $Array(
+    "raw", StringRaw
+  ));
+}
+
+ExtendStringForTemplates();
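
GetTemplateCallSite freezes both the cooked site object and its raw array and caches them by hash, so repeated evaluation of the same tagged template hands the tag one shared, frozen call-site object; StringRaw then interleaves raw segments with the trailing substitution arguments. A sketch of the resulting behavior, assuming d8 with --harmony_templates:

    // Sketch; assumes d8 --harmony_templates.
    print(String.raw`a\n${1 + 1}b`);  // a\n2b: the backslash-n stays raw

    function tag(site) { return site; }
    function run() { return tag`x${0}y`; }
    var first = run(), second = run();
    print(first === second);          // true: served from callSiteCache
    print(Object.isFrozen(first) && Object.isFrozen(first.raw));  // true
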
index 41a84df..0336456 100644 (file)
@@ -2,15 +2,13 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-'use strict';
+"use strict";
 
 // This file relies on the fact that the following declaration has been made
 // in runtime.js and symbol.js:
 // var $Object = global.Object;
 // var $Symbol = global.Symbol;
 
-var symbolToStringTag = InternalSymbol("Symbol.toStringTag");
-
 var kBuiltinStringTags = {
   "__proto__": null,
   "Arguments": true,
index 0129f38..c2eb452 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-'use strict';
+"use strict";
 
 // This file relies on the fact that the following declaration has been made
 // in runtime.js:
@@ -58,6 +58,17 @@ function NAMEForEach(f /* thisArg */) {  // length == 1
     %_CallFunction(new_receiver, TO_OBJECT_INLINE(element), i, this, f);
   }
 }
+
+// ES6 draft 08-24-14, section 22.2.2.2
+function NAMEOf() {  // length == 0
+  var length = %_ArgumentsLength();
+  var array = new this(length);
+  for (var i = 0; i < length; i++) {
+    array[i] = %_Arguments(i);
+  }
+  return array;
+}
+
 endmacro
 
 TYPED_ARRAYS(TYPED_ARRAY_HARMONY_ADDITIONS)
@@ -67,6 +78,11 @@ function HarmonyTypedArrayExtendPrototypes() {
 macro EXTEND_TYPED_ARRAY(ARRAY_ID, NAME, ELEMENT_SIZE)
   %CheckIsBootstrapping();
 
+  // Set up non-enumerable functions on the object.
+  InstallFunctions(global.NAME, DONT_ENUM | DONT_DELETE | READ_ONLY, $Array(
+    "of", NAMEOf
+  ));
+
   // Set up non-enumerable functions on the prototype object.
   InstallFunctions(global.NAME.prototype, DONT_ENUM, $Array(
     "forEach", NAMEForEach
index 7f217bb..196eb13 100644 (file)
@@ -1167,13 +1167,10 @@ void V8HeapExplorer::ExtractJSObjectReferences(
                          "native_context", global_obj->native_context(),
                          GlobalObject::kNativeContextOffset);
     SetInternalReference(global_obj, entry,
-                         "global_context", global_obj->global_context(),
-                         GlobalObject::kGlobalContextOffset);
-    SetInternalReference(global_obj, entry,
                          "global_proxy", global_obj->global_proxy(),
                          GlobalObject::kGlobalProxyOffset);
     STATIC_ASSERT(GlobalObject::kHeaderSize - JSObject::kHeaderSize ==
-                 4 * kPointerSize);
+                 3 * kPointerSize);
   } else if (obj->IsJSArrayBufferView()) {
     JSArrayBufferView* view = JSArrayBufferView::cast(obj);
     SetInternalReference(view, entry, "buffer", view->buffer(),
@@ -1282,7 +1279,7 @@ void V8HeapExplorer::ExtractContextReferences(int entry, Context* context) {
                   Context::FIRST_WEAK_SLOT);
     STATIC_ASSERT(Context::NEXT_CONTEXT_LINK + 1 ==
                   Context::NATIVE_CONTEXT_SLOTS);
-    STATIC_ASSERT(Context::FIRST_WEAK_SLOT + 5 ==
+    STATIC_ASSERT(Context::FIRST_WEAK_SLOT + 4 ==
                   Context::NATIVE_CONTEXT_SLOTS);
   }
 }
@@ -1629,53 +1626,32 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
     DescriptorArray* descs = js_obj->map()->instance_descriptors();
     int real_size = js_obj->map()->NumberOfOwnDescriptors();
     for (int i = 0; i < real_size; i++) {
-      switch (descs->GetType(i)) {
-        case FIELD: {
-          Representation r = descs->GetDetails(i).representation();
+      PropertyDetails details = descs->GetDetails(i);
+      switch (details.location()) {
+        case IN_OBJECT: {
+          Representation r = details.representation();
           if (r.IsSmi() || r.IsDouble()) break;
-          int index = descs->GetFieldIndex(i);
 
           Name* k = descs->GetKey(i);
-          if (index < js_obj->map()->inobject_properties()) {
-            Object* value = js_obj->InObjectPropertyAt(index);
-            if (k != heap_->hidden_string()) {
-              SetPropertyReference(
-                  js_obj, entry,
-                  k, value,
-                  NULL,
-                  js_obj->GetInObjectPropertyOffset(index));
-            } else {
-              TagObject(value, "(hidden properties)");
-              SetInternalReference(
-                  js_obj, entry,
-                  "hidden_properties", value,
-                  js_obj->GetInObjectPropertyOffset(index));
-            }
+          FieldIndex field_index = FieldIndex::ForDescriptor(js_obj->map(), i);
+          Object* value = js_obj->RawFastPropertyAt(field_index);
+          int field_offset =
+              field_index.is_inobject() ? field_index.offset() : -1;
+
+          if (k != heap_->hidden_string()) {
+            SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry, k,
+                                               value, NULL, field_offset);
           } else {
-            FieldIndex field_index =
-                FieldIndex::ForDescriptor(js_obj->map(), i);
-            Object* value = js_obj->RawFastPropertyAt(field_index);
-            if (k != heap_->hidden_string()) {
-              SetPropertyReference(js_obj, entry, k, value);
-            } else {
-              TagObject(value, "(hidden properties)");
-              SetInternalReference(js_obj, entry, "hidden_properties", value);
-            }
+            TagObject(value, "(hidden properties)");
+            SetInternalReference(js_obj, entry, "hidden_properties", value,
+                                 field_offset);
           }
           break;
         }
-        case CONSTANT:
-          SetPropertyReference(
-              js_obj, entry,
-              descs->GetKey(i), descs->GetConstant(i));
-          break;
-        case CALLBACKS:
-          ExtractAccessorPairProperty(
-              js_obj, entry,
-              descs->GetKey(i), descs->GetValue(i));
-          break;
-        case NORMAL:  // only in slow mode
-          UNREACHABLE();
+        case IN_DESCRIPTOR:
+          SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
+                                             descs->GetKey(i),
+                                             descs->GetValue(i));
           break;
       }
     }
@@ -1695,27 +1671,30 @@ void V8HeapExplorer::ExtractPropertyReferences(JSObject* js_obj, int entry) {
           SetInternalReference(js_obj, entry, "hidden_properties", value);
           continue;
         }
-        if (ExtractAccessorPairProperty(js_obj, entry, k, value)) continue;
-        SetPropertyReference(js_obj, entry, Name::cast(k), value);
+        PropertyDetails details = dictionary->DetailsAt(i);
+        SetDataOrAccessorPropertyReference(details.kind(), js_obj, entry,
+                                           Name::cast(k), value);
       }
     }
   }
 }
 
 
-bool V8HeapExplorer::ExtractAccessorPairProperty(
-    JSObject* js_obj, int entry, Object* key, Object* callback_obj) {
-  if (!callback_obj->IsAccessorPair()) return false;
+void V8HeapExplorer::ExtractAccessorPairProperty(JSObject* js_obj, int entry,
+                                                 Name* key,
+                                                 Object* callback_obj,
+                                                 int field_offset) {
+  if (!callback_obj->IsAccessorPair()) return;
   AccessorPair* accessors = AccessorPair::cast(callback_obj);
+  SetPropertyReference(js_obj, entry, key, accessors, NULL, field_offset);
   Object* getter = accessors->getter();
   if (!getter->IsOddball()) {
-    SetPropertyReference(js_obj, entry, Name::cast(key), getter, "get %s");
+    SetPropertyReference(js_obj, entry, key, getter, "get %s");
   }
   Object* setter = accessors->setter();
   if (!setter->IsOddball()) {
-    SetPropertyReference(js_obj, entry, Name::cast(key), setter, "set %s");
+    SetPropertyReference(js_obj, entry, key, setter, "set %s");
   }
-  return true;
 }
 
 
@@ -2054,6 +2033,20 @@ void V8HeapExplorer::SetWeakReference(HeapObject* parent_obj,
 }
 
 
+void V8HeapExplorer::SetDataOrAccessorPropertyReference(
+    PropertyKind kind, JSObject* parent_obj, int parent_entry,
+    Name* reference_name, Object* child_obj, const char* name_format_string,
+    int field_offset) {
+  if (kind == ACCESSOR) {
+    ExtractAccessorPairProperty(parent_obj, parent_entry, reference_name,
+                                child_obj, field_offset);
+  } else {
+    SetPropertyReference(parent_obj, parent_entry, reference_name, child_obj,
+                         name_format_string, field_offset);
+  }
+}
+
+
 void V8HeapExplorer::SetPropertyReference(HeapObject* parent_obj,
                                           int parent_entry,
                                           Name* reference_name,
@@ -2165,6 +2158,9 @@ const char* V8HeapExplorer::GetStrongGcSubrootName(Object* object) {
 #define SYMBOL_NAME(name) NAME_ENTRY(name)
     PRIVATE_SYMBOL_LIST(SYMBOL_NAME)
 #undef SYMBOL_NAME
+#define SYMBOL_NAME(name, varname, description) NAME_ENTRY(name)
+    PUBLIC_SYMBOL_LIST(SYMBOL_NAME)
+#undef SYMBOL_NAME
 #undef NAME_ENTRY
     CHECK(!strong_gc_subroot_names_.is_empty());
   }
index fb43876..8aef437 100644 (file)
@@ -381,8 +381,8 @@ class V8HeapExplorer : public HeapEntriesAllocator {
   void ExtractFixedArrayReferences(int entry, FixedArray* array);
   void ExtractClosureReferences(JSObject* js_obj, int entry);
   void ExtractPropertyReferences(JSObject* js_obj, int entry);
-  bool ExtractAccessorPairProperty(JSObject* js_obj, int entry,
-                                   Object* key, Object* callback_obj);
+  void ExtractAccessorPairProperty(JSObject* js_obj, int entry, Name* key,
+                                   Object* callback_obj, int field_offset = -1);
   void ExtractElementReferences(JSObject* js_obj, int entry);
   void ExtractInternalReferences(JSObject* js_obj, int entry);
 
@@ -430,6 +430,12 @@ class V8HeapExplorer : public HeapEntriesAllocator {
                             Object* child,
                             const char* name_format_string = NULL,
                             int field_offset = -1);
+  void SetDataOrAccessorPropertyReference(PropertyKind kind,
+                                          JSObject* parent_obj, int parent,
+                                          Name* reference_name, Object* child,
+                                          const char* name_format_string = NULL,
+                                          int field_offset = -1);
+
   void SetUserGlobalReference(Object* user_global);
   void SetRootGcRootsReference();
   void SetGcRootsReference(VisitorSynchronization::SyncTag tag);
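
ExtractAccessorPairProperty now records the AccessorPair itself as a property reference and then a "get %s"/"set %s" edge to each accessor that is not an oddball, while the new SetDataOrAccessorPropertyReference dispatches on PropertyKind for both descriptor-array and dictionary properties. In JavaScript terms, the shape whose snapshot edges change looks like this (illustrative only):

    // Illustrative only: an accessor property backed by an AccessorPair.
    var o = {};
    Object.defineProperty(o, "answer", {
      get: function() { return 42; }  // snapshot gains a "get answer" edge
      // no setter: the pair holds an oddball, so no "set answer" edge
    });
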
index b352f44..ff2a559 100644 (file)
@@ -11,9 +11,11 @@ namespace internal {
 
 const double GCIdleTimeHandler::kConservativeTimeRatio = 0.9;
 const size_t GCIdleTimeHandler::kMaxMarkCompactTimeInMs = 1000;
+const size_t GCIdleTimeHandler::kMaxFinalIncrementalMarkCompactTimeInMs = 1000;
 const size_t GCIdleTimeHandler::kMinTimeForFinalizeSweeping = 100;
 const int GCIdleTimeHandler::kMaxMarkCompactsInIdleRound = 7;
 const int GCIdleTimeHandler::kIdleScavengeThreshold = 5;
+const double GCIdleTimeHandler::kHighContextDisposalRate = 100;
 
 
 void GCIdleTimeAction::Print() {
@@ -25,7 +27,11 @@ void GCIdleTimeAction::Print() {
       PrintF("no action");
       break;
     case DO_INCREMENTAL_MARKING:
-      PrintF("incremental marking with step %" V8_PTR_PREFIX "d", parameter);
+      PrintF("incremental marking with step %" V8_PTR_PREFIX "d / ms",
+             parameter);
+      if (additional_work) {
+        PrintF("; finalized marking");
+      }
       break;
     case DO_SCAVENGE:
       PrintF("scavenge");
@@ -40,6 +46,25 @@ void GCIdleTimeAction::Print() {
 }
 
 
+void GCIdleTimeHandler::HeapState::Print() {
+  PrintF("contexts_disposed=%d ", contexts_disposed);
+  PrintF("contexts_disposal_rate=%f ", contexts_disposal_rate);
+  PrintF("size_of_objects=%" V8_PTR_PREFIX "d ", size_of_objects);
+  PrintF("incremental_marking_stopped=%d ", incremental_marking_stopped);
+  PrintF("can_start_incremental_marking=%d ", can_start_incremental_marking);
+  PrintF("sweeping_in_progress=%d ", sweeping_in_progress);
+  PrintF("mark_compact_speed=%" V8_PTR_PREFIX "d ",
+         mark_compact_speed_in_bytes_per_ms);
+  PrintF("incremental_marking_speed=%" V8_PTR_PREFIX "d ",
+         incremental_marking_speed_in_bytes_per_ms);
+  PrintF("scavenge_speed=%" V8_PTR_PREFIX "d ", scavenge_speed_in_bytes_per_ms);
+  PrintF("new_space_size=%" V8_PTR_PREFIX "d ", used_new_space_size);
+  PrintF("new_space_capacity=%" V8_PTR_PREFIX "d ", new_space_capacity);
+  PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d",
+         new_space_allocation_throughput_in_bytes_per_ms);
+}
+
+
 size_t GCIdleTimeHandler::EstimateMarkingStepSize(
     size_t idle_time_in_ms, size_t marking_speed_in_bytes_per_ms) {
   DCHECK(idle_time_in_ms > 0);
@@ -64,8 +89,7 @@ size_t GCIdleTimeHandler::EstimateMarkingStepSize(
 size_t GCIdleTimeHandler::EstimateMarkCompactTime(
     size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms) {
   // TODO(hpayer): Be more precise about the type of mark-compact event. It
-  // makes a huge difference if it is incremental or non-incremental and if
-  // compaction is happening.
+  // makes a huge difference if compaction is happening.
   if (mark_compact_speed_in_bytes_per_ms == 0) {
     mark_compact_speed_in_bytes_per_ms = kInitialConservativeMarkCompactSpeed;
   }
@@ -74,6 +98,19 @@ size_t GCIdleTimeHandler::EstimateMarkCompactTime(
 }
 
 
+size_t GCIdleTimeHandler::EstimateFinalIncrementalMarkCompactTime(
+    size_t size_of_objects,
+    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+  if (final_incremental_mark_compact_speed_in_bytes_per_ms == 0) {
+    final_incremental_mark_compact_speed_in_bytes_per_ms =
+        kInitialConservativeFinalIncrementalMarkCompactSpeed;
+  }
+  size_t result =
+      size_of_objects / final_incremental_mark_compact_speed_in_bytes_per_ms;
+  return Min(result, kMaxFinalIncrementalMarkCompactTimeInMs);
+}
+
+
 bool GCIdleTimeHandler::ShouldDoScavenge(
     size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
     size_t scavenge_speed_in_bytes_per_ms,
@@ -122,6 +159,23 @@ bool GCIdleTimeHandler::ShouldDoMarkCompact(
 }
 
 
+bool GCIdleTimeHandler::ShouldDoContextDisposalMarkCompact(
+    int contexts_disposed, double contexts_disposal_rate) {
+  return contexts_disposed > 0 && contexts_disposal_rate > 0 &&
+         contexts_disposal_rate < kHighContextDisposalRate;
+}
+
+
+bool GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
+    size_t idle_time_in_ms, size_t size_of_objects,
+    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
+  return idle_time_in_ms >=
+         EstimateFinalIncrementalMarkCompactTime(
+             size_of_objects,
+             final_incremental_mark_compact_speed_in_bytes_per_ms);
+}
+
+
 // The following logic is implemented by the controller:
 // (1) If we don't have any idle time, do nothing, unless a context was
 // disposed, incremental marking is stopped, and disposal is frequent. Then do
@@ -129,24 +183,26 @@ bool GCIdleTimeHandler::ShouldDoMarkCompact(
 // (2) If the new space is almost full and we can afford a Scavenge or if the
 // next Scavenge will very likely take long, then a Scavenge is performed.
 // (3) If there is currently no MarkCompact idle round going on, we start a
-// new idle round if enough garbage was created or we received a context
-// disposal event. Otherwise we do not perform garbage collection to keep
-// system utilization low.
+// new idle round if enough garbage was created. Otherwise we do not perform
+// garbage collection to keep system utilization low.
 // (4) If incremental marking is done, we perform a full garbage collection
-// if context was disposed or if we are allowed to still do full garbage
-// collections during this idle round or if we are not allowed to start
-// incremental marking. Otherwise we do not perform garbage collection to
-// keep system utilization low.
+// if we are allowed to still do full garbage
+// round or if we are not allowed to start incremental marking. Otherwise we
+// do not perform garbage collection to keep system utilization low.
 // (5) If sweeping is in progress and we received a large enough idle time
 // request, we finalize sweeping here.
 // (6) If incremental marking is in progress, we perform a marking step. Note,
 // that this currently may trigger a full garbage collection.
-GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
+GCIdleTimeAction GCIdleTimeHandler::Compute(double idle_time_in_ms,
                                             HeapState heap_state) {
-  if (idle_time_in_ms == 0) {
+  if (static_cast<int>(idle_time_in_ms) <= 0) {
+    if (heap_state.contexts_disposed > 0) {
+      StartIdleRound();
+    }
     if (heap_state.incremental_marking_stopped) {
-      if (heap_state.size_of_objects < kSmallHeapSize &&
-          heap_state.contexts_disposed > 0) {
+      if (ShouldDoContextDisposalMarkCompact(
+              heap_state.contexts_disposed,
+              heap_state.contexts_disposal_rate)) {
         return GCIdleTimeAction::FullGC();
       }
     }
@@ -154,7 +210,7 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
   }
 
   if (ShouldDoScavenge(
-          idle_time_in_ms, heap_state.new_space_capacity,
+          static_cast<size_t>(idle_time_in_ms), heap_state.new_space_capacity,
           heap_state.used_new_space_size,
           heap_state.scavenge_speed_in_bytes_per_ms,
           heap_state.new_space_allocation_throughput_in_bytes_per_ms)) {
@@ -162,7 +218,7 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
   }
 
   if (IsMarkCompactIdleRoundFinished()) {
-    if (EnoughGarbageSinceLastIdleRound() || heap_state.contexts_disposed > 0) {
+    if (EnoughGarbageSinceLastIdleRound()) {
       StartIdleRound();
     } else {
       return GCIdleTimeAction::Done();
@@ -170,11 +226,9 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
   }
 
   if (heap_state.incremental_marking_stopped) {
-    // TODO(jochen): Remove context disposal dependant logic.
-    if (ShouldDoMarkCompact(idle_time_in_ms, heap_state.size_of_objects,
-                            heap_state.mark_compact_speed_in_bytes_per_ms) ||
-        (heap_state.size_of_objects < kSmallHeapSize &&
-         heap_state.contexts_disposed > 0)) {
+    if (ShouldDoMarkCompact(static_cast<size_t>(idle_time_in_ms),
+                            heap_state.size_of_objects,
+                            heap_state.mark_compact_speed_in_bytes_per_ms)) {
       // If there are no more than two GCs left in this idle round and we are
       // allowed to do a full GC, then make those GCs full in order to compact
       // the code space.
@@ -182,10 +236,9 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
       // can get rid of this special case and always start incremental marking.
       int remaining_mark_sweeps =
           kMaxMarkCompactsInIdleRound - mark_compacts_since_idle_round_started_;
-      if (heap_state.contexts_disposed > 0 ||
-          (idle_time_in_ms > kMaxFrameRenderingIdleTime &&
-           (remaining_mark_sweeps <= 2 ||
-            !heap_state.can_start_incremental_marking))) {
+      if (static_cast<size_t>(idle_time_in_ms) > kMaxFrameRenderingIdleTime &&
+          (remaining_mark_sweeps <= 2 ||
+           !heap_state.can_start_incremental_marking)) {
         return GCIdleTimeAction::FullGC();
       }
     }
@@ -195,7 +248,7 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
   }
   // TODO(hpayer): Estimate finalize sweeping time.
   if (heap_state.sweeping_in_progress &&
-      idle_time_in_ms >= kMinTimeForFinalizeSweeping) {
+      static_cast<size_t>(idle_time_in_ms) >= kMinTimeForFinalizeSweeping) {
     return GCIdleTimeAction::FinalizeSweeping();
   }
 
@@ -204,7 +257,8 @@ GCIdleTimeAction GCIdleTimeHandler::Compute(size_t idle_time_in_ms,
     return GCIdleTimeAction::Nothing();
   }
   size_t step_size = EstimateMarkingStepSize(
-      idle_time_in_ms, heap_state.incremental_marking_speed_in_bytes_per_ms);
+      static_cast<size_t>(kIncrementalMarkingStepTimeInMs),
+      heap_state.incremental_marking_speed_in_bytes_per_ms);
   return GCIdleTimeAction::IncrementalMarking(step_size);
 }
 }
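
The numbered comment block above is the controller's specification. As a reading aid, a compact model of that decision order in JavaScript; the names mirror the C++, but none of this is a V8 API, and the boolean fields stand in for the Estimate*/ShouldDo* calculations:

    // Illustrative model of GCIdleTimeHandler::Compute; not a V8 API.
    var kHighContextDisposalRate = 100;   // ms per disposal
    var kMinTimeForFinalizeSweeping = 100;

    function compute(idleTimeMs, h) {
      if (idleTimeMs <= 0) {                               // step (1)
        if (h.incrementalMarkingStopped && h.contextsDisposed > 0 &&
            h.contextsDisposalRate > 0 &&
            h.contextsDisposalRate < kHighContextDisposalRate) {
          return "FULL_GC";
        }
        return "NOTHING";
      }
      if (h.newSpaceAlmostFull) return "SCAVENGE";         // step (2)
      if (h.idleRoundFinished && !h.enoughGarbage)         // step (3)
        return "DONE";
      if (h.incrementalMarkingStopped && h.worthFullGC)    // step (4)
        return "FULL_GC";
      if (h.sweepingInProgress &&                          // step (5)
          idleTimeMs >= kMinTimeForFinalizeSweeping)
        return "FINALIZE_SWEEPING";
      if (!h.incrementalMarkingStopped ||                  // step (6)
          h.canStartIncrementalMarking)
        return "INCREMENTAL_MARKING";
      return "NOTHING";
    }

    // A 120 ms idle notification while sweeping is pending:
    print(compute(120, { sweepingInProgress: true,
                         incrementalMarkingStopped: true }));
    // -> FINALIZE_SWEEPING
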
index edd5e42..4dd190b 100644 (file)
@@ -26,6 +26,7 @@ class GCIdleTimeAction {
     GCIdleTimeAction result;
     result.type = DONE;
     result.parameter = 0;
+    result.additional_work = false;
     return result;
   }
 
@@ -33,6 +34,7 @@ class GCIdleTimeAction {
     GCIdleTimeAction result;
     result.type = DO_NOTHING;
     result.parameter = 0;
+    result.additional_work = false;
     return result;
   }
 
@@ -40,6 +42,7 @@ class GCIdleTimeAction {
     GCIdleTimeAction result;
     result.type = DO_INCREMENTAL_MARKING;
     result.parameter = step_size;
+    result.additional_work = false;
     return result;
   }
 
@@ -47,6 +50,7 @@ class GCIdleTimeAction {
     GCIdleTimeAction result;
     result.type = DO_SCAVENGE;
     result.parameter = 0;
+    result.additional_work = false;
     return result;
   }
 
@@ -54,6 +58,7 @@ class GCIdleTimeAction {
     GCIdleTimeAction result;
     result.type = DO_FULL_GC;
     result.parameter = 0;
+    result.additional_work = false;
     return result;
   }
 
@@ -61,6 +66,7 @@ class GCIdleTimeAction {
     GCIdleTimeAction result;
     result.type = DO_FINALIZE_SWEEPING;
     result.parameter = 0;
+    result.additional_work = false;
     return result;
   }
 
@@ -68,6 +74,7 @@ class GCIdleTimeAction {
 
   GCIdleTimeActionType type;
   intptr_t parameter;
+  bool additional_work;
 };
 
 
@@ -92,9 +99,18 @@ class GCIdleTimeHandler {
   // conservative lower bound for the mark-compact speed.
   static const size_t kInitialConservativeMarkCompactSpeed = 2 * MB;
 
+  // If we haven't recorded any final incremental mark-compact events yet,
+  // we use a conservative lower bound for the speed.
+  static const size_t kInitialConservativeFinalIncrementalMarkCompactSpeed =
+      2 * MB;
+
   // Maximum mark-compact time returned by EstimateMarkCompactTime.
   static const size_t kMaxMarkCompactTimeInMs;
 
+  // Maximum final incremental mark-compact time returned by
+  // EstimateFinalIncrementalMarkCompactTime.
+  static const size_t kMaxFinalIncrementalMarkCompactTimeInMs;
+
   // Minimum time to finalize sweeping phase. The main thread may wait for
   // sweeper threads.
   static const size_t kMinTimeForFinalizeSweeping;
@@ -106,10 +122,6 @@ class GCIdleTimeHandler {
   // Number of scavenges that will trigger start of new idle round.
   static const int kIdleScavengeThreshold;
 
-  // Heap size threshold below which we prefer mark-compact over incremental
-  // step.
-  static const size_t kSmallHeapSize = 4 * kPointerSize * MB;
-
   // That is the maximum idle time we will have during frame rendering.
   static const size_t kMaxFrameRenderingIdleTime = 16;
 
@@ -117,14 +129,25 @@ class GCIdleTimeHandler {
   // lower bound for the scavenger speed.
   static const size_t kInitialConservativeScavengeSpeed = 100 * KB;
 
-  struct HeapState {
+  // Disposing contexts more often than once per this many ms triggers a full GC.
+  static const double kHighContextDisposalRate;
+
+  // Incremental marking step time.
+  static const size_t kIncrementalMarkingStepTimeInMs = 1;
+
+  class HeapState {
+   public:
+    void Print();
+
     int contexts_disposed;
+    double contexts_disposal_rate;
     size_t size_of_objects;
     bool incremental_marking_stopped;
     bool can_start_incremental_marking;
     bool sweeping_in_progress;
     size_t mark_compact_speed_in_bytes_per_ms;
     size_t incremental_marking_speed_in_bytes_per_ms;
+    size_t final_incremental_mark_compact_speed_in_bytes_per_ms;
     size_t scavenge_speed_in_bytes_per_ms;
     size_t used_new_space_size;
     size_t new_space_capacity;
@@ -135,7 +158,7 @@ class GCIdleTimeHandler {
       : mark_compacts_since_idle_round_started_(0),
         scavenges_since_last_idle_round_(0) {}
 
-  GCIdleTimeAction Compute(size_t idle_time_in_ms, HeapState heap_state);
+  GCIdleTimeAction Compute(double idle_time_in_ms, HeapState heap_state);
 
   void NotifyIdleMarkCompact() {
     if (mark_compacts_since_idle_round_started_ < kMaxMarkCompactsInIdleRound) {
@@ -155,10 +178,20 @@ class GCIdleTimeHandler {
   static size_t EstimateMarkCompactTime(
       size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
 
+  static size_t EstimateFinalIncrementalMarkCompactTime(
+      size_t size_of_objects, size_t mark_compact_speed_in_bytes_per_ms);
+
   static bool ShouldDoMarkCompact(size_t idle_time_in_ms,
                                   size_t size_of_objects,
                                   size_t mark_compact_speed_in_bytes_per_ms);
 
+  static bool ShouldDoContextDisposalMarkCompact(int context_disposed,
+                                                 double contexts_disposal_rate);
+
+  static bool ShouldDoFinalIncrementalMarkCompact(
+      size_t idle_time_in_ms, size_t size_of_objects,
+      size_t final_incremental_mark_compact_speed_in_bytes_per_ms);
+
   static bool ShouldDoScavenge(
       size_t idle_time_in_ms, size_t new_space_size, size_t used_new_space_size,
       size_t scavenger_speed_in_bytes_per_ms,
index 8a40b53..a35872d 100644 (file)
@@ -26,6 +26,16 @@ GCTracer::AllocationEvent::AllocationEvent(double duration,
 }
 
 
+GCTracer::ContextDisposalEvent::ContextDisposalEvent(double time) {
+  time_ = time;
+}
+
+
+GCTracer::SurvivalEvent::SurvivalEvent(double survival_rate) {
+  survival_rate_ = survival_rate;
+}
+
+
 GCTracer::Event::Event(Type type, const char* gc_reason,
                        const char* collector_reason)
     : type(type),
@@ -63,6 +73,7 @@ const char* GCTracer::Event::TypeName(bool short_name) const {
         return "Scavenge";
       }
     case MARK_COMPACTOR:
+    case INCREMENTAL_MARK_COMPACTOR:
       if (short_name) {
         return "ms";
       } else {
@@ -88,15 +99,19 @@ GCTracer::GCTracer(Heap* heap)
       longest_incremental_marking_step_(0.0),
       cumulative_marking_duration_(0.0),
       cumulative_sweeping_duration_(0.0),
-      new_space_top_after_gc_(0) {
+      new_space_top_after_gc_(0),
+      start_counter_(0) {
   current_ = Event(Event::START, NULL, NULL);
   current_.end_time = base::OS::TimeCurrentMillis();
-  previous_ = previous_mark_compactor_event_ = current_;
+  previous_ = previous_incremental_mark_compactor_event_ = current_;
 }
 
 
 void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
                      const char* collector_reason) {
+  start_counter_++;
+  if (start_counter_ != 1) return;
+
   previous_ = current_;
   double start_time = base::OS::TimeCurrentMillis();
   if (new_space_top_after_gc_ != 0) {
@@ -105,13 +120,18 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
         reinterpret_cast<intptr_t>((heap_->new_space()->top()) -
                                    new_space_top_after_gc_));
   }
-  if (current_.type == Event::MARK_COMPACTOR)
-    previous_mark_compactor_event_ = current_;
+  if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR)
+    previous_incremental_mark_compactor_event_ = current_;
 
   if (collector == SCAVENGER) {
     current_ = Event(Event::SCAVENGER, gc_reason, collector_reason);
-  } else {
-    current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
+  } else if (collector == MARK_COMPACTOR) {
+    if (heap_->incremental_marking()->WasActivated()) {
+      current_ =
+          Event(Event::INCREMENTAL_MARK_COMPACTOR, gc_reason, collector_reason);
+    } else {
+      current_ = Event(Event::MARK_COMPACTOR, gc_reason, collector_reason);
+    }
   }
 
   current_.start_time = start_time;
@@ -137,7 +157,23 @@ void GCTracer::Start(GarbageCollector collector, const char* gc_reason,
 }
 
 
-void GCTracer::Stop() {
+void GCTracer::Stop(GarbageCollector collector) {
+  start_counter_--;
+  if (start_counter_ != 0) {
+    if (FLAG_trace_gc) {
+      PrintF("[Finished reentrant %s during %s.]\n",
+             collector == SCAVENGER ? "Scavenge" : "Mark-sweep",
+             current_.TypeName(false));
+    }
+    return;
+  }
+
+  DCHECK(start_counter_ >= 0);
+  DCHECK((collector == SCAVENGER && current_.type == Event::SCAVENGER) ||
+         (collector == MARK_COMPACTOR &&
+          (current_.type == Event::MARK_COMPACTOR ||
+           current_.type == Event::INCREMENTAL_MARK_COMPACTOR)));
+
   current_.end_time = base::OS::TimeCurrentMillis();
   current_.end_object_size = heap_->SizeOfObjects();
   current_.end_memory_size = heap_->isolate()->memory_allocator()->Size();
@@ -159,21 +195,30 @@ void GCTracer::Stop() {
         current_.cumulative_pure_incremental_marking_duration -
         previous_.cumulative_pure_incremental_marking_duration;
     scavenger_events_.push_front(current_);
-  } else {
+  } else if (current_.type == Event::INCREMENTAL_MARK_COMPACTOR) {
     current_.incremental_marking_steps =
         current_.cumulative_incremental_marking_steps -
-        previous_mark_compactor_event_.cumulative_incremental_marking_steps;
+        previous_incremental_mark_compactor_event_
+            .cumulative_incremental_marking_steps;
     current_.incremental_marking_bytes =
         current_.cumulative_incremental_marking_bytes -
-        previous_mark_compactor_event_.cumulative_incremental_marking_bytes;
+        previous_incremental_mark_compactor_event_
+            .cumulative_incremental_marking_bytes;
     current_.incremental_marking_duration =
         current_.cumulative_incremental_marking_duration -
-        previous_mark_compactor_event_.cumulative_incremental_marking_duration;
+        previous_incremental_mark_compactor_event_
+            .cumulative_incremental_marking_duration;
     current_.pure_incremental_marking_duration =
         current_.cumulative_pure_incremental_marking_duration -
-        previous_mark_compactor_event_
+        previous_incremental_mark_compactor_event_
             .cumulative_pure_incremental_marking_duration;
     longest_incremental_marking_step_ = 0.0;
+    incremental_mark_compactor_events_.push_front(current_);
+  } else {
+    DCHECK(current_.incremental_marking_bytes == 0);
+    DCHECK(current_.incremental_marking_duration == 0);
+    DCHECK(current_.pure_incremental_marking_duration == 0);
+    DCHECK(longest_incremental_marking_step_ == 0.0);
     mark_compactor_events_.push_front(current_);
   }
 
@@ -207,6 +252,16 @@ void GCTracer::AddNewSpaceAllocationTime(double duration,
 }
 
 
+void GCTracer::AddContextDisposalTime(double time) {
+  context_disposal_events_.push_front(ContextDisposalEvent(time));
+}
+
+
+void GCTracer::AddSurvivalRate(double survival_rate) {
+  survival_events_.push_front(SurvivalEvent(survival_rate));
+}
+
+
 void GCTracer::AddIncrementalMarkingStep(double duration, intptr_t bytes) {
   cumulative_incremental_marking_steps_++;
   cumulative_incremental_marking_bytes_ += bytes;
@@ -294,6 +349,7 @@ void GCTracer::PrintNVP() const {
          current_.scopes[Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED]);
   PrintF("misc_compaction=%.1f ",
          current_.scopes[Scope::MC_UPDATE_MISC_POINTERS]);
+  PrintF("weak_closure=%.1f ", current_.scopes[Scope::MC_WEAKCLOSURE]);
   PrintF("weakcollection_process=%.1f ",
          current_.scopes[Scope::MC_WEAKCOLLECTION_PROCESS]);
   PrintF("weakcollection_clear=%.1f ",
@@ -315,10 +371,13 @@ void GCTracer::PrintNVP() const {
   PrintF("nodes_died_in_new=%d ", heap_->nodes_died_in_new_space_);
   PrintF("nodes_copied_in_new=%d ", heap_->nodes_copied_in_new_space_);
   PrintF("nodes_promoted=%d ", heap_->nodes_promoted_);
+  PrintF("promotion_ratio=%.1f%% ", heap_->promotion_ratio_);
   PrintF("promotion_rate=%.1f%% ", heap_->promotion_rate_);
   PrintF("semi_space_copy_rate=%.1f%% ", heap_->semi_space_copied_rate_);
+  PrintF("average_survival_rate%.1f%% ", AverageSurvivalRate());
   PrintF("new_space_allocation_throughput=%" V8_PTR_PREFIX "d ",
          NewSpaceAllocationThroughputInBytesPerMillisecond());
+  PrintF("context_disposal_rate=%.1f ", ContextDisposalRateInMilliseconds());
 
   if (current_.type == Event::SCAVENGER) {
     PrintF("steps_count=%d ", current_.incremental_marking_steps);
@@ -370,15 +429,15 @@ double GCTracer::MeanIncrementalMarkingDuration() const {
 
   // We haven't completed an entire round of incremental marking, yet.
   // Use data from GCTracer instead of data from event buffers.
-  if (mark_compactor_events_.empty()) {
+  if (incremental_mark_compactor_events_.empty()) {
     return cumulative_incremental_marking_duration_ /
            cumulative_incremental_marking_steps_;
   }
 
   int steps = 0;
   double durations = 0.0;
-  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
-  while (iter != mark_compactor_events_.end()) {
+  EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
+  while (iter != incremental_mark_compactor_events_.end()) {
     steps += iter->incremental_marking_steps;
     durations += iter->incremental_marking_duration;
     ++iter;
@@ -393,11 +452,12 @@ double GCTracer::MeanIncrementalMarkingDuration() const {
 double GCTracer::MaxIncrementalMarkingDuration() const {
   // We haven't completed an entire round of incremental marking, yet.
   // Use data from GCTracer instead of data from event buffers.
-  if (mark_compactor_events_.empty()) return longest_incremental_marking_step_;
+  if (incremental_mark_compactor_events_.empty())
+    return longest_incremental_marking_step_;
 
   double max_duration = 0.0;
-  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
-  while (iter != mark_compactor_events_.end())
+  EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
+  while (iter != incremental_mark_compactor_events_.end())
     max_duration = Max(iter->longest_incremental_marking_step, max_duration);
 
   return max_duration;
@@ -409,15 +469,15 @@ intptr_t GCTracer::IncrementalMarkingSpeedInBytesPerMillisecond() const {
 
   // We haven't completed an entire round of incremental marking, yet.
   // Use data from GCTracer instead of data from event buffers.
-  if (mark_compactor_events_.empty()) {
+  if (incremental_mark_compactor_events_.empty()) {
     return static_cast<intptr_t>(cumulative_incremental_marking_bytes_ /
                                  cumulative_pure_incremental_marking_duration_);
   }
 
   intptr_t bytes = 0;
   double durations = 0.0;
-  EventBuffer::const_iterator iter = mark_compactor_events_.begin();
-  while (iter != mark_compactor_events_.end()) {
+  EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
+  while (iter != incremental_mark_compactor_events_.end()) {
     bytes += iter->incremental_marking_bytes;
     durations += iter->pure_incremental_marking_duration;
     ++iter;
@@ -451,8 +511,24 @@ intptr_t GCTracer::MarkCompactSpeedInBytesPerMillisecond() const {
   EventBuffer::const_iterator iter = mark_compactor_events_.begin();
   while (iter != mark_compactor_events_.end()) {
     bytes += iter->start_object_size;
-    durations += iter->end_time - iter->start_time +
-                 iter->pure_incremental_marking_duration;
+    durations += iter->end_time - iter->start_time;
+    ++iter;
+  }
+
+  if (durations == 0.0) return 0;
+
+  return static_cast<intptr_t>(bytes / durations);
+}
+
+
+intptr_t GCTracer::FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()
+    const {
+  intptr_t bytes = 0;
+  double durations = 0.0;
+  EventBuffer::const_iterator iter = incremental_mark_compactor_events_.begin();
+  while (iter != incremental_mark_compactor_events_.end()) {
+    bytes += iter->start_object_size;
+    durations += iter->end_time - iter->start_time;
     ++iter;
   }
 
@@ -476,5 +552,43 @@ intptr_t GCTracer::NewSpaceAllocationThroughputInBytesPerMillisecond() const {
 
   return static_cast<intptr_t>(bytes / durations);
 }
+
+
+double GCTracer::ContextDisposalRateInMilliseconds() const {
+  if (context_disposal_events_.size() < kRingBufferMaxSize) return 0.0;
+
+  double begin = base::OS::TimeCurrentMillis();
+  double end = 0.0;
+  ContextDisposalEventBuffer::const_iterator iter =
+      context_disposal_events_.begin();
+  while (iter != context_disposal_events_.end()) {
+    end = iter->time_;
+    ++iter;
+  }
+
+  return (begin - end) / context_disposal_events_.size();
+}
+
+
+double GCTracer::AverageSurvivalRate() const {
+  if (survival_events_.size() == 0) return 0.0;
+
+  double sum_of_rates = 0.0;
+  SurvivalEventBuffer::const_iterator iter = survival_events_.begin();
+  while (iter != survival_events_.end()) {
+    sum_of_rates += iter->survival_rate_;
+    ++iter;
+  }
+
+  return sum_of_rates / static_cast<double>(survival_events_.size());
+}
+
+
+bool GCTracer::SurvivalEventsRecorded() const {
+  return survival_events_.size() > 0;
+}
+
+
+void GCTracer::ResetSurvivalEvents() { survival_events_.reset(); }
 }
 }  // namespace v8::internal
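
ContextDisposalRateInMilliseconds deliberately reports 0 until the ring buffer has filled, then divides the span from the oldest recorded disposal to now by the number of events, so smaller values mean more frequent disposals; AverageSurvivalRate is a plain mean over the recorded events. The arithmetic, modelled in JavaScript (illustrative, not a V8 API):

    // Illustrative arithmetic only; mirrors the two functions above.
    var kRingBufferMaxSize = 10;

    function contextDisposalRate(nowMs, disposalTimesMs) {
      if (disposalTimesMs.length < kRingBufferMaxSize) return 0;
      var oldest = Math.min.apply(null, disposalTimesMs);
      return (nowMs - oldest) / disposalTimesMs.length;  // ms per disposal
    }

    function averageSurvivalRate(rates) {
      if (rates.length === 0) return 0;
      var sum = 0;
      for (var i = 0; i < rates.length; i++) sum += rates[i];
      return sum / rates.length;
    }

    // Ten disposals spread over the last second: one per 100 ms, which is
    // exactly the kHighContextDisposalRate boundary used above.
    var times = [0, 100, 200, 300, 400, 500, 600, 700, 800, 900];
    print(contextDisposalRate(1000, times));  // 100
    print(averageSurvivalRate([40, 60]));     // 50
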
index 4e70f07..528eb52 100644 (file)
@@ -71,6 +71,11 @@ class RingBuffer {
     elements_[begin_] = element;
   }
 
+  void reset() {
+    begin_ = 0;
+    end_ = 0;
+  }
+
  private:
   T elements_[MAX_SIZE + 1];
   size_t begin_;
@@ -103,6 +108,7 @@ class GCTracer {
       MC_UPDATE_POINTERS_TO_EVACUATED,
       MC_UPDATE_POINTERS_BETWEEN_EVACUATED,
       MC_UPDATE_MISC_POINTERS,
+      MC_WEAKCLOSURE,
       MC_WEAKCOLLECTION_PROCESS,
       MC_WEAKCOLLECTION_CLEAR,
       MC_WEAKCOLLECTION_ABORT,
@@ -145,9 +151,38 @@ class GCTracer {
     intptr_t allocation_in_bytes_;
   };
 
+
+  class ContextDisposalEvent {
+   public:
+    // Default constructor leaves the event uninitialized.
+    ContextDisposalEvent() {}
+
+    explicit ContextDisposalEvent(double time);
+
+    // Time when context disposal event happened.
+    double time_;
+  };
+
+
+  class SurvivalEvent {
+   public:
+    // Default constructor leaves the event uninitialized.
+    SurvivalEvent() {}
+
+    explicit SurvivalEvent(double survival_rate);
+
+    double survival_rate_;
+  };
+
+
   class Event {
    public:
-    enum Type { SCAVENGER = 0, MARK_COMPACTOR = 1, START = 2 };
+    enum Type {
+      SCAVENGER = 0,
+      MARK_COMPACTOR = 1,
+      INCREMENTAL_MARK_COMPACTOR = 2,
+      START = 3
+    };
 
     // Default constructor leaves the event uninitialized.
     Event() {}
@@ -198,7 +233,8 @@ class GCTracer {
 
     // Incremental marking steps since
     // - last event for SCAVENGER events
-    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
+    // events
     int incremental_marking_steps;
 
     // Bytes marked since creation of tracer (value at start of event).
@@ -206,7 +242,8 @@ class GCTracer {
 
     // Bytes marked since
     // - last event for SCAVENGER events
-    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
+    // events
     intptr_t incremental_marking_bytes;
 
     // Cumulative duration of incremental marking steps since creation of
@@ -215,7 +252,8 @@ class GCTracer {
 
     // Duration of incremental marking steps since
     // - last event for SCAVENGER events
-    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
+    // events
     double incremental_marking_duration;
 
     // Cumulative pure duration of incremental marking steps since creation of
@@ -224,7 +262,8 @@ class GCTracer {
 
     // Duration of pure incremental marking steps since
     // - last event for SCAVENGER events
-    // - last MARK_COMPACTOR event for MARK_COMPACTOR events
+    // - last INCREMENTAL_MARK_COMPACTOR event for INCREMENTAL_MARK_COMPACTOR
+    // events
     double pure_incremental_marking_duration;
 
     // Longest incremental marking step since start of marking.
@@ -235,12 +274,17 @@ class GCTracer {
     double scopes[Scope::NUMBER_OF_SCOPES];
   };
 
-  static const int kRingBufferMaxSize = 10;
+  static const size_t kRingBufferMaxSize = 10;
 
   typedef RingBuffer<Event, kRingBufferMaxSize> EventBuffer;
 
   typedef RingBuffer<AllocationEvent, kRingBufferMaxSize> AllocationEventBuffer;
 
+  typedef RingBuffer<ContextDisposalEvent, kRingBufferMaxSize>
+      ContextDisposalEventBuffer;
+
+  typedef RingBuffer<SurvivalEvent, kRingBufferMaxSize> SurvivalEventBuffer;
+
   explicit GCTracer(Heap* heap);
 
   // Start collecting data.
@@ -248,11 +292,15 @@ class GCTracer {
              const char* collector_reason);
 
   // Stop collecting data and print results.
-  void Stop();
+  void Stop(GarbageCollector collector);
 
   // Log an allocation throughput event.
   void AddNewSpaceAllocationTime(double duration, intptr_t allocation_in_bytes);
 
+  void AddContextDisposalTime(double time);
+
+  void AddSurvivalRate(double survival_rate);
+
   // Log an incremental marking step.
   void AddIncrementalMarkingStep(double duration, intptr_t bytes);
 
@@ -298,6 +346,12 @@ class GCTracer {
     return MaxDuration(mark_compactor_events_);
   }
 
+  // Compute the mean duration of the last incremental mark compactor
+  // events. Returns 0 if no events have been recorded.
+  double MeanIncrementalMarkCompactorDuration() const {
+    return MeanDuration(incremental_mark_compactor_events_);
+  }
+
   // Compute the mean step duration of the last incremental marking round.
   // Returns 0 if no incremental marking round has been completed.
   double MeanIncrementalMarkingDuration() const;
@@ -314,14 +368,36 @@ class GCTracer {
   // Returns 0 if no events have been recorded.
   intptr_t ScavengeSpeedInBytesPerMillisecond() const;
 
-  // Compute the max mark-sweep speed in bytes/millisecond.
+  // Compute the average mark-sweep speed in bytes/millisecond.
   // Returns 0 if no events have been recorded.
   intptr_t MarkCompactSpeedInBytesPerMillisecond() const;
 
+  // Compute the average incremental mark-sweep finalize speed in
+  // bytes/millisecond.
+  // Returns 0 if no events have been recorded.
+  intptr_t FinalIncrementalMarkCompactSpeedInBytesPerMillisecond() const;
+
   // Allocation throughput in the new space in bytes/millisecond.
   // Returns 0 if no events have been recorded.
   intptr_t NewSpaceAllocationThroughputInBytesPerMillisecond() const;
 
+  // Computes the context disposal rate in milliseconds. It takes the time
+  // span from the first recorded context disposal to the current time and
+  // divides it by the number of recorded events.
+  // Returns 0 if no events have been recorded.
+  double ContextDisposalRateInMilliseconds() const;
+
+  // Computes the average survival rate based on the last recorded survival
+  // events.
+  // Returns 0 if no events have been recorded.
+  double AverageSurvivalRate() const;
+
+  // Returns true if at least one survival event was recorded.
+  bool SurvivalEventsRecorded() const;
+
+  // Discard all recorded survival events.
+  void ResetSurvivalEvents();
+
  private:
   // Print one detailed trace line in name=value format.
   // TODO(ernstm): Move to Heap.
@@ -337,6 +413,16 @@ class GCTracer {
   // Compute the max duration of the events in the given ring buffer.
   double MaxDuration(const EventBuffer& events) const;
 
+  void ClearMarkCompactStatistics() {
+    cumulative_incremental_marking_steps_ = 0;
+    cumulative_incremental_marking_bytes_ = 0;
+    cumulative_incremental_marking_duration_ = 0;
+    cumulative_pure_incremental_marking_duration_ = 0;
+    longest_incremental_marking_step_ = 0;
+    cumulative_marking_duration_ = 0;
+    cumulative_sweeping_duration_ = 0;
+  }
+
   // Pointer to the heap that owns this tracer.
   Heap* heap_;
 
@@ -347,8 +433,8 @@ class GCTracer {
   // Previous tracer event.
   Event previous_;
 
-  // Previous MARK_COMPACTOR event.
-  Event previous_mark_compactor_event_;
+  // Previous INCREMENTAL_MARK_COMPACTOR event.
+  Event previous_incremental_mark_compactor_event_;
 
   // RingBuffers for SCAVENGER events.
   EventBuffer scavenger_events_;
@@ -356,9 +442,18 @@ class GCTracer {
   // RingBuffers for MARK_COMPACTOR events.
   EventBuffer mark_compactor_events_;
 
+  // RingBuffers for INCREMENTAL_MARK_COMPACTOR events.
+  EventBuffer incremental_mark_compactor_events_;
+
   // RingBuffer for allocation events.
   AllocationEventBuffer allocation_events_;
 
+  // RingBuffer for context disposal events.
+  ContextDisposalEventBuffer context_disposal_events_;
+
+  // RingBuffer for survival events.
+  SurvivalEventBuffer survival_events_;
+
   // Cumulative number of incremental marking steps since creation of tracer.
   int cumulative_incremental_marking_steps_;
 
@@ -393,6 +488,9 @@ class GCTracer {
   // collection.
   intptr_t new_space_top_after_gc_;
 
+  // Counts how many tracers were started without stopping.
+  int start_counter_;
+
   DISALLOW_COPY_AND_ASSIGN(GCTracer);
 };
 }
index 48e928d..549ecbc 100644 (file)
@@ -587,7 +587,7 @@ bool Heap::CollectGarbage(AllocationSpace space, const char* gc_reason,
 Isolate* Heap::isolate() {
   return reinterpret_cast<Isolate*>(
       reinterpret_cast<intptr_t>(this) -
-      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(4)->heap()) + 4);
+      reinterpret_cast<size_t>(reinterpret_cast<Isolate*>(16)->heap()) + 16);
 }
 
 
index 166dd3a..cf67dab 100644 (file)
@@ -63,6 +63,8 @@ Heap::Heap()
       initial_semispace_size_(Page::kPageSize),
       target_semispace_size_(Page::kPageSize),
       max_old_generation_size_(700ul * (kPointerSize / 4) * MB),
+      initial_old_generation_size_(max_old_generation_size_ / 2),
+      old_generation_size_configured_(false),
       max_executable_size_(256ul * (kPointerSize / 4) * MB),
       // Variables set based on semispace_size_ and old_generation_size_ in
       // ConfigureHeap.
@@ -70,6 +72,7 @@ Heap::Heap()
       // generation can be aligned to its size.
       maximum_committed_(0),
       survived_since_last_expansion_(0),
+      survived_last_scavenge_(0),
       sweep_generation_(0),
       always_allocate_scope_depth_(0),
       contexts_disposed_(0),
@@ -96,7 +99,7 @@ Heap::Heap()
 #ifdef DEBUG
       allocation_timeout_(0),
 #endif  // DEBUG
-      old_generation_allocation_limit_(kMinimumOldGenerationAllocationLimit),
+      old_generation_allocation_limit_(initial_old_generation_size_),
       old_gen_exhausted_(false),
       inline_allocation_disabled_(false),
       store_buffer_rebuilder_(store_buffer()),
@@ -106,8 +109,9 @@ Heap::Heap()
       tracer_(this),
       high_survival_rate_period_length_(0),
       promoted_objects_size_(0),
-      promotion_rate_(0),
+      promotion_ratio_(0),
       semi_space_copied_object_size_(0),
+      previous_semi_space_copied_object_size_(0),
       semi_space_copied_rate_(0),
       nodes_died_in_new_space_(0),
       nodes_copied_in_new_space_(0),
@@ -119,6 +123,7 @@ Heap::Heap()
       min_in_mutator_(kMaxInt),
       marking_time_(0.0),
       sweeping_time_(0.0),
+      last_idle_notification_time_(0.0),
       mark_compact_collector_(this),
       store_buffer_(this),
       marking_(this),
@@ -431,6 +436,7 @@ void Heap::GarbageCollectionPrologue() {
 
   // Reset GC statistics.
   promoted_objects_size_ = 0;
+  previous_semi_space_copied_object_size_ = semi_space_copied_object_size_;
   semi_space_copied_object_size_ = 0;
   nodes_died_in_new_space_ = 0;
   nodes_copied_in_new_space_ = 0;
@@ -767,7 +773,6 @@ void Heap::CollectAllAvailableGarbage(const char* gc_reason) {
   mark_compact_collector()->SetFlags(kNoGCFlags);
   new_space_.Shrink();
   UncommitFromSpace();
-  incremental_marking()->UncommitMarkingDeque();
 }
 
 
@@ -844,7 +849,7 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
     }
 
     GarbageCollectionEpilogue();
-    tracer()->Stop();
+    tracer()->Stop(collector);
   }
 
   // Start incremental marking for the next cycle. The heap snapshot
@@ -858,13 +863,18 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
 }
 
 
-int Heap::NotifyContextDisposed() {
+int Heap::NotifyContextDisposed(bool dependant_context) {
+  if (!dependant_context) {
+    tracer()->ResetSurvivalEvents();
+    old_generation_size_configured_ = false;
+  }
   if (isolate()->concurrent_recompilation_enabled()) {
     // Flush the queued recompilation tasks.
     isolate()->optimizing_compiler_thread()->Flush();
   }
   flush_monomorphic_ics_ = true;
   AgeInlineCaches();
+  tracer()->AddContextDisposalTime(base::OS::TimeCurrentMillis());
   return ++contexts_disposed_;
 }
 
@@ -1034,14 +1044,23 @@ void Heap::ClearNormalizedMapCaches() {
 void Heap::UpdateSurvivalStatistics(int start_new_space_size) {
   if (start_new_space_size == 0) return;
 
-  promotion_rate_ = (static_cast<double>(promoted_objects_size_) /
-                     static_cast<double>(start_new_space_size) * 100);
+  promotion_ratio_ = (static_cast<double>(promoted_objects_size_) /
+                      static_cast<double>(start_new_space_size) * 100);
+
+  if (previous_semi_space_copied_object_size_ > 0) {
+    promotion_rate_ =
+        (static_cast<double>(promoted_objects_size_) /
+         static_cast<double>(previous_semi_space_copied_object_size_) * 100);
+  } else {
+    promotion_rate_ = 0;
+  }
 
   semi_space_copied_rate_ =
       (static_cast<double>(semi_space_copied_object_size_) /
        static_cast<double>(start_new_space_size) * 100);
 
-  double survival_rate = promotion_rate_ + semi_space_copied_rate_;
+  double survival_rate = promotion_ratio_ + semi_space_copied_rate_;
+  tracer()->AddSurvivalRate(survival_rate);
 
   if (survival_rate > kYoungSurvivalRateHighThreshold) {
     high_survival_rate_period_length_++;
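
The survival bookkeeping now distinguishes promotion_ratio_ (promoted bytes over the new-space size at GC start) from promotion_rate_ (promoted bytes over what the previous scavenge copied within the semispaces), and feeds the combined survival rate to the tracer. Worked numbers, illustrative only:

    // Illustrative arithmetic for the metrics computed above.
    var startNewSpaceSize = 1000;    // new-space bytes at GC start
    var promoted = 150;              // bytes promoted to old space
    var prevSemiSpaceCopied = 300;   // copied by the previous scavenge
    var semiSpaceCopied = 200;       // copied by this scavenge

    var promotionRatio = promoted / startNewSpaceSize * 100;       // 15
    var promotionRate = promoted / prevSemiSpaceCopied * 100;      // 50
    var semiSpaceCopiedRate =
        semiSpaceCopied / startNewSpaceSize * 100;                 // 20
    var survivalRate = promotionRatio + semiSpaceCopiedRate;       // 35
    print([promotionRatio, promotionRate, survivalRate].join(" "));
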
@@ -1099,11 +1118,13 @@ bool Heap::PerformGarbageCollection(
     old_generation_allocation_limit_ =
         OldGenerationAllocationLimit(PromotedSpaceSizeOfObjects(), 0);
     old_gen_exhausted_ = false;
+    old_generation_size_configured_ = true;
   } else {
     Scavenge();
   }
 
   UpdateSurvivalStatistics(start_new_space_size);
+  ConfigureInitialOldGenerationSize();
 
   isolate_->counters()->objs_since_last_young()->Set(0);
 
@@ -1131,6 +1152,9 @@ bool Heap::PerformGarbageCollection(
         amount_of_external_allocated_memory_;
     old_generation_allocation_limit_ = OldGenerationAllocationLimit(
         PromotedSpaceSizeOfObjects(), freed_global_handles);
+    // We finished a marking cycle. We can uncommit the marking deque until
+    // we start marking again.
+    mark_compact_collector_.UncommitMarkingDeque();
   }
 
   {
@@ -1205,15 +1229,22 @@ void Heap::MarkCompact() {
 
   LOG(isolate_, ResourceEvent("markcompact", "end"));
 
+  MarkCompactEpilogue();
+
+  if (FLAG_allocation_site_pretenuring) {
+    EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
+  }
+}
+
+
+void Heap::MarkCompactEpilogue() {
   gc_state_ = NOT_IN_GC;
 
   isolate_->counters()->objs_since_last_full()->Set(0);
 
   flush_monomorphic_ics_ = false;
 
-  if (FLAG_allocation_site_pretenuring) {
-    EvaluateOldSpaceLocalPretenuring(size_of_objects_before_gc);
-  }
+  incremental_marking()->Epilogue();
 }
 
 
@@ -1300,11 +1331,18 @@ static void VerifyNonPointerSpacePointers(Heap* heap) {
 
 
 void Heap::CheckNewSpaceExpansionCriteria() {
-  if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
-      survived_since_last_expansion_ > new_space_.TotalCapacity()) {
-    // Grow the size of new space if there is room to grow, enough data
-    // has survived scavenge since the last expansion and we are not in
-    // high promotion mode.
+  if (FLAG_experimental_new_space_growth_heuristic) {
+    if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
+        survived_last_scavenge_ * 100 / new_space_.TotalCapacity() >= 10) {
+      // Grow the size of new space if there is room to grow, and more than 10%
+      // of its capacity survived the last scavenge.
+      new_space_.Grow();
+      survived_since_last_expansion_ = 0;
+    }
+  } else if (new_space_.TotalCapacity() < new_space_.MaximumCapacity() &&
+             survived_since_last_expansion_ > new_space_.TotalCapacity()) {
+    // Grow the size of new space if there is room to grow, and enough data
+    // has survived scavenge since the last expansion.
     new_space_.Grow();
     survived_since_last_expansion_ = 0;
   }
@@ -1541,9 +1579,10 @@ void Heap::Scavenge() {
   isolate()->global_handles()->RemoveObjectGroups();
   isolate()->global_handles()->RemoveImplicitRefGroups();
 
-  isolate_->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
+  isolate()->global_handles()->IdentifyNewSpaceWeakIndependentHandles(
       &IsUnscavengedHeapObject);
-  isolate_->global_handles()->IterateNewSpaceWeakIndependentRoots(
+
+  isolate()->global_handles()->IterateNewSpaceWeakIndependentRoots(
       &scavenge_visitor);
   new_space_front = DoScavenge(&scavenge_visitor, new_space_front);
 
@@ -1648,6 +1687,10 @@ void Heap::ProcessWeakReferences(WeakObjectRetainer* retainer) {
   // TODO(mvstanton): AllocationSites only need to be processed during
   // MARK_COMPACT, as they live in old space. Verify and address.
   ProcessAllocationSites(retainer);
+  // Collects callback info for handles that are pending (about to be
+  // collected) and are either phantom or have internal fields.  Releases
+  // the global handles.  See also PostGarbageCollectionProcessing.
+  isolate()->global_handles()->CollectPhantomCallbackData();
 }
 
 
@@ -1787,12 +1830,32 @@ Address Heap::DoScavenge(ObjectVisitor* scavenge_visitor,
         promotion_queue()->remove(&target, &size);
 
         // Promoted object might be already partially visited
-        // during old space pointer iteration. Thus we search specificly
+        // during old space pointer iteration. Thus we search specifically
         // for pointers to from semispace instead of looking for pointers
         // to new space.
         DCHECK(!target->IsMap());
-        IterateAndMarkPointersToFromSpace(
-            target->address(), target->address() + size, &ScavengeObject);
+        Address obj_address = target->address();
+#if V8_DOUBLE_FIELDS_UNBOXING
+        LayoutDescriptorHelper helper(target->map());
+        bool has_only_tagged_fields = helper.all_fields_tagged();
+
+        if (!has_only_tagged_fields) {
+          for (int offset = 0; offset < size;) {
+            int end_of_region_offset;
+            if (helper.IsTagged(offset, size, &end_of_region_offset)) {
+              IterateAndMarkPointersToFromSpace(
+                  obj_address + offset, obj_address + end_of_region_offset,
+                  &ScavengeObject);
+            }
+            offset = end_of_region_offset;
+          }
+        } else {
+#endif
+          IterateAndMarkPointersToFromSpace(obj_address, obj_address + size,
+                                            &ScavengeObject);
+#if V8_DOUBLE_FIELDS_UNBOXING
+        }
+#endif
       }
     }
 
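With double-field unboxing enabled, a promoted object may contain raw 64-bit doubles, so the pointer scan above walks the object region by region and visits only the tagged ones. A minimal sketch of that walk, with a hypothetical IsTagged() standing in for LayoutDescriptorHelper::IsTagged():

    #include <cstdio>

    // Hypothetical layout oracle: reports whether the field at |offset| is
    // tagged and where the current homogeneous region ends.
    bool IsTagged(int offset, int size, int* end_of_region_offset) {
      *end_of_region_offset = offset + 8 > size ? size : offset + 8;
      return (offset / 8) % 2 == 0;  // alternate tagged/raw, illustration only
    }

    void VisitTaggedRegions(int size) {
      for (int offset = 0; offset < size;) {
        int end_of_region_offset;
        if (IsTagged(offset, size, &end_of_region_offset)) {
          std::printf("visit tagged region [%d, %d)\n", offset,
                      end_of_region_offset);
        }
        offset = end_of_region_offset;  // raw regions are skipped entirely
      }
    }
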
@@ -1828,6 +1891,11 @@ static HeapObject* EnsureDoubleAligned(Heap* heap, HeapObject* object,
 }
 
 
+HeapObject* Heap::DoubleAlignForDeserialization(HeapObject* object, int size) {
+  return EnsureDoubleAligned(this, object, size);
+}
+
+
 enum LoggingAndProfiling {
   LOGGING_AND_PROFILING_ENABLED,
   LOGGING_AND_PROFILING_DISABLED
@@ -2297,6 +2365,17 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
 }
 
 
+void Heap::ConfigureInitialOldGenerationSize() {
+  if (!old_generation_size_configured_ && tracer()->SurvivalEventsRecorded()) {
+    old_generation_allocation_limit_ =
+        Max(kMinimumOldGenerationAllocationLimit,
+            static_cast<intptr_t>(
+                static_cast<double>(initial_old_generation_size_) *
+                (tracer()->AverageSurvivalRate() / 100)));
+  }
+}
+
+
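ConfigureInitialOldGenerationSize() above scales the configured initial old-generation size by the average survival rate observed so far, clamped from below. In isolation (the kMinimumOldGenerationAllocationLimit value here is illustrative, not the real constant):

    #include <algorithm>
    #include <cstdint>

    intptr_t InitialLimit(intptr_t initial_old_generation_size,
                          double average_survival_rate /* percent */) {
      const intptr_t kMinimumOldGenerationAllocationLimit =
          8 * static_cast<intptr_t>(1024 * 1024);  // assumed value
      return std::max(
          kMinimumOldGenerationAllocationLimit,
          static_cast<intptr_t>(initial_old_generation_size *
                                (average_survival_rate / 100)));
    }

So, for example, a 256 MB initial size with a 25% average survival rate yields a 64 MB starting limit.
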
 AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
                                           int instance_size) {
   Object* result;
@@ -2307,15 +2386,21 @@ AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
   reinterpret_cast<Map*>(result)->set_map(raw_unchecked_meta_map());
   reinterpret_cast<Map*>(result)->set_instance_type(instance_type);
   reinterpret_cast<Map*>(result)->set_instance_size(instance_size);
+  // Initialize to only containing tagged fields.
   reinterpret_cast<Map*>(result)->set_visitor_id(
-      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
+      StaticVisitorBase::GetVisitorId(instance_type, instance_size, false));
+  if (FLAG_unbox_double_fields) {
+    reinterpret_cast<Map*>(result)
+        ->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+  }
   reinterpret_cast<Map*>(result)->set_inobject_properties(0);
   reinterpret_cast<Map*>(result)->set_pre_allocated_property_fields(0);
   reinterpret_cast<Map*>(result)->set_unused_property_fields(0);
   reinterpret_cast<Map*>(result)->set_bit_field(0);
   reinterpret_cast<Map*>(result)->set_bit_field2(0);
   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
-                   Map::OwnsDescriptors::encode(true);
+                   Map::OwnsDescriptors::encode(true) |
+                   Map::Counter::encode(Map::kRetainingCounterStart);
   reinterpret_cast<Map*>(result)->set_bit_field3(bit_field3);
   return result;
 }
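
The bit_field3 value above ORs together encodings of independent sub-fields. A minimal stand-in for v8's BitField template (shift and width values here are illustrative, not the real map layout):

    #include <cstdint>

    template <int kShift, int kBits>
    struct BitFieldSketch {
      static uint32_t encode(uint32_t value) {
        return (value & ((1u << kBits) - 1)) << kShift;
      }
    };

    using EnumLength = BitFieldSketch<0, 11>;       // assumed positions
    using OwnsDescriptors = BitFieldSketch<11, 1>;
    using Counter = BitFieldSketch<12, 11>;

    uint32_t bit_field3 =
        EnumLength::encode(1023) | OwnsDescriptors::encode(1) |
        Counter::encode(15);  // values for illustration only
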
@@ -2331,8 +2416,6 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
   result->set_map_no_write_barrier(meta_map());
   Map* map = Map::cast(result);
   map->set_instance_type(instance_type);
-  map->set_visitor_id(
-      StaticVisitorBase::GetVisitorId(instance_type, instance_size));
   map->set_prototype(null_value(), SKIP_WRITE_BARRIER);
   map->set_constructor(null_value(), SKIP_WRITE_BARRIER);
   map->set_instance_size(instance_size);
@@ -2344,10 +2427,17 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
   map->init_back_pointer(undefined_value());
   map->set_unused_property_fields(0);
   map->set_instance_descriptors(empty_descriptor_array());
+  if (FLAG_unbox_double_fields) {
+    map->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+  }
+  // Must be called only after |instance_type|, |instance_size| and
+  // |layout_descriptor| are set.
+  map->set_visitor_id(StaticVisitorBase::GetVisitorId(map));
   map->set_bit_field(0);
   map->set_bit_field2(1 << Map::kIsExtensible);
   int bit_field3 = Map::EnumLengthBits::encode(kInvalidEnumCacheSentinel) |
-                   Map::OwnsDescriptors::encode(true);
+                   Map::OwnsDescriptors::encode(true) |
+                   Map::Counter::encode(Map::kRetainingCounterStart);
   map->set_bit_field3(bit_field3);
   map->set_elements_kind(elements_kind);
 
@@ -2470,28 +2560,46 @@ bool Heap::CreateInitialMaps() {
   meta_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
   meta_map()->init_back_pointer(undefined_value());
   meta_map()->set_instance_descriptors(empty_descriptor_array());
+  if (FLAG_unbox_double_fields) {
+    meta_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+  }
 
   fixed_array_map()->set_code_cache(empty_fixed_array());
   fixed_array_map()->set_dependent_code(
       DependentCode::cast(empty_fixed_array()));
   fixed_array_map()->init_back_pointer(undefined_value());
   fixed_array_map()->set_instance_descriptors(empty_descriptor_array());
+  if (FLAG_unbox_double_fields) {
+    fixed_array_map()->set_layout_descriptor(
+        LayoutDescriptor::FastPointerLayout());
+  }
 
   undefined_map()->set_code_cache(empty_fixed_array());
   undefined_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
   undefined_map()->init_back_pointer(undefined_value());
   undefined_map()->set_instance_descriptors(empty_descriptor_array());
+  if (FLAG_unbox_double_fields) {
+    undefined_map()->set_layout_descriptor(
+        LayoutDescriptor::FastPointerLayout());
+  }
 
   null_map()->set_code_cache(empty_fixed_array());
   null_map()->set_dependent_code(DependentCode::cast(empty_fixed_array()));
   null_map()->init_back_pointer(undefined_value());
   null_map()->set_instance_descriptors(empty_descriptor_array());
+  if (FLAG_unbox_double_fields) {
+    null_map()->set_layout_descriptor(LayoutDescriptor::FastPointerLayout());
+  }
 
   constant_pool_array_map()->set_code_cache(empty_fixed_array());
   constant_pool_array_map()->set_dependent_code(
       DependentCode::cast(empty_fixed_array()));
   constant_pool_array_map()->init_back_pointer(undefined_value());
   constant_pool_array_map()->set_instance_descriptors(empty_descriptor_array());
+  if (FLAG_unbox_double_fields) {
+    constant_pool_array_map()->set_layout_descriptor(
+        LayoutDescriptor::FastPointerLayout());
+  }
 
   // Fix prototype object for existing maps.
   meta_map()->set_prototype(null_value());
@@ -2607,7 +2715,8 @@ bool Heap::CreateInitialMaps() {
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, with_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, block_context)
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, module_context)
-    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, global_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context)
+    ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, script_context_table)
 
     ALLOCATE_VARSIZE_MAP(FIXED_ARRAY_TYPE, native_context)
     native_context_map()->set_dictionary_map(true);
@@ -2892,6 +3001,17 @@ void Heap::CreateInitialObjects() {
 #undef SYMBOL_INIT
   }
 
+  {
+    HandleScope scope(isolate());
+#define SYMBOL_INIT(name, varname, description)                             \
+  Handle<Symbol> name = factory->NewSymbol();                               \
+  Handle<String> name##d = factory->NewStringFromStaticChars(#description); \
+  name->set_name(*name##d);                                                 \
+  roots_[k##name##RootIndex] = *name;
+    PUBLIC_SYMBOL_LIST(SYMBOL_INIT)
+#undef SYMBOL_INIT
+  }
+
   CreateFixedStubs();
 
   // Allocate the dictionary of intrinsic function names.
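
The added block is an X-macro: SYMBOL_INIT is applied to each (name, varname, description) triple of PUBLIC_SYMBOL_LIST, defined in heap.h later in this diff. For the Symbol.iterator entry the preprocessor output is, reformatted (note the token-pasted iterator_symbold temporary):

    Handle<Symbol> iterator_symbol = factory->NewSymbol();
    Handle<String> iterator_symbold =
        factory->NewStringFromStaticChars("Symbol.iterator");
    iterator_symbol->set_name(*iterator_symbold);
    roots_[kiterator_symbolRootIndex] = *iterator_symbol;
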
@@ -4332,16 +4452,18 @@ void Heap::IdleMarkCompact(const char* message) {
 }
 
 
-void Heap::TryFinalizeIdleIncrementalMarking(
-    size_t idle_time_in_ms, size_t size_of_objects,
-    size_t mark_compact_speed_in_bytes_per_ms) {
+bool Heap::TryFinalizeIdleIncrementalMarking(
+    double idle_time_in_ms, size_t size_of_objects,
+    size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
   if (incremental_marking()->IsComplete() ||
-      (mark_compact_collector()->IsMarkingDequeEmpty() &&
-       gc_idle_time_handler_.ShouldDoMarkCompact(
-           idle_time_in_ms, size_of_objects,
-           mark_compact_speed_in_bytes_per_ms))) {
-    IdleMarkCompact("idle notification: finalize incremental");
+      (mark_compact_collector_.marking_deque()->IsEmpty() &&
+       gc_idle_time_handler_.ShouldDoFinalIncrementalMarkCompact(
+           static_cast<size_t>(idle_time_in_ms), size_of_objects,
+           final_incremental_mark_compact_speed_in_bytes_per_ms))) {
+    CollectAllGarbage(kNoGCFlags, "idle notification: finalize incremental");
+    return true;
   }
+  return false;
 }
 
 
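TryFinalizeIdleIncrementalMarking() now reports whether it triggered the final collection. The predicate presumably reduces to: finalize if marking is complete, or if the marking deque is drained and the estimated final mark-compact pause (roughly heap size divided by measured speed) fits into the remaining idle time. A sketch under that assumption:

    #include <cstddef>

    bool ShouldFinalize(bool marking_complete, bool deque_empty,
                        double idle_time_in_ms, size_t size_of_objects,
                        size_t speed_in_bytes_per_ms) {
      if (marking_complete) return true;
      if (!deque_empty || speed_in_bytes_per_ms == 0) return false;
      double estimated_pause_ms =
          static_cast<double>(size_of_objects) / speed_in_bytes_per_ms;
      return estimated_pause_ms <= idle_time_in_ms;
    }
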
@@ -4351,29 +4473,45 @@ bool Heap::WorthActivatingIncrementalMarking() {
 }
 
 
+static double MonotonicallyIncreasingTimeInMs() {
+  return V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() *
+         static_cast<double>(base::Time::kMillisecondsPerSecond);
+}
+
+
 bool Heap::IdleNotification(int idle_time_in_ms) {
-  // If incremental marking is off, we do not perform idle notification.
-  if (!FLAG_incremental_marking) return true;
-  base::ElapsedTimer timer;
-  timer.Start();
-  isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
-      idle_time_in_ms);
+  return IdleNotification(
+      V8::GetCurrentPlatform()->MonotonicallyIncreasingTime() +
+      (static_cast<double>(idle_time_in_ms) /
+       static_cast<double>(base::Time::kMillisecondsPerSecond)));
+}
+
+
+bool Heap::IdleNotification(double deadline_in_seconds) {
+  double deadline_in_ms =
+      deadline_in_seconds *
+      static_cast<double>(base::Time::kMillisecondsPerSecond);
   HistogramTimerScope idle_notification_scope(
       isolate_->counters()->gc_idle_notification());
 
   GCIdleTimeHandler::HeapState heap_state;
   heap_state.contexts_disposed = contexts_disposed_;
+  heap_state.contexts_disposal_rate =
+      tracer()->ContextDisposalRateInMilliseconds();
   heap_state.size_of_objects = static_cast<size_t>(SizeOfObjects());
   heap_state.incremental_marking_stopped = incremental_marking()->IsStopped();
   // TODO(ulan): Start incremental marking only for large heaps.
   heap_state.can_start_incremental_marking =
-      incremental_marking()->ShouldActivate();
+      incremental_marking()->ShouldActivate() && FLAG_incremental_marking;
   heap_state.sweeping_in_progress =
       mark_compact_collector()->sweeping_in_progress();
   heap_state.mark_compact_speed_in_bytes_per_ms =
       static_cast<size_t>(tracer()->MarkCompactSpeedInBytesPerMillisecond());
   heap_state.incremental_marking_speed_in_bytes_per_ms = static_cast<size_t>(
       tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+  heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms =
+      static_cast<size_t>(
+          tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond());
   heap_state.scavenge_speed_in_bytes_per_ms =
       static_cast<size_t>(tracer()->ScavengeSpeedInBytesPerMillisecond());
   heap_state.used_new_space_size = new_space_.Size();
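
The two IdleNotification() entry points differ only in how the deadline is expressed: the legacy int variant converts a duration in milliseconds into an absolute deadline in seconds, and the deadline variant converts back to milliseconds before computing how much idle time is actually left. A standalone sketch of the conversions:

    const double kMsPerSecond = 1000.0;  // base::Time::kMillisecondsPerSecond

    double DeadlineInSeconds(double now_in_seconds, int idle_time_in_ms) {
      return now_in_seconds + idle_time_in_ms / kMsPerSecond;
    }

    double RemainingIdleTimeInMs(double deadline_in_seconds, double now_in_ms) {
      return deadline_in_seconds * kMsPerSecond - now_in_ms;
    }
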
@@ -4382,11 +4520,13 @@ bool Heap::IdleNotification(int idle_time_in_ms) {
       static_cast<size_t>(
           tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
 
+  double idle_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
   GCIdleTimeAction action =
       gc_idle_time_handler_.Compute(idle_time_in_ms, heap_state);
+  isolate()->counters()->gc_idle_time_allotted_in_ms()->AddSample(
+      static_cast<int>(idle_time_in_ms));
 
   bool result = false;
-  int actual_time_in_ms = 0;
   switch (action.type) {
     case DONE:
       result = true;
@@ -4395,24 +4535,29 @@ bool Heap::IdleNotification(int idle_time_in_ms) {
       if (incremental_marking()->IsStopped()) {
         incremental_marking()->Start();
       }
-      incremental_marking()->Step(action.parameter,
-                                  IncrementalMarking::NO_GC_VIA_STACK_GUARD,
-                                  IncrementalMarking::FORCE_MARKING,
-                                  IncrementalMarking::DO_NOT_FORCE_COMPLETION);
-      actual_time_in_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
-      int remaining_idle_time_in_ms = idle_time_in_ms - actual_time_in_ms;
-      if (remaining_idle_time_in_ms > 0) {
-        TryFinalizeIdleIncrementalMarking(
+      double remaining_idle_time_in_ms = 0.0;
+      do {
+        incremental_marking()->Step(
+            action.parameter, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+            IncrementalMarking::FORCE_MARKING,
+            IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+        remaining_idle_time_in_ms =
+            deadline_in_ms - MonotonicallyIncreasingTimeInMs();
+      } while (remaining_idle_time_in_ms >=
+                   2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
+               !incremental_marking()->IsComplete() &&
+               !mark_compact_collector_.marking_deque()->IsEmpty());
+      if (remaining_idle_time_in_ms > 0.0) {
+        action.additional_work = TryFinalizeIdleIncrementalMarking(
             remaining_idle_time_in_ms, heap_state.size_of_objects,
-            heap_state.mark_compact_speed_in_bytes_per_ms);
+            heap_state.final_incremental_mark_compact_speed_in_bytes_per_ms);
       }
       break;
     }
     case DO_FULL_GC: {
-      HistogramTimerScope scope(isolate_->counters()->gc_context());
       if (contexts_disposed_) {
-        CollectAllGarbage(kReduceMemoryFootprintMask,
-                          "idle notification: contexts disposed");
+        HistogramTimerScope scope(isolate_->counters()->gc_context());
+        CollectAllGarbage(kNoGCFlags, "idle notification: contexts disposed");
         gc_idle_time_handler_.NotifyIdleMarkCompact();
         gc_count_at_last_idle_gc_ = gc_count_;
       } else {
@@ -4430,22 +4575,35 @@ bool Heap::IdleNotification(int idle_time_in_ms) {
       break;
   }
 
-  actual_time_in_ms = static_cast<int>(timer.Elapsed().InMilliseconds());
-  if (actual_time_in_ms <= idle_time_in_ms) {
+  double current_time = MonotonicallyIncreasingTimeInMs();
+  last_idle_notification_time_ = current_time;
+  double deadline_difference = deadline_in_ms - current_time;
+
+  if (deadline_difference >= 0) {
     if (action.type != DONE && action.type != DO_NOTHING) {
       isolate()->counters()->gc_idle_time_limit_undershot()->AddSample(
-          idle_time_in_ms - actual_time_in_ms);
+          static_cast<int>(deadline_difference));
     }
   } else {
     isolate()->counters()->gc_idle_time_limit_overshot()->AddSample(
-        actual_time_in_ms - idle_time_in_ms);
+        static_cast<int>(-deadline_difference));
   }
 
-  if (FLAG_trace_idle_notification) {
-    PrintF("Idle notification: requested idle time %d ms, actual time %d ms [",
-           idle_time_in_ms, actual_time_in_ms);
+  if ((FLAG_trace_idle_notification && action.type > DO_NOTHING) ||
+      FLAG_trace_idle_notification_verbose) {
+    PrintF(
+        "Idle notification: requested idle time %.2f ms, used idle time %.2f "
+        "ms, deadline usage %.2f ms [",
+        idle_time_in_ms, idle_time_in_ms - deadline_difference,
+        deadline_difference);
     action.Print();
-    PrintF("]\n");
+    PrintF("]");
+    if (FLAG_trace_idle_notification_verbose) {
+      PrintF("[");
+      heap_state.Print();
+      PrintF("]");
+    }
+    PrintF("\n");
   }
 
   contexts_disposed_ = 0;
@@ -4453,6 +4611,13 @@ bool Heap::IdleNotification(int idle_time_in_ms) {
 }
 
 
+bool Heap::RecentIdleNotificationHappened() {
+  return (last_idle_notification_time_ +
+          GCIdleTimeHandler::kMaxFrameRenderingIdleTime) >
+         MonotonicallyIncreasingTimeInMs();
+}
+
+
 #ifdef DEBUG
 
 void Heap::Print() {
@@ -5013,12 +5178,23 @@ bool Heap::ConfigureHeap(int max_semi_space_size, int max_old_space_size,
 
   target_semispace_size_ = Max(initial_semispace_size_, target_semispace_size_);
 
+  if (FLAG_semi_space_growth_factor < 2) {
+    FLAG_semi_space_growth_factor = 2;
+  }
+
   // The old generation is paged and needs at least one page for each space.
   int paged_space_count = LAST_PAGED_SPACE - FIRST_PAGED_SPACE + 1;
   max_old_generation_size_ =
       Max(static_cast<intptr_t>(paged_space_count * Page::kPageSize),
           max_old_generation_size_);
 
+  if (FLAG_initial_old_space_size > 0) {
+    initial_old_generation_size_ = FLAG_initial_old_space_size * MB;
+  } else {
+    initial_old_generation_size_ = max_old_generation_size_ / 2;
+  }
+  old_generation_allocation_limit_ = initial_old_generation_size_;
+
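The starting old-generation size is now configurable: an explicit FLAG_initial_old_space_size (in MB) wins, otherwise the heap starts at half the configured maximum and lets ConfigureInitialOldGenerationSize(), added earlier in this diff, refine it once survival statistics exist. In isolation:

    #include <cstdint>

    intptr_t InitialOldGenerationSize(int flag_initial_old_space_size_mb,
                                      intptr_t max_old_generation_size) {
      const intptr_t MB = 1024 * 1024;
      if (flag_initial_old_space_size_mb > 0) {
        return flag_initial_old_space_size_mb * MB;
      }
      return max_old_generation_size / 2;  // default: half the maximum
    }
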
   // We rely on being able to allocate new arrays in paged spaces.
   DCHECK(Page::kMaxRegularHeapObjectSize >=
          (JSArray::kSize +
@@ -5401,7 +5577,6 @@ void Heap::TearDown() {
   }
 
   store_buffer()->TearDown();
-  incremental_marking()->TearDown();
 
   isolate_->memory_allocator()->TearDown();
 }
index ee1fca9..3027131 100644 (file)
@@ -148,7 +148,8 @@ namespace internal {
   V(Map, with_context_map, WithContextMap)                                     \
   V(Map, block_context_map, BlockContextMap)                                   \
   V(Map, module_context_map, ModuleContextMap)                                 \
-  V(Map, global_context_map, GlobalContextMap)                                 \
+  V(Map, script_context_map, ScriptContextMap)                                 \
+  V(Map, script_context_table_map, ScriptContextTableMap)                      \
   V(Map, undefined_map, UndefinedMap)                                          \
   V(Map, the_hole_map, TheHoleMap)                                             \
   V(Map, null_map, NullMap)                                                    \
@@ -209,7 +210,6 @@ namespace internal {
   V(callee_string, "callee")                               \
   V(constructor_string, "constructor")                     \
   V(dot_result_string, ".result")                          \
-  V(dot_for_string, ".for.")                               \
   V(eval_string, "eval")                                   \
   V(empty_string, "")                                      \
   V(function_string, "function")                           \
@@ -279,9 +279,12 @@ namespace internal {
   V(RegExp_string, "RegExp")
 
 #define PRIVATE_SYMBOL_LIST(V)      \
+  V(nonextensible_symbol)           \
+  V(sealed_symbol)                  \
   V(frozen_symbol)                  \
   V(nonexistent_symbol)             \
   V(elements_transition_symbol)     \
+  V(prototype_users_symbol)         \
   V(observed_symbol)                \
   V(uninitialized_symbol)           \
   V(megamorphic_symbol)             \
@@ -299,6 +302,15 @@ namespace internal {
   V(class_start_position_symbol)    \
   V(class_end_position_symbol)
 
+#define PUBLIC_SYMBOL_LIST(V)                                    \
+  V(has_instance_symbol, symbolHasInstance, Symbol.hasInstance)  \
+  V(is_concat_spreadable_symbol, symbolIsConcatSpreadable,       \
+    Symbol.isConcatSpreadable)                                   \
+  V(is_regexp_symbol, symbolIsRegExp, Symbol.isRegExp)           \
+  V(iterator_symbol, symbolIterator, Symbol.iterator)            \
+  V(to_string_tag_symbol, symbolToStringTag, Symbol.toStringTag) \
+  V(unscopables_symbol, symbolUnscopables, Symbol.unscopables)
+
 // Heap roots that are known to be immortal immovable, for which we can safely
 // skip write barriers. This list is not complete and has omissions.
 #define IMMORTAL_IMMOVABLE_ROOT_LIST(V) \
@@ -341,7 +353,7 @@ namespace internal {
   V(WithContextMap)                     \
   V(BlockContextMap)                    \
   V(ModuleContextMap)                   \
-  V(GlobalContextMap)                   \
+  V(ScriptContextMap)                   \
   V(UndefinedMap)                       \
   V(TheHoleMap)                         \
   V(NullMap)                            \
@@ -755,7 +767,7 @@ class Heap {
   bool IsHeapIterable();
 
   // Notify the heap that a context has been disposed.
-  int NotifyContextDisposed();
+  int NotifyContextDisposed(bool dependent_context);
 
   inline void increment_scan_on_scavenge_pages() {
     scan_on_scavenge_pages_++;
@@ -809,6 +821,11 @@ class Heap {
   PRIVATE_SYMBOL_LIST(SYMBOL_ACCESSOR)
 #undef SYMBOL_ACCESSOR
 
+#define SYMBOL_ACCESSOR(name, varname, description) \
+  Symbol* name() { return Symbol::cast(roots_[k##name##RootIndex]); }
+  PUBLIC_SYMBOL_LIST(SYMBOL_ACCESSOR)
+#undef SYMBOL_ACCESSOR
+
   // The hidden_string is special because it is the empty string, but does
   // not match the empty string.
   String* hidden_string() { return hidden_string_; }
@@ -1090,6 +1107,7 @@ class Heap {
   void DisableInlineAllocation();
 
   // Implements the corresponding V8 API function.
+  bool IdleNotification(double deadline_in_seconds);
   bool IdleNotification(int idle_time_in_ms);
 
   // Declare all the root indices.  This defines the root list order.
@@ -1106,6 +1124,10 @@ class Heap {
     PRIVATE_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
 #undef SYMBOL_INDEX_DECLARATION
 
+#define SYMBOL_INDEX_DECLARATION(name, varname, description) k##name##RootIndex,
+    PUBLIC_SYMBOL_LIST(SYMBOL_INDEX_DECLARATION)
+#undef SYMBOL_INDEX_DECLARATION
+
 // Utility type maps
 #define DECLARE_STRUCT_MAP(NAME, Name, name) k##Name##MapRootIndex,
     STRUCT_LIST(DECLARE_STRUCT_MAP)
@@ -1177,6 +1199,7 @@ class Heap {
 
   inline void IncrementYoungSurvivorsCounter(int survived) {
     DCHECK(survived >= 0);
+    survived_last_scavenge_ = survived;
     survived_since_last_expansion_ += survived;
   }
 
@@ -1275,6 +1298,8 @@ class Heap {
 
   int gc_count() const { return gc_count_; }
 
+  bool RecentIdleNotificationHappened();
+
   // Completely clear the Instanceof cache (to stop it keeping objects alive
   // around a GC).
   inline void CompletelyClearInstanceofCache();
@@ -1481,6 +1506,8 @@ class Heap {
   int initial_semispace_size_;
   int target_semispace_size_;
   intptr_t max_old_generation_size_;
+  intptr_t initial_old_generation_size_;
+  bool old_generation_size_configured_;
   intptr_t max_executable_size_;
   intptr_t maximum_committed_;
 
@@ -1488,6 +1515,9 @@ class Heap {
   // scavenge since last new space expansion.
   int survived_since_last_expansion_;
 
+  // ... and since the last scavenge.
+  int survived_last_scavenge_;
+
   // For keeping track on when to flush RegExp code.
   int sweep_generation_;
 
@@ -1708,6 +1738,8 @@ class Heap {
     return (pretenure == TENURED) ? preferred_old_space : NEW_SPACE;
   }
 
+  HeapObject* DoubleAlignForDeserialization(HeapObject* object, int size);
+
   // Allocate an uninitialized object.  The memory is non-executable if the
   // hardware and OS allow.  This is the single choke-point for allocations
   // performed by the runtime and should not be bypassed (to extend this to
@@ -1909,6 +1941,7 @@ class Heap {
 
   // Code to be run before and after mark-compact.
   void MarkCompactPrologue();
+  void MarkCompactEpilogue();
 
   void ProcessNativeContexts(WeakObjectRetainer* retainer);
   void ProcessArrayBuffers(WeakObjectRetainer* retainer);
@@ -1962,8 +1995,10 @@ class Heap {
 
   int high_survival_rate_period_length_;
   intptr_t promoted_objects_size_;
+  double promotion_ratio_;
   double promotion_rate_;
   intptr_t semi_space_copied_object_size_;
+  intptr_t previous_semi_space_copied_object_size_;
   double semi_space_copied_rate_;
   int nodes_died_in_new_space_;
   int nodes_copied_in_new_space_;
@@ -1979,12 +2014,14 @@ class Heap {
   // Re-visit incremental marking heuristics.
   bool IsHighSurvivalRate() { return high_survival_rate_period_length_ > 0; }
 
+  void ConfigureInitialOldGenerationSize();
+
   void SelectScavengingVisitorsTable();
 
   void IdleMarkCompact(const char* message);
 
-  void TryFinalizeIdleIncrementalMarking(
-      size_t idle_time_in_ms, size_t size_of_objects,
+  bool TryFinalizeIdleIncrementalMarking(
+      double idle_time_in_ms, size_t size_of_objects,
       size_t mark_compact_speed_in_bytes_per_ms);
 
   bool WorthActivatingIncrementalMarking();
@@ -2032,6 +2069,9 @@ class Heap {
   // Cumulative GC time spent in sweeping
   double sweeping_time_;
 
+  // Last time an idle notification happened
+  double last_idle_notification_time_;
+
   MarkCompactCollector mark_compact_collector_;
 
   StoreBuffer store_buffer_;
index 5258c5c..496e02d 100644 (file)
@@ -103,13 +103,13 @@ void IncrementalMarking::BlackToGreyAndUnshift(HeapObject* obj,
     }
   }
 
-  marking_deque_.UnshiftGrey(obj);
+  heap_->mark_compact_collector()->marking_deque()->UnshiftGrey(obj);
 }
 
 
 void IncrementalMarking::WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit) {
   Marking::WhiteToGrey(mark_bit);
-  marking_deque_.PushGrey(obj);
+  heap_->mark_compact_collector()->marking_deque()->PushGrey(obj);
 }
 }
 }  // namespace v8::internal
index dde0621..33f9de0 100644 (file)
@@ -19,8 +19,6 @@ namespace internal {
 IncrementalMarking::IncrementalMarking(Heap* heap)
     : heap_(heap),
       state_(STOPPED),
-      marking_deque_memory_(NULL),
-      marking_deque_memory_committed_(false),
       steps_count_(0),
       old_generation_space_available_at_start_of_incremental_(0),
       old_generation_space_used_at_start_of_incremental_(0),
@@ -29,10 +27,8 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
       allocated_(0),
       idle_marking_delay_counter_(0),
       no_marking_scope_depth_(0),
-      unscanned_bytes_of_large_object_(0) {}
-
-
-void IncrementalMarking::TearDown() { delete marking_deque_memory_; }
+      unscanned_bytes_of_large_object_(0),
+      was_activated_(false) {}
 
 
 void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot,
@@ -195,11 +191,12 @@ class IncrementalMarkingMarkingVisitor
                                 HeapObject::RawField(object, end_offset));
         start_offset = end_offset;
         end_offset = Min(object_size, end_offset + kProgressBarScanningChunk);
-        scan_until_end = heap->incremental_marking()->marking_deque()->IsFull();
+        scan_until_end =
+            heap->mark_compact_collector()->marking_deque()->IsFull();
       } while (scan_until_end && start_offset < object_size);
       chunk->set_progress_bar(start_offset);
       if (start_offset < object_size) {
-        heap->incremental_marking()->marking_deque()->UnshiftGrey(object);
+        heap->mark_compact_collector()->marking_deque()->UnshiftGrey(object);
         heap->incremental_marking()->NotifyIncompleteScanOfObject(
             object_size - (start_offset - already_scanned_offset));
       }
@@ -427,6 +424,9 @@ bool IncrementalMarking::ShouldActivate() {
 }
 
 
+bool IncrementalMarking::WasActivated() { return was_activated_; }
+
+
 bool IncrementalMarking::WorthActivating() {
 #ifndef DEBUG
   static const intptr_t kActivationThreshold = 8 * MB;
@@ -482,32 +482,6 @@ static void PatchIncrementalMarkingRecordWriteStubs(
 }
 
 
-void IncrementalMarking::EnsureMarkingDequeIsCommitted() {
-  if (marking_deque_memory_ == NULL) {
-    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
-  }
-  if (!marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Commit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size(),
-        false);  // Not executable.
-    CHECK(success);
-    marking_deque_memory_committed_ = true;
-  }
-}
-
-
-void IncrementalMarking::UncommitMarkingDeque() {
-  if (state_ == STOPPED && marking_deque_memory_committed_) {
-    bool success = marking_deque_memory_->Uncommit(
-        reinterpret_cast<Address>(marking_deque_memory_->address()),
-        marking_deque_memory_->size());
-    CHECK(success);
-    marking_deque_memory_committed_ = false;
-  }
-}
-
-
 void IncrementalMarking::Start(CompactionFlag flag) {
   if (FLAG_trace_incremental_marking) {
     PrintF("[IncrementalMarking] Start\n");
@@ -520,6 +494,8 @@ void IncrementalMarking::Start(CompactionFlag flag) {
 
   ResetStepCounters();
 
+  was_activated_ = true;
+
   if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
     StartMarking(flag);
   } else {
@@ -550,13 +526,7 @@ void IncrementalMarking::StartMarking(CompactionFlag flag) {
 
   PatchIncrementalMarkingRecordWriteStubs(heap_, mode);
 
-  EnsureMarkingDequeIsCommitted();
-
-  // Initialize marking stack.
-  Address addr = static_cast<Address>(marking_deque_memory_->address());
-  size_t size = marking_deque_memory_->size();
-  if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
-  marking_deque_.Initialize(addr, addr + size);
+  heap_->mark_compact_collector()->EnsureMarkingDequeIsCommittedAndInitialize();
 
   ActivateIncrementalWriteBarrier();
 
@@ -602,10 +572,12 @@ void IncrementalMarking::PrepareForScavenge() {
 void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
   if (!IsMarking()) return;
 
-  int current = marking_deque_.bottom();
-  int mask = marking_deque_.mask();
-  int limit = marking_deque_.top();
-  HeapObject** array = marking_deque_.array();
+  MarkingDeque* marking_deque =
+      heap_->mark_compact_collector()->marking_deque();
+  int current = marking_deque->bottom();
+  int mask = marking_deque->mask();
+  int limit = marking_deque->top();
+  HeapObject** array = marking_deque->array();
   int new_top = current;
 
   Map* filler_map = heap_->one_pointer_filler_map();
@@ -620,7 +592,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
         HeapObject* dest = map_word.ToForwardingAddress();
         array[new_top] = dest;
         new_top = ((new_top + 1) & mask);
-        DCHECK(new_top != marking_deque_.bottom());
+        DCHECK(new_top != marking_deque->bottom());
 #ifdef DEBUG
         MarkBit mark_bit = Marking::MarkBitFrom(obj);
         DCHECK(Marking::IsGrey(mark_bit) ||
@@ -632,7 +604,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
       // stack when we perform in place array shift.
       array[new_top] = obj;
       new_top = ((new_top + 1) & mask);
-      DCHECK(new_top != marking_deque_.bottom());
+      DCHECK(new_top != marking_deque->bottom());
 #ifdef DEBUG
       MarkBit mark_bit = Marking::MarkBitFrom(obj);
       MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
@@ -643,7 +615,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
 #endif
     }
   }
-  marking_deque_.set_top(new_top);
+  marking_deque->set_top(new_top);
 }
 
 
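The compaction above rewrites the deque in place, relying on its power-of-two ring-buffer layout: an index advances with a mask rather than a modulo, so wrap-around is a single AND. A minimal sketch:

    #include <cassert>

    int Advance(int index, int capacity) {
      assert((capacity & (capacity - 1)) == 0);  // capacity is a power of two
      int mask = capacity - 1;                   // marking_deque->mask()
      return (index + 1) & mask;
    }
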
@@ -670,8 +642,10 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
 intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
   intptr_t bytes_processed = 0;
   Map* filler_map = heap_->one_pointer_filler_map();
-  while (!marking_deque_.IsEmpty() && bytes_processed < bytes_to_process) {
-    HeapObject* obj = marking_deque_.Pop();
+  MarkingDeque* marking_deque =
+      heap_->mark_compact_collector()->marking_deque();
+  while (!marking_deque->IsEmpty() && bytes_processed < bytes_to_process) {
+    HeapObject* obj = marking_deque->Pop();
 
     // Explicitly skip one word fillers. Incremental markbit patterns are
     // correct only for objects that occupy at least two words.
@@ -692,8 +666,10 @@ intptr_t IncrementalMarking::ProcessMarkingDeque(intptr_t bytes_to_process) {
 
 void IncrementalMarking::ProcessMarkingDeque() {
   Map* filler_map = heap_->one_pointer_filler_map();
-  while (!marking_deque_.IsEmpty()) {
-    HeapObject* obj = marking_deque_.Pop();
+  MarkingDeque* marking_deque =
+      heap_->mark_compact_collector()->marking_deque();
+  while (!marking_deque->IsEmpty()) {
+    HeapObject* obj = marking_deque->Pop();
 
     // Explicitly skip one word fillers. Incremental markbit patterns are
     // correct only for objects that occupy at least two words.
@@ -793,7 +769,7 @@ void IncrementalMarking::Finalize() {
   PatchIncrementalMarkingRecordWriteStubs(heap_,
                                           RecordWriteStub::STORE_BUFFER_ONLY);
   DeactivateIncrementalWriteBarrier();
-  DCHECK(marking_deque_.IsEmpty());
+  DCHECK(heap_->mark_compact_collector()->marking_deque()->IsEmpty());
   heap_->isolate()->stack_guard()->ClearGC();
 }
 
@@ -815,6 +791,9 @@ void IncrementalMarking::MarkingComplete(CompletionAction action) {
 }
 
 
+void IncrementalMarking::Epilogue() { was_activated_ = false; }
+
+
 void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
   if (IsStopped() && ShouldActivate()) {
     // TODO(hpayer): Let's play safe for now, but compaction should be
@@ -910,6 +889,12 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
     return 0;
   }
 
+  // If an idle notification happened recently, we delay marking steps.
+  if (marking == DO_NOT_FORCE_MARKING &&
+      heap_->RecentIdleNotificationHappened()) {
+    return 0;
+  }
+
   if (state_ == MARKING && no_marking_scope_depth_ > 0) return 0;
 
   intptr_t bytes_processed = 0;
@@ -936,7 +921,8 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
 
     if (state_ == SWEEPING) {
       if (heap_->mark_compact_collector()->sweeping_in_progress() &&
-          heap_->mark_compact_collector()->IsSweepingCompleted()) {
+          (heap_->mark_compact_collector()->IsSweepingCompleted() ||
+           !FLAG_concurrent_sweeping)) {
         heap_->mark_compact_collector()->EnsureSweepingCompleted();
       }
       if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
@@ -945,7 +931,7 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
       }
     } else if (state_ == MARKING) {
       bytes_processed = ProcessMarkingDeque(bytes_to_process);
-      if (marking_deque_.IsEmpty()) {
+      if (heap_->mark_compact_collector()->marking_deque()->IsEmpty()) {
         if (completion == FORCE_COMPLETION ||
             IsIdleMarkingDelayCounterLimitReached()) {
           MarkingComplete(action);
index 96f8c6b..56c5a24 100644 (file)
@@ -28,8 +28,6 @@ class IncrementalMarking {
 
   static void Initialize();
 
-  void TearDown();
-
   State state() {
     DCHECK(state_ == STOPPED || FLAG_incremental_marking);
     return state_;
@@ -50,6 +48,8 @@ class IncrementalMarking {
 
   bool ShouldActivate();
 
+  bool WasActivated();
+
   enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };
 
   void Start(CompactionFlag flag = ALLOW_COMPACTION);
@@ -68,6 +68,8 @@ class IncrementalMarking {
 
   void MarkingComplete(CompletionAction action);
 
+  void Epilogue();
+
   // It's hard to know how much work the incremental marker should do to make
   // progress in the face of the mutator creating new work for it.  We start
   // of at a moderate rate of work and gradually increase the speed of the
@@ -144,8 +146,6 @@ class IncrementalMarking {
     SetNewSpacePageFlags(chunk, IsMarking());
   }
 
-  MarkingDeque* marking_deque() { return &marking_deque_; }
-
   bool IsCompacting() { return IsMarking() && is_compacting_; }
 
   void ActivateGeneratedStub(Code* stub);
@@ -168,8 +168,6 @@ class IncrementalMarking {
 
   void LeaveNoMarkingScope() { no_marking_scope_depth_--; }
 
-  void UncommitMarkingDeque();
-
   void NotifyIncompleteScanOfObject(int unscanned_bytes) {
     unscanned_bytes_of_large_object_ = unscanned_bytes;
   }
@@ -200,8 +198,6 @@ class IncrementalMarking {
 
   static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);
 
-  void EnsureMarkingDequeIsCommitted();
-
   INLINE(void ProcessMarkingDeque());
 
   INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process));
@@ -215,10 +211,6 @@ class IncrementalMarking {
   State state_;
   bool is_compacting_;
 
-  base::VirtualMemory* marking_deque_memory_;
-  bool marking_deque_memory_committed_;
-  MarkingDeque marking_deque_;
-
   int steps_count_;
   int64_t old_generation_space_available_at_start_of_incremental_;
   int64_t old_generation_space_used_at_start_of_incremental_;
@@ -234,6 +226,8 @@ class IncrementalMarking {
 
   int unscanned_bytes_of_large_object_;
 
+  bool was_activated_;
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
 };
 }
index 2cefebf..fa127db 100644 (file)
@@ -47,9 +47,11 @@ MarkCompactCollector::MarkCompactCollector(Heap* heap)
       was_marked_incrementally_(false),
       sweeping_in_progress_(false),
       pending_sweeper_jobs_semaphore_(0),
-      sequential_sweeping_(false),
+      evacuation_(false),
       migration_slots_buffer_(NULL),
       heap_(heap),
+      marking_deque_memory_(NULL),
+      marking_deque_memory_committed_(false),
       code_flusher_(NULL),
       have_code_to_deoptimize_(false) {
 }
@@ -233,7 +235,10 @@ void MarkCompactCollector::SetUp() {
 }
 
 
-void MarkCompactCollector::TearDown() { AbortCompaction(); }
+void MarkCompactCollector::TearDown() {
+  AbortCompaction();
+  delete marking_deque_memory_;
+}
 
 
 void MarkCompactCollector::AddEvacuationCandidate(Page* p) {
@@ -304,6 +309,8 @@ void MarkCompactCollector::CollectGarbage() {
 
   heap_->set_encountered_weak_cells(Smi::FromInt(0));
 
+  isolate()->global_handles()->CollectPhantomCallbackData();
+
 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap) {
     VerifyMarking(heap_);
@@ -378,7 +385,7 @@ void MarkCompactCollector::VerifyWeakEmbeddedObjectsInCode() {
   for (HeapObject* obj = code_iterator.Next(); obj != NULL;
        obj = code_iterator.Next()) {
     Code* code = Code::cast(obj);
-    if (!code->is_optimized_code() && !code->is_weak_stub()) continue;
+    if (!code->is_optimized_code()) continue;
     if (WillBeDeoptimized(code)) continue;
     code->VerifyEmbeddedObjectsDependency();
   }
@@ -441,7 +448,7 @@ class MarkCompactCollector::SweeperTask : public v8::Task {
 
  private:
   // v8::Task overrides.
-  virtual void Run() OVERRIDE {
+  void Run() OVERRIDE {
     heap_->mark_compact_collector()->SweepInParallel(space_, 0);
     heap_->mark_compact_collector()->pending_sweeper_jobs_semaphore_.Signal();
   }
@@ -470,12 +477,12 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
 
   // If sweeping is not completed or not running at all, we try to complete it
   // here.
-  if (FLAG_predictable || !IsSweepingCompleted()) {
+  if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) {
     SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
     SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
   }
   // Wait twice for both jobs.
-  if (!FLAG_predictable) {
+  if (FLAG_concurrent_sweeping) {
     pending_sweeper_jobs_semaphore_.Wait();
     pending_sweeper_jobs_semaphore_.Wait();
   }
@@ -487,7 +494,7 @@ void MarkCompactCollector::EnsureSweepingCompleted() {
   heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
 
 #ifdef VERIFY_HEAP
-  if (FLAG_verify_heap) {
+  if (FLAG_verify_heap && !evacuation()) {
     VerifyEvacuation(heap_);
   }
 #endif
@@ -1206,7 +1213,6 @@ static inline HeapObject* ShortCircuitConsString(Object** p) {
   // except the maps for the object and its possible substrings might be
   // marked.
   HeapObject* object = HeapObject::cast(*p);
-  if (!FLAG_clever_optimizations) return object;
   Map* map = object->map();
   InstanceType type = map->instance_type();
   if (!IsShortcutCandidate(type)) return object;
@@ -1945,11 +1951,6 @@ void MarkCompactCollector::MarkAllocationSite(AllocationSite* site) {
 }
 
 
-bool MarkCompactCollector::IsMarkingDequeEmpty() {
-  return marking_deque_.IsEmpty();
-}
-
-
 void MarkCompactCollector::MarkRoots(RootMarkingVisitor* visitor) {
   // Mark the heap roots including global variables, stack variables,
   // etc., and all objects reachable from them.
@@ -2015,13 +2016,18 @@ void MarkCompactCollector::MarkWeakObjectToCodeTable() {
 // After: the marking stack is empty, and all objects reachable from the
 // marking stack have been marked, or are overflowed in the heap.
 void MarkCompactCollector::EmptyMarkingDeque() {
+  Map* filler_map = heap_->one_pointer_filler_map();
   while (!marking_deque_.IsEmpty()) {
     HeapObject* object = marking_deque_.Pop();
+    // Explicitly skip one word fillers. Incremental markbit patterns are
+    // correct only for objects that occupy at least two words.
+    Map* map = object->map();
+    if (map == filler_map) continue;
+
     DCHECK(object->IsHeapObject());
     DCHECK(heap()->Contains(object));
-    DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
+    DCHECK(!Marking::IsWhite(Marking::MarkBitFrom(object)));
 
-    Map* map = object->map();
     MarkBit map_mark = Marking::MarkBitFrom(map);
     MarkObject(map, map_mark);
 
@@ -2084,13 +2090,16 @@ void MarkCompactCollector::ProcessMarkingDeque() {
 
 // Mark all objects reachable (transitively) from objects on the marking
 // stack including references only considered in the atomic marking pause.
-void MarkCompactCollector::ProcessEphemeralMarking(ObjectVisitor* visitor) {
+void MarkCompactCollector::ProcessEphemeralMarking(
+    ObjectVisitor* visitor, bool only_process_harmony_weak_collections) {
   bool work_to_do = true;
-  DCHECK(marking_deque_.IsEmpty());
+  DCHECK(marking_deque_.IsEmpty() && !marking_deque_.overflowed());
   while (work_to_do) {
-    isolate()->global_handles()->IterateObjectGroups(
-        visitor, &IsUnmarkedHeapObjectWithHeap);
-    MarkImplicitRefGroups();
+    if (!only_process_harmony_weak_collections) {
+      isolate()->global_handles()->IterateObjectGroups(
+          visitor, &IsUnmarkedHeapObjectWithHeap);
+      MarkImplicitRefGroups();
+    }
     ProcessWeakCollections();
     work_to_do = !marking_deque_.IsEmpty();
     ProcessMarkingDeque();
@@ -2116,6 +2125,43 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
 }
 
 
+void MarkCompactCollector::EnsureMarkingDequeIsCommittedAndInitialize() {
+  if (marking_deque_memory_ == NULL) {
+    marking_deque_memory_ = new base::VirtualMemory(4 * MB);
+  }
+  if (!marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Commit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size(),
+        false);  // Not executable.
+    CHECK(success);
+    marking_deque_memory_committed_ = true;
+    InitializeMarkingDeque();
+  }
+}
+
+
+void MarkCompactCollector::InitializeMarkingDeque() {
+  if (marking_deque_memory_committed_) {
+    Address addr = static_cast<Address>(marking_deque_memory_->address());
+    size_t size = marking_deque_memory_->size();
+    if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize;
+    marking_deque_.Initialize(addr, addr + size);
+  }
+}
+
+
+void MarkCompactCollector::UncommitMarkingDeque() {
+  if (marking_deque_memory_committed_) {
+    bool success = marking_deque_memory_->Uncommit(
+        reinterpret_cast<Address>(marking_deque_memory_->address()),
+        marking_deque_memory_->size());
+    CHECK(success);
+    marking_deque_memory_committed_ = false;
+  }
+}
+
+
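The deque's backing store now lives in the collector and follows a reserve-once, commit-on-demand, uncommit-when-idle pattern (UncommitMarkingDeque() is called after a marking cycle finishes, as seen in PerformGarbageCollection earlier in this diff). A portable sketch of that lifecycle, with malloc/free standing in for base::VirtualMemory's commit and uncommit:

    #include <cstdlib>

    struct LazyBuffer {
      void* memory = nullptr;
      bool committed = false;
      const std::size_t size = 4u * 1024 * 1024;  // 4 MB, as above

      void EnsureCommitted() {
        if (committed) return;
        memory = std::malloc(size);  // real code commits reserved pages
        committed = true;
      }
      void Uncommit() {
        if (!committed) return;
        std::free(memory);  // real code uncommits but keeps the reservation
        memory = nullptr;
        committed = false;
      }
    };
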
 void MarkCompactCollector::MarkLiveObjects() {
   GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_MARK);
   double start_time = 0.0;
@@ -2127,42 +2173,21 @@ void MarkCompactCollector::MarkLiveObjects() {
   // with the C stack limit check.
   PostponeInterruptsScope postpone(isolate());
 
-  bool incremental_marking_overflowed = false;
   IncrementalMarking* incremental_marking = heap_->incremental_marking();
   if (was_marked_incrementally_) {
-    // Finalize the incremental marking and check whether we had an overflow.
-    // Both markers use grey color to mark overflowed objects so
-    // non-incremental marker can deal with them as if overflow
-    // occured during normal marking.
-    // But incremental marker uses a separate marking deque
-    // so we have to explicitly copy its overflow state.
     incremental_marking->Finalize();
-    incremental_marking_overflowed =
-        incremental_marking->marking_deque()->overflowed();
-    incremental_marking->marking_deque()->ClearOverflowed();
   } else {
     // Abort any pending incremental activities e.g. incremental sweeping.
     incremental_marking->Abort();
+    InitializeMarkingDeque();
   }
 
 #ifdef DEBUG
   DCHECK(state_ == PREPARE_GC);
   state_ = MARK_LIVE_OBJECTS;
 #endif
-  // The to space contains live objects, a page in from space is used as a
-  // marking stack.
-  Address marking_deque_start = heap()->new_space()->FromSpacePageLow();
-  Address marking_deque_end = heap()->new_space()->FromSpacePageHigh();
-  if (FLAG_force_marking_deque_overflows) {
-    marking_deque_end = marking_deque_start + 64 * kPointerSize;
-  }
-  marking_deque_.Initialize(marking_deque_start, marking_deque_end);
-  DCHECK(!marking_deque_.overflowed());
 
-  if (incremental_marking_overflowed) {
-    // There are overflowed objects left in the heap after incremental marking.
-    marking_deque_.SetOverflowed();
-  }
+  EnsureMarkingDequeIsCommittedAndInitialize();
 
   PrepareForCodeFlushing();
 
@@ -2199,30 +2224,35 @@ void MarkCompactCollector::MarkLiveObjects() {
 
   ProcessTopOptimizedFrame(&root_visitor);
 
-  // The objects reachable from the roots are marked, yet unreachable
-  // objects are unmarked.  Mark objects reachable due to host
-  // application specific logic or through Harmony weak maps.
-  ProcessEphemeralMarking(&root_visitor);
+  {
+    GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_WEAKCLOSURE);
+
+    // The objects reachable from the roots are marked, yet unreachable
+    // objects are unmarked.  Mark objects reachable due to host
+    // application specific logic or through Harmony weak maps.
+    ProcessEphemeralMarking(&root_visitor, false);
+
+    // The objects reachable from the roots, weak maps or object groups
+    // are marked. Objects pointed to only by weak global handles cannot be
+    // immediately reclaimed. Instead, we have to mark them as pending and mark
+    // objects reachable from them.
+    //
+    // First we identify nonlive weak handles and mark them as pending
+    // destruction.
+    heap()->isolate()->global_handles()->IdentifyWeakHandles(
+        &IsUnmarkedHeapObject);
+    // Then we mark the objects.
+    heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
+    ProcessMarkingDeque();
 
-  // The objects reachable from the roots, weak maps or object groups
-  // are marked, yet unreachable objects are unmarked.  Mark objects
-  // reachable only from weak global handles.
-  //
-  // First we identify nonlive weak handles and mark them as pending
-  // destruction.
-  heap()->isolate()->global_handles()->IdentifyWeakHandles(
-      &IsUnmarkedHeapObject);
-  // Then we mark the objects and process the transitive closure.
-  heap()->isolate()->global_handles()->IterateWeakRoots(&root_visitor);
-  while (marking_deque_.overflowed()) {
-    RefillMarkingDeque();
-    EmptyMarkingDeque();
+    // Repeat Harmony weak maps marking to mark unmarked objects reachable from
+    // the weak roots we just marked as pending destruction.
+    //
+    // We only process harmony collections, as all object groups have been fully
+    // processed and no weakly reachable node can discover new object groups.
+    ProcessEphemeralMarking(&root_visitor, true);
   }
 
-  // Repeat host application specific and Harmony weak maps marking to
-  // mark unmarked objects reachable from the weak roots.
-  ProcessEphemeralMarking(&root_visitor);
-
   AfterMarking();
 
   if (FLAG_print_cumulative_gc_stat) {
@@ -2232,12 +2262,6 @@ void MarkCompactCollector::MarkLiveObjects() {
 
 
 void MarkCompactCollector::AfterMarking() {
-  // Object literal map caches reference strings (cache keys) and maps
-  // (cache values). At this point still useful maps have already been
-  // marked. Mark the keys for the alive values before we process the
-  // string table.
-  ProcessMapCaches();
-
   // Prune the string table removing all strings only pointed to by the
   // string table.  Cannot use string_table() here because the string
   // table is marked.
@@ -2274,57 +2298,6 @@ void MarkCompactCollector::AfterMarking() {
 }
 
 
-void MarkCompactCollector::ProcessMapCaches() {
-  Object* raw_context = heap()->native_contexts_list();
-  while (raw_context != heap()->undefined_value()) {
-    Context* context = reinterpret_cast<Context*>(raw_context);
-    if (IsMarked(context)) {
-      HeapObject* raw_map_cache =
-          HeapObject::cast(context->get(Context::MAP_CACHE_INDEX));
-      // A map cache may be reachable from the stack. In this case
-      // it's already transitively marked and it's too late to clean
-      // up its parts.
-      if (!IsMarked(raw_map_cache) &&
-          raw_map_cache != heap()->undefined_value()) {
-        MapCache* map_cache = reinterpret_cast<MapCache*>(raw_map_cache);
-        int existing_elements = map_cache->NumberOfElements();
-        int used_elements = 0;
-        for (int i = MapCache::kElementsStartIndex; i < map_cache->length();
-             i += MapCache::kEntrySize) {
-          Object* raw_key = map_cache->get(i);
-          if (raw_key == heap()->undefined_value() ||
-              raw_key == heap()->the_hole_value())
-            continue;
-          STATIC_ASSERT(MapCache::kEntrySize == 2);
-          Object* raw_map = map_cache->get(i + 1);
-          if (raw_map->IsHeapObject() && IsMarked(raw_map)) {
-            ++used_elements;
-          } else {
-            // Delete useless entries with unmarked maps.
-            DCHECK(raw_map->IsMap());
-            map_cache->set_the_hole(i);
-            map_cache->set_the_hole(i + 1);
-          }
-        }
-        if (used_elements == 0) {
-          context->set(Context::MAP_CACHE_INDEX, heap()->undefined_value());
-        } else {
-          // Note: we don't actually shrink the cache here to avoid
-          // extra complexity during GC. We rely on subsequent cache
-          // usages (EnsureCapacity) to do this.
-          map_cache->ElementsRemoved(existing_elements - used_elements);
-          MarkBit map_cache_markbit = Marking::MarkBitFrom(map_cache);
-          MarkObject(map_cache, map_cache_markbit);
-        }
-      }
-    }
-    // Move to next element in the list.
-    raw_context = context->get(Context::NEXT_CONTEXT_LINK);
-  }
-  ProcessMarkingDeque();
-}
-
-
 void MarkCompactCollector::ClearNonLiveReferences() {
   // Iterate over the map space, setting map transitions that go from
   // a marked map to an unmarked map to null transitions.  This action
@@ -2572,34 +2545,12 @@ void MarkCompactCollector::TrimEnumCache(Map* map,
 }
 
 
-void MarkCompactCollector::ClearDependentICList(Object* head) {
-  Object* current = head;
-  Object* undefined = heap()->undefined_value();
-  while (current != undefined) {
-    Code* code = Code::cast(current);
-    if (IsMarked(code)) {
-      DCHECK(code->is_weak_stub());
-      IC::InvalidateMaps(code);
-    }
-    current = code->next_code_link();
-    code->set_next_code_link(undefined);
-  }
-}
-
-
 void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
   DisallowHeapAllocation no_allocation;
   DependentCode::GroupStartIndexes starts(entries);
   int number_of_entries = starts.number_of_entries();
   if (number_of_entries == 0) return;
-  int g = DependentCode::kWeakICGroup;
-  if (starts.at(g) != starts.at(g + 1)) {
-    int i = starts.at(g);
-    DCHECK(i + 1 == starts.at(g + 1));
-    Object* head = entries->object_at(i);
-    ClearDependentICList(head);
-  }
-  g = DependentCode::kWeakCodeGroup;
+  int g = DependentCode::kWeakCodeGroup;
   for (int i = starts.at(g); i < starts.at(g + 1); i++) {
     // If the entry is compilation info then the map must be alive,
     // and ClearDependentCode shouldn't be called.
@@ -2621,34 +2572,17 @@ void MarkCompactCollector::ClearDependentCode(DependentCode* entries) {
 int MarkCompactCollector::ClearNonLiveDependentCodeInGroup(
     DependentCode* entries, int group, int start, int end, int new_start) {
   int survived = 0;
-  if (group == DependentCode::kWeakICGroup) {
-    // Dependent weak IC stubs form a linked list and only the head is stored
-    // in the dependent code array.
-    if (start != end) {
-      DCHECK(start + 1 == end);
-      Object* old_head = entries->object_at(start);
-      MarkCompactWeakObjectRetainer retainer;
-      Object* head = VisitWeakList<Code>(heap(), old_head, &retainer);
-      entries->set_object_at(new_start, head);
-      Object** slot = entries->slot_at(new_start);
-      RecordSlot(slot, slot, head);
-      // We do not compact this group even if the head is undefined,
-      // more dependent ICs are likely to be added later.
-      survived = 1;
-    }
-  } else {
-    for (int i = start; i < end; i++) {
-      Object* obj = entries->object_at(i);
-      DCHECK(obj->IsCode() || IsMarked(obj));
-      if (IsMarked(obj) &&
-          (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
-        if (new_start + survived != i) {
-          entries->set_object_at(new_start + survived, obj);
-        }
-        Object** slot = entries->slot_at(new_start + survived);
-        RecordSlot(slot, slot, obj);
-        survived++;
+  for (int i = start; i < end; i++) {
+    Object* obj = entries->object_at(i);
+    DCHECK(obj->IsCode() || IsMarked(obj));
+    if (IsMarked(obj) &&
+        (!obj->IsCode() || !WillBeDeoptimized(Code::cast(obj)))) {
+      if (new_start + survived != i) {
+        entries->set_object_at(new_start + survived, obj);
       }
+      Object** slot = entries->slot_at(new_start + survived);
+      RecordSlot(slot, slot, obj);
+      survived++;
     }
   }
   entries->set_number_of_entries(
@@ -2810,12 +2744,24 @@ void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
     Address dst_slot = dst_addr;
     DCHECK(IsAligned(size, kPointerSize));
 
+    bool may_contain_raw_values = src->MayContainRawValues();
+#if V8_DOUBLE_FIELDS_UNBOXING
+    LayoutDescriptorHelper helper(src->map());
+    bool has_only_tagged_fields = helper.all_fields_tagged();
+#endif
     for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
       Object* value = Memory::Object_at(src_slot);
 
       Memory::Object_at(dst_slot) = value;
 
-      if (!src->MayContainRawValues()) {
+#if V8_DOUBLE_FIELDS_UNBOXING
+      if (!may_contain_raw_values &&
+          (has_only_tagged_fields ||
+           helper.IsTagged(static_cast<int>(src_slot - src_addr))))
+#else
+      if (!may_contain_raw_values)
+#endif
+      {
         RecordMigratedSlot(value, dst_slot);
       }
 
@@ -2928,7 +2874,8 @@ class PointersUpdatingVisitor : public ObjectVisitor {
   }
 
   static inline void UpdateSlot(Heap* heap, Object** slot) {
-    Object* obj = *slot;
+    Object* obj = reinterpret_cast<Object*>(
+        base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(slot)));
 
     if (!obj->IsHeapObject()) return;
 
@@ -2939,7 +2886,10 @@ class PointersUpdatingVisitor : public ObjectVisitor {
       DCHECK(heap->InFromSpace(heap_obj) ||
              MarkCompactCollector::IsOnEvacuationCandidate(heap_obj));
       HeapObject* target = map_word.ToForwardingAddress();
-      *slot = target;
+      base::NoBarrier_CompareAndSwap(
+          reinterpret_cast<base::AtomicWord*>(slot),
+          reinterpret_cast<base::AtomicWord>(obj),
+          reinterpret_cast<base::AtomicWord>(target));
       DCHECK(!heap->InFromSpace(target) &&
              !MarkCompactCollector::IsOnEvacuationCandidate(target));
     }
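
UpdateSlot() now reads and writes the slot atomically: the compare-and-swap only installs the forwarding address if the slot still holds the value that was originally read, so a concurrent store from another thread is never clobbered. A standalone sketch with std::atomic in place of base::NoBarrier_CompareAndSwap:

    #include <atomic>
    #include <cstdint>

    void UpdateSlot(std::atomic<intptr_t>* slot, intptr_t old_value,
                    intptr_t forwarded) {
      intptr_t expected = old_value;
      // On failure another writer got there first; keep their value.
      slot->compare_exchange_strong(expected, forwarded,
                                    std::memory_order_relaxed);
    }
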
@@ -3256,7 +3206,7 @@ static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
       }
       HeapObject* live_object = HeapObject::FromAddress(free_end);
       DCHECK(Marking::IsBlack(Marking::MarkBitFrom(live_object)));
-      Map* map = live_object->map();
+      Map* map = live_object->synchronized_map();
       int size = live_object->SizeFromMap(map);
       if (sweeping_mode == SWEEP_AND_VISIT_LIVE_OBJECTS) {
         live_object->IterateBody(map->instance_type(), size, v);
@@ -3436,6 +3386,7 @@ void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_PAGES);
+    EvacuationScope evacuation_scope(this);
     EvacuatePages();
   }
 
@@ -4182,12 +4133,11 @@ void MarkCompactCollector::SweepSpaces() {
     GCTracer::Scope sweep_scope(heap()->tracer(),
                                 GCTracer::Scope::MC_SWEEP_OLDSPACE);
     {
-      SequentialSweepingScope scope(this);
       SweepSpace(heap()->old_pointer_space(), CONCURRENT_SWEEPING);
       SweepSpace(heap()->old_data_space(), CONCURRENT_SWEEPING);
     }
     sweeping_in_progress_ = true;
-    if (!FLAG_predictable) {
+    if (FLAG_concurrent_sweeping) {
       StartSweeperThreads();
     }
   }
@@ -4222,6 +4172,10 @@ void MarkCompactCollector::SweepSpaces() {
 
   // Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
+  CodeRange* code_range = heap()->isolate()->code_range();
+  if (code_range != NULL && code_range->valid()) {
+    code_range->ReserveEmergencyBlock();
+  }
 
   if (FLAG_print_cumulative_gc_stat) {
     heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
index e48d5a3..e26e06c 100644 (file)
@@ -643,11 +643,9 @@ class MarkCompactCollector {
   // Checks if sweeping is in progress right now on any space.
   bool sweeping_in_progress() { return sweeping_in_progress_; }
 
-  void set_sequential_sweeping(bool sequential_sweeping) {
-    sequential_sweeping_ = sequential_sweeping;
-  }
+  void set_evacuation(bool evacuation) { evacuation_ = evacuation; }
 
-  bool sequential_sweeping() const { return sequential_sweeping_; }
+  bool evacuation() const { return evacuation_; }
 
   // Mark the global table which maps weak objects to dependent code without
   // marking its contents.
@@ -657,7 +655,13 @@ class MarkCompactCollector {
   // to artificially keep AllocationSites alive for a time.
   void MarkAllocationSite(AllocationSite* site);
 
-  bool IsMarkingDequeEmpty();
+  MarkingDeque* marking_deque() { return &marking_deque_; }
+
+  void EnsureMarkingDequeIsCommittedAndInitialize();
+
+  void InitializeMarkingDeque();
+
+  void UncommitMarkingDeque();
 
  private:
   class SweeperTask;
@@ -704,7 +708,7 @@ class MarkCompactCollector {
 
   base::Semaphore pending_sweeper_jobs_semaphore_;
 
-  bool sequential_sweeping_;
+  bool evacuation_;
 
   SlotsBufferAllocator slots_buffer_allocator_;
 
@@ -768,7 +772,8 @@ class MarkCompactCollector {
   //    - Processing of objects reachable through Harmony WeakMaps.
   //    - Objects reachable due to host application logic like object groups
   //      or implicit references' groups.
-  void ProcessEphemeralMarking(ObjectVisitor* visitor);
+  void ProcessEphemeralMarking(ObjectVisitor* visitor,
+                               bool only_process_harmony_weak_collections);
 
   // If the call-site of the top optimized code was not prepared for
   // deoptimization, then treat the maps in the code as strong pointers,
@@ -786,10 +791,6 @@ class MarkCompactCollector {
   // flag on the marking stack.
   void RefillMarkingDeque();
 
-  // After reachable maps have been marked process per context object
-  // literal map caches removing unmarked entries.
-  void ProcessMapCaches();
-
   // Callback function for telling whether the object *p is an unmarked
   // heap object.
   static bool IsUnmarkedHeapObject(Object** p);
@@ -807,7 +808,6 @@ class MarkCompactCollector {
   void TrimEnumCache(Map* map, DescriptorArray* descriptors);
 
   void ClearDependentCode(DependentCode* dependent_code);
-  void ClearDependentICList(Object* head);
   void ClearNonLiveDependentCode(DependentCode* dependent_code);
   int ClearNonLiveDependentCodeInGroup(DependentCode* dependent_code, int group,
                                        int start, int end, int new_start);
@@ -883,6 +883,8 @@ class MarkCompactCollector {
 #endif
 
   Heap* heap_;
+  base::VirtualMemory* marking_deque_memory_;
+  bool marking_deque_memory_committed_;
   MarkingDeque marking_deque_;
   CodeFlusher* code_flusher_;
   bool have_code_to_deoptimize_;
@@ -938,14 +940,14 @@ class MarkBitCellIterator BASE_EMBEDDED {
 };
 
 
-class SequentialSweepingScope BASE_EMBEDDED {
+class EvacuationScope BASE_EMBEDDED {
  public:
-  explicit SequentialSweepingScope(MarkCompactCollector* collector)
+  explicit EvacuationScope(MarkCompactCollector* collector)
       : collector_(collector) {
-    collector_->set_sequential_sweeping(true);
+    collector_->set_evacuation(true);
   }
 
-  ~SequentialSweepingScope() { collector_->set_sequential_sweeping(false); }
+  ~EvacuationScope() { collector_->set_evacuation(false); }
 
  private:
   MarkCompactCollector* collector_;
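
EvacuationScope (formerly SequentialSweepingScope) is the usual RAII flag
guard: the flag is set for exactly the dynamic extent of the scope, which is
what lets EvacuateNewSpaceAndCandidates wrap EvacuatePages() above. A generic
version of the pattern:

    class ScopedFlag {
     public:
      explicit ScopedFlag(bool* flag) : flag_(flag) { *flag_ = true; }
      ~ScopedFlag() { *flag_ = false; }  // cleared on every exit path

     private:
      bool* flag_;
      ScopedFlag(const ScopedFlag&);      // not copyable, in the spirit of
      void operator=(const ScopedFlag&);  // DISALLOW_COPY_AND_ASSIGN
    };
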
index 1f37306..e6334f3 100644 (file)
@@ -263,12 +263,9 @@ void StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget(Heap* heap,
   // to be serialized.
   if (FLAG_cleanup_code_caches_at_gc && target->is_inline_cache_stub() &&
       !target->is_call_stub() &&
-      (target->ic_state() == MEGAMORPHIC || target->ic_state() == GENERIC ||
-       target->ic_state() == POLYMORPHIC ||
-       (heap->flush_monomorphic_ics() && !target->is_weak_stub()) ||
+      ((heap->flush_monomorphic_ics() && !target->embeds_maps_weakly()) ||
        heap->isolate()->serializer_enabled() ||
-       target->ic_age() != heap->global_ic_age() ||
-       target->is_invalidated_weak_stub())) {
+       target->ic_age() != heap->global_ic_age())) {
     ICUtility::Clear(heap->isolate(), rinfo->pc(),
                      rinfo->host()->constant_pool());
     target = Code::GetCodeFromTargetAddress(rinfo->target_address());
@@ -510,10 +507,7 @@ void StaticMarkingVisitor<StaticVisitor>::VisitConstantPoolArray(
     bool is_weak_object =
         (array->get_weak_object_state() ==
              ConstantPoolArray::WEAK_OBJECTS_IN_OPTIMIZED_CODE &&
-         Code::IsWeakObjectInOptimizedCode(object)) ||
-        (array->get_weak_object_state() ==
-             ConstantPoolArray::WEAK_OBJECTS_IN_IC &&
-         Code::IsWeakObjectInIC(object));
+         Code::IsWeakObjectInOptimizedCode(object));
     if (!is_weak_object) {
       StaticVisitor::MarkObject(heap, object);
     }
index d356917..20d92de 100644 (file)
@@ -11,7 +11,7 @@ namespace internal {
 
 
 StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
-    int instance_type, int instance_size) {
+    int instance_type, int instance_size, bool has_unboxed_fields) {
   if (instance_type < FIRST_NONSTRING_TYPE) {
     switch (instance_type & kStringRepresentationMask) {
       case kSeqStringTag:
@@ -33,7 +33,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
 
       case kExternalStringTag:
         return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
-                                   instance_size);
+                                   instance_size, has_unboxed_fields);
     }
     UNREACHABLE();
   }
@@ -74,11 +74,11 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
 
     case JS_SET_TYPE:
       return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
-                                 JSSet::kSize);
+                                 JSSet::kSize, has_unboxed_fields);
 
     case JS_MAP_TYPE:
       return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
-                                 JSMap::kSize);
+                                 JSMap::kSize, has_unboxed_fields);
 
     case JS_WEAK_MAP_TYPE:
     case JS_WEAK_SET_TYPE:
@@ -92,15 +92,15 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
 
     case JS_PROXY_TYPE:
       return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
-                                 JSProxy::kSize);
+                                 JSProxy::kSize, has_unboxed_fields);
 
     case JS_FUNCTION_PROXY_TYPE:
       return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
-                                 JSFunctionProxy::kSize);
+                                 JSFunctionProxy::kSize, has_unboxed_fields);
 
     case FOREIGN_TYPE:
       return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
-                                 Foreign::kSize);
+                                 Foreign::kSize, has_unboxed_fields);
 
     case SYMBOL_TYPE:
       return kVisitSymbol;
@@ -131,7 +131,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
     case JS_SET_ITERATOR_TYPE:
     case JS_MAP_ITERATOR_TYPE:
       return GetVisitorIdForSize(kVisitJSObject, kVisitJSObjectGeneric,
-                                 instance_size);
+                                 instance_size, has_unboxed_fields);
 
     case JS_FUNCTION_TYPE:
       return kVisitJSFunction;
@@ -143,7 +143,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
 
       TYPED_ARRAYS(EXTERNAL_ARRAY_CASE)
       return GetVisitorIdForSize(kVisitDataObject, kVisitDataObjectGeneric,
-                                 instance_size);
+                                 instance_size, has_unboxed_fields);
 #undef EXTERNAL_ARRAY_CASE
 
     case FIXED_UINT8_ARRAY_TYPE:
@@ -167,7 +167,7 @@ StaticVisitorBase::VisitorId StaticVisitorBase::GetVisitorId(
       }
 
       return GetVisitorIdForSize(kVisitStruct, kVisitStructGeneric,
-                                 instance_size);
+                                 instance_size, has_unboxed_fields);
 
     default:
       UNREACHABLE();
index db6b892..a442867 100644 (file)
@@ -6,6 +6,7 @@
 #define V8_OBJECTS_VISITING_H_
 
 #include "src/allocation.h"
+#include "src/layout-descriptor.h"
 
 // This file provides base classes and auxiliary methods for defining
 // static object visitors used during GC.
@@ -105,26 +106,35 @@ class StaticVisitorBase : public AllStatic {
 
   // Determine which specialized visitor should be used for given instance type
  // and instance size.
-  static VisitorId GetVisitorId(int instance_type, int instance_size);
+  static VisitorId GetVisitorId(int instance_type, int instance_size,
+                                bool has_unboxed_fields);
 
+  // Determine which specialized visitor should be used for given map.
   static VisitorId GetVisitorId(Map* map) {
-    return GetVisitorId(map->instance_type(), map->instance_size());
+    return GetVisitorId(
+        map->instance_type(), map->instance_size(),
+        FLAG_unbox_double_fields && !map->HasFastPointerLayout());
   }
 
   // For visitors that allow specialization by size calculate VisitorId based
   // on size, base visitor id and generic visitor id.
   static VisitorId GetVisitorIdForSize(VisitorId base, VisitorId generic,
-                                       int object_size) {
+                                       int object_size,
+                                       bool has_unboxed_fields) {
     DCHECK((base == kVisitDataObject) || (base == kVisitStruct) ||
            (base == kVisitJSObject));
     DCHECK(IsAligned(object_size, kPointerSize));
     DCHECK(kMinObjectSizeInWords * kPointerSize <= object_size);
     DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+    DCHECK(!has_unboxed_fields || (base == kVisitJSObject));
 
-    const VisitorId specialization = static_cast<VisitorId>(
-        base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords);
+    if (has_unboxed_fields) return generic;
 
-    return Min(specialization, generic);
+    int visitor_id =
+        Min(base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords,
+            static_cast<int>(generic));
+
+    return static_cast<VisitorId>(visitor_id);
   }
 };
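
With unboxed double fields a size-specialized visitor would treat every word
of the object as a tagged pointer, so GetVisitorIdForSize now falls back to
the generic visitor whenever has_unboxed_fields is set; the clamping
arithmetic is otherwise unchanged. Stand-alone, with illustrative constants
(64-bit pointers assumed):

    int GetVisitorIdForSize(int base, int generic, int object_size,
                            bool has_unboxed_fields) {
      const int kPointerSizeLog2 = 3;
      const int kMinObjectSizeInWords = 2;
      if (has_unboxed_fields) return generic;  // raw doubles => generic visitor
      int id = base + (object_size >> kPointerSizeLog2) - kMinObjectSizeInWords;
      return id < generic ? id : generic;      // clamp at the generic fallback
    }
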
 
@@ -158,7 +168,7 @@ class VisitorDispatchTable {
             StaticVisitorBase::VisitorId generic, int object_size_in_words>
   void RegisterSpecialization() {
     static const int size = object_size_in_words * kPointerSize;
-    Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size),
+    Register(StaticVisitorBase::GetVisitorIdForSize(base, generic, size, false),
              &Visitor::template VisitSpecialized<size>);
   }
 
@@ -189,11 +199,43 @@ class BodyVisitorBase : public AllStatic {
  public:
   INLINE(static void IteratePointers(Heap* heap, HeapObject* object,
                                      int start_offset, int end_offset)) {
-    Object** start_slot =
-        reinterpret_cast<Object**>(object->address() + start_offset);
-    Object** end_slot =
-        reinterpret_cast<Object**>(object->address() + end_offset);
-    StaticVisitor::VisitPointers(heap, start_slot, end_slot);
+    DCHECK(!FLAG_unbox_double_fields || object->map()->HasFastPointerLayout());
+    IterateRawPointers(heap, object, start_offset, end_offset);
+  }
+
+  INLINE(static void IterateBody(Heap* heap, HeapObject* object,
+                                 int start_offset, int end_offset)) {
+    if (!FLAG_unbox_double_fields || object->map()->HasFastPointerLayout()) {
+      IterateRawPointers(heap, object, start_offset, end_offset);
+    } else {
+      IterateBodyUsingLayoutDescriptor(heap, object, start_offset, end_offset);
+    }
+  }
+
+ private:
+  INLINE(static void IterateRawPointers(Heap* heap, HeapObject* object,
+                                        int start_offset, int end_offset)) {
+    StaticVisitor::VisitPointers(heap,
+                                 HeapObject::RawField(object, start_offset),
+                                 HeapObject::RawField(object, end_offset));
+  }
+
+  static void IterateBodyUsingLayoutDescriptor(Heap* heap, HeapObject* object,
+                                               int start_offset,
+                                               int end_offset) {
+    DCHECK(FLAG_unbox_double_fields);
+    DCHECK(IsAligned(start_offset, kPointerSize) &&
+           IsAligned(end_offset, kPointerSize));
+
+    LayoutDescriptorHelper helper(object->map());
+    DCHECK(!helper.all_fields_tagged());
+    for (int offset = start_offset; offset < end_offset;) {
+      int end_of_region_offset;
+      if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+        IterateRawPointers(heap, object, offset, end_of_region_offset);
+      }
+      offset = end_of_region_offset;
+    }
   }
 };
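
IterateBodyUsingLayoutDescriptor visits only maximal runs of tagged slots and
skips unboxed-double regions entirely. The same walk over a plain bitmap, as
a toy stand-in for LayoutDescriptorHelper:

    #include <cstdio>
    #include <vector>

    // tagged[i] says whether slot i holds a pointer; contiguous tagged slots
    // are reported as one [from, to) region, mirroring IsTagged(offset,
    // end_offset, &end_of_region_offset) above.
    void VisitTaggedRegions(const std::vector<bool>& tagged) {
      size_t i = 0;
      while (i < tagged.size()) {
        if (!tagged[i]) { i++; continue; }
        size_t from = i;
        while (i < tagged.size() && tagged[i]) i++;
        std::printf("visit slots [%zu, %zu)\n", from, i);
      }
    }
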
 
@@ -203,7 +245,7 @@ class FlexibleBodyVisitor : public BodyVisitorBase<StaticVisitor> {
  public:
   INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
     int object_size = BodyDescriptor::SizeOf(map, object);
-    BodyVisitorBase<StaticVisitor>::IteratePointers(
+    BodyVisitorBase<StaticVisitor>::IterateBody(
         map->GetHeap(), object, BodyDescriptor::kStartOffset, object_size);
     return static_cast<ReturnType>(object_size);
   }
@@ -222,9 +264,9 @@ template <typename StaticVisitor, typename BodyDescriptor, typename ReturnType>
 class FixedBodyVisitor : public BodyVisitorBase<StaticVisitor> {
  public:
   INLINE(static ReturnType Visit(Map* map, HeapObject* object)) {
-    BodyVisitorBase<StaticVisitor>::IteratePointers(
-        map->GetHeap(), object, BodyDescriptor::kStartOffset,
-        BodyDescriptor::kEndOffset);
+    BodyVisitorBase<StaticVisitor>::IterateBody(map->GetHeap(), object,
+                                                BodyDescriptor::kStartOffset,
+                                                BodyDescriptor::kEndOffset);
     return static_cast<ReturnType>(BodyDescriptor::kSize);
   }
 };
index 2b696ea..37a123d 100644 (file)
@@ -93,7 +93,8 @@ CodeRange::CodeRange(Isolate* isolate)
       code_range_(NULL),
       free_list_(0),
       allocation_list_(0),
-      current_allocation_block_index_(0) {}
+      current_allocation_block_index_(0),
+      emergency_block_() {}
 
 
 bool CodeRange::SetUp(size_t requested) {
@@ -144,6 +145,7 @@ bool CodeRange::SetUp(size_t requested) {
   current_allocation_block_index_ = 0;
 
   LOG(isolate_, NewEvent("CodeRange", code_range_->address(), requested));
+  ReserveEmergencyBlock();
   return true;
 }
 
@@ -202,35 +204,20 @@ Address CodeRange::AllocateRawMemory(const size_t requested_size,
                                      const size_t commit_size,
                                      size_t* allocated) {
   DCHECK(commit_size <= requested_size);
-  DCHECK(allocation_list_.length() == 0 ||
-         current_allocation_block_index_ < allocation_list_.length());
-  if (allocation_list_.length() == 0 ||
-      requested_size > allocation_list_[current_allocation_block_index_].size) {
-    // Find an allocation block large enough.
-    if (!GetNextAllocationBlock(requested_size)) return NULL;
-  }
-  // Commit the requested memory at the start of the current allocation block.
-  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
-  FreeBlock current = allocation_list_[current_allocation_block_index_];
-  if (aligned_requested >= (current.size - Page::kPageSize)) {
-    // Don't leave a small free block, useless for a large object or chunk.
-    *allocated = current.size;
-  } else {
-    *allocated = aligned_requested;
+  FreeBlock current;
+  if (!ReserveBlock(requested_size, &current)) {
+    *allocated = 0;
+    return NULL;
   }
+  *allocated = current.size;
   DCHECK(*allocated <= current.size);
   DCHECK(IsAddressAligned(current.start, MemoryChunk::kAlignment));
   if (!isolate_->memory_allocator()->CommitExecutableMemory(
           code_range_, current.start, commit_size, *allocated)) {
     *allocated = 0;
+    ReleaseBlock(&current);
     return NULL;
   }
-  allocation_list_[current_allocation_block_index_].start += *allocated;
-  allocation_list_[current_allocation_block_index_].size -= *allocated;
-  if (*allocated == current.size) {
-    // This block is used up, get the next one.
-    GetNextAllocationBlock(0);
-  }
   return current.start;
 }
 
@@ -260,6 +247,49 @@ void CodeRange::TearDown() {
 }
 
 
+bool CodeRange::ReserveBlock(const size_t requested_size, FreeBlock* block) {
+  DCHECK(allocation_list_.length() == 0 ||
+         current_allocation_block_index_ < allocation_list_.length());
+  if (allocation_list_.length() == 0 ||
+      requested_size > allocation_list_[current_allocation_block_index_].size) {
+    // Find an allocation block large enough.
+    if (!GetNextAllocationBlock(requested_size)) return false;
+  }
+  // Reserve the requested memory at the start of the current allocation block.
+  size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
+  *block = allocation_list_[current_allocation_block_index_];
+  // Don't leave a small free block, useless for a large object or chunk.
+  if (aligned_requested < (block->size - Page::kPageSize)) {
+    block->size = aligned_requested;
+  }
+  DCHECK(IsAddressAligned(block->start, MemoryChunk::kAlignment));
+  allocation_list_[current_allocation_block_index_].start += block->size;
+  allocation_list_[current_allocation_block_index_].size -= block->size;
+  return true;
+}
+
+
+void CodeRange::ReleaseBlock(const FreeBlock* block) { free_list_.Add(*block); }
+
+
+void CodeRange::ReserveEmergencyBlock() {
+  const size_t requested_size = MemoryAllocator::CodePageAreaSize();
+  if (emergency_block_.size == 0) {
+    ReserveBlock(requested_size, &emergency_block_);
+  } else {
+    DCHECK(emergency_block_.size >= requested_size);
+  }
+}
+
+
+void CodeRange::ReleaseEmergencyBlock() {
+  if (emergency_block_.size != 0) {
+    ReleaseBlock(&emergency_block_);
+    emergency_block_.size = 0;
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //
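
Together, ReserveBlock/ReleaseBlock and the emergency block implement a
rainy-day reservation: one page of the code range is held back after each GC
so that compacting code space can always obtain a page, even when the range
is otherwise exhausted. The protocol in miniature (all names illustrative,
not V8's):

    #include <cstddef>

    struct Block { size_t start; size_t size; };

    class CodeArena {
     public:
      CodeArena() : free_bytes_(1 << 20) {
        emergency_.start = emergency_.size = 0;
      }

      void ReserveEmergency() {                    // called right after GC
        if (emergency_.size == 0 && free_bytes_ >= kPage) {
          emergency_.start = free_bytes_ - kPage;  // carve a page off the tail
          emergency_.size = kPage;
          free_bytes_ -= kPage;
        }
      }

      void ReleaseEmergency() {                    // called before evacuation
        if (emergency_.size != 0) {
          free_bytes_ += emergency_.size;          // page is allocatable again
          emergency_.size = 0;
        }
      }

     private:
      static const size_t kPage = 4096;
      size_t free_bytes_;
      Block emergency_;
    };
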
@@ -892,6 +922,24 @@ void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
 // -----------------------------------------------------------------------------
 // PagedSpace implementation
 
+STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::NEW_SPACE) ==
+              ObjectSpace::kObjectSpaceNewSpace);
+STATIC_ASSERT(static_cast<ObjectSpace>(1
+                                       << AllocationSpace::OLD_POINTER_SPACE) ==
+              ObjectSpace::kObjectSpaceOldPointerSpace);
+STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::OLD_DATA_SPACE) ==
+              ObjectSpace::kObjectSpaceOldDataSpace);
+STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CODE_SPACE) ==
+              ObjectSpace::kObjectSpaceCodeSpace);
+STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::CELL_SPACE) ==
+              ObjectSpace::kObjectSpaceCellSpace);
+STATIC_ASSERT(
+    static_cast<ObjectSpace>(1 << AllocationSpace::PROPERTY_CELL_SPACE) ==
+    ObjectSpace::kObjectSpacePropertyCellSpace);
+STATIC_ASSERT(static_cast<ObjectSpace>(1 << AllocationSpace::MAP_SPACE) ==
+              ObjectSpace::kObjectSpaceMapSpace);
+
+
 PagedSpace::PagedSpace(Heap* heap, intptr_t max_capacity, AllocationSpace space,
                        Executability executable)
     : Space(heap, space, executable),
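
The STATIC_ASSERTs above pin the AllocationSpace enum ordering to the
ObjectSpace bit flags at compile time, so reordering either enum breaks the
build instead of silently corrupting space bookkeeping. The same check with
C++11 static_assert, on illustrative two-entry enums:

    enum Space { NEW_SPACE = 0, OLD_POINTER_SPACE = 1 };
    enum SpaceBit { kNewSpaceBit = 1 << 0, kOldPointerSpaceBit = 1 << 1 };
    static_assert((1 << NEW_SPACE) == kNewSpaceBit, "enum/bit mapping drifted");
    static_assert((1 << OLD_POINTER_SPACE) == kOldPointerSpaceBit,
                  "enum/bit mapping drifted");
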
@@ -1106,6 +1154,14 @@ void PagedSpace::ReleasePage(Page* page) {
 
 
 void PagedSpace::CreateEmergencyMemory() {
+  if (identity() == CODE_SPACE) {
+    // Make the emergency block available to the allocator.
+    CodeRange* code_range = heap()->isolate()->code_range();
+    if (code_range != NULL && code_range->valid()) {
+      code_range->ReleaseEmergencyBlock();
+    }
+    DCHECK(MemoryAllocator::CodePageAreaSize() == AreaSize());
+  }
   emergency_memory_ = heap()->isolate()->memory_allocator()->AllocateChunk(
       AreaSize(), AreaSize(), executable(), this);
 }
@@ -1275,7 +1331,8 @@ void NewSpace::Grow() {
   // Double the semispace size but only up to maximum capacity.
   DCHECK(TotalCapacity() < MaximumCapacity());
   int new_capacity =
-      Min(MaximumCapacity(), 2 * static_cast<int>(TotalCapacity()));
+      Min(MaximumCapacity(),
+          FLAG_semi_space_growth_factor * static_cast<int>(TotalCapacity()));
   if (to_space_.GrowTo(new_capacity)) {
     // Only grow from space if we managed to grow to-space.
     if (!from_space_.GrowTo(new_capacity)) {
@@ -2569,7 +2626,7 @@ void PagedSpace::PrepareForMarkCompact() {
 
 
 intptr_t PagedSpace::SizeOfObjects() {
-  DCHECK(FLAG_predictable ||
+  DCHECK(!FLAG_concurrent_sweeping ||
          heap()->mark_compact_collector()->sweeping_in_progress() ||
          (unswept_free_bytes_ == 0));
   return Size() - unswept_free_bytes_ - (limit() - top());
index ef294b2..dcd3364 100644 (file)
@@ -900,6 +900,9 @@ class CodeRange {
   bool UncommitRawMemory(Address start, size_t length);
   void FreeRawMemory(Address buf, size_t length);
 
+  void ReserveEmergencyBlock();
+  void ReleaseEmergencyBlock();
+
  private:
   Isolate* isolate_;
 
@@ -908,6 +911,7 @@ class CodeRange {
   // Plain old data class, just a struct plus a constructor.
   class FreeBlock {
    public:
+    FreeBlock() : start(0), size(0) {}
     FreeBlock(Address start_arg, size_t size_arg)
         : start(start_arg), size(size_arg) {
       DCHECK(IsAddressAligned(start, MemoryChunk::kAlignment));
@@ -932,6 +936,12 @@ class CodeRange {
   List<FreeBlock> allocation_list_;
   int current_allocation_block_index_;
 
+  // The emergency block guarantees that we can always allocate a page for
+  // evacuation candidates when code space is compacted. The emergency block
+  // is reserved immediately after GC and is released immediately before
+  // allocating a page for evacuation.
+  FreeBlock emergency_block_;
+
   // Finds a block on the allocation list that contains at least the
   // requested amount of memory.  If none is found, sorts and merges
   // the existing free memory blocks, and searches again.
@@ -940,6 +950,8 @@ class CodeRange {
   // Compares the start addresses of two free blocks.
   static int CompareFreeBlockAddress(const FreeBlock* left,
                                      const FreeBlock* right);
+  bool ReserveBlock(const size_t requested_size, FreeBlock* block);
+  void ReleaseBlock(const FreeBlock* block);
 
   DISALLOW_COPY_AND_ASSIGN(CodeRange);
 };
index 278e9f2..aac6811 100644 (file)
@@ -109,7 +109,9 @@ void StoreBuffer::Uniq() {
   for (Address* read = old_start_; read < old_top_; read++) {
     Address current = *read;
     if (current != previous) {
-      if (heap_->InNewSpace(*reinterpret_cast<Object**>(current))) {
+      Object* object = reinterpret_cast<Object*>(
+          base::NoBarrier_Load(reinterpret_cast<base::AtomicWord*>(current)));
+      if (heap_->InNewSpace(object)) {
         *write++ = current;
       }
     }
@@ -507,11 +509,37 @@ void StoreBuffer::IteratePointersToNewSpace(ObjectSlotCallback slot_callback,
             for (HeapObject* heap_object = iterator.Next(); heap_object != NULL;
                  heap_object = iterator.Next()) {
               // We iterate over objects that contain new space pointers only.
-              if (!heap_object->MayContainRawValues()) {
-                FindPointersToNewSpaceInRegion(
-                    heap_object->address() + HeapObject::kHeaderSize,
-                    heap_object->address() + heap_object->Size(), slot_callback,
-                    clear_maps);
+              bool may_contain_raw_values = heap_object->MayContainRawValues();
+              if (!may_contain_raw_values) {
+                Address obj_address = heap_object->address();
+                const int start_offset = HeapObject::kHeaderSize;
+                const int end_offset = heap_object->Size();
+#if V8_DOUBLE_FIELDS_UNBOXING
+                LayoutDescriptorHelper helper(heap_object->map());
+                bool has_only_tagged_fields = helper.all_fields_tagged();
+
+                if (!has_only_tagged_fields) {
+                  for (int offset = start_offset; offset < end_offset;) {
+                    int end_of_region_offset;
+                    if (helper.IsTagged(offset, end_offset,
+                                        &end_of_region_offset)) {
+                      FindPointersToNewSpaceInRegion(
+                          obj_address + offset,
+                          obj_address + end_of_region_offset, slot_callback,
+                          clear_maps);
+                    }
+                    offset = end_of_region_offset;
+                  }
+                } else {
+#endif
+                  Address start_address = obj_address + start_offset;
+                  Address end_address = obj_address + end_offset;
+                  // Object has only tagged fields.
+                  FindPointersToNewSpaceInRegion(start_address, end_address,
+                                                 slot_callback, clear_maps);
+#if V8_DOUBLE_FIELDS_UNBOXING
+                }
+#endif
               }
             }
           }
index ce76fbe..0e6a03d 100644 (file)
@@ -796,7 +796,6 @@ bool HInstruction::CanDeoptimize() {
     case HValue::kCallNew:
     case HValue::kCallNewArray:
     case HValue::kCallStub:
-    case HValue::kCallWithDescriptor:
     case HValue::kCapturedObject:
     case HValue::kClassOfTestAndBranch:
     case HValue::kCompareGeneric:
@@ -863,6 +862,7 @@ bool HInstruction::CanDeoptimize() {
     case HValue::kBranch:
     case HValue::kCallJSFunction:
     case HValue::kCallRuntime:
+    case HValue::kCallWithDescriptor:
     case HValue::kChange:
     case HValue::kCheckHeapObject:
     case HValue::kCheckInstanceType:
@@ -1155,12 +1155,10 @@ std::ostream& HReturn::PrintDataTo(std::ostream& os) const {  // NOLINT
 
 
 Representation HBranch::observed_input_representation(int index) {
-  static const ToBooleanStub::Types tagged_types(
-      ToBooleanStub::NULL_TYPE |
-      ToBooleanStub::SPEC_OBJECT |
-      ToBooleanStub::STRING |
-      ToBooleanStub::SYMBOL);
-  if (expected_input_types_.ContainsAnyOf(tagged_types)) {
+  if (expected_input_types_.Contains(ToBooleanStub::NULL_TYPE) ||
+      expected_input_types_.Contains(ToBooleanStub::SPEC_OBJECT) ||
+      expected_input_types_.Contains(ToBooleanStub::STRING) ||
+      expected_input_types_.Contains(ToBooleanStub::SYMBOL)) {
     return Representation::Tagged();
   }
   if (expected_input_types_.Contains(ToBooleanStub::UNDEFINED)) {
@@ -1718,6 +1716,13 @@ std::ostream& HCallStub::PrintDataTo(std::ostream& os) const {  // NOLINT
 }
 
 
+Code::Flags HTailCallThroughMegamorphicCache::flags() const {
+  Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::LOAD_IC));
+  return code_flags;
+}
+
+
 std::ostream& HTailCallThroughMegamorphicCache::PrintDataTo(
     std::ostream& os) const {  // NOLINT
   for (int i = 0; i < OperandCount(); i++) {
@@ -2662,14 +2667,17 @@ void HEnterInlined::RegisterReturnTarget(HBasicBlock* return_target,
 
 
 std::ostream& HEnterInlined::PrintDataTo(std::ostream& os) const {  // NOLINT
-  return os << function()->debug_name()->ToCString().get()
-            << ", id=" << function()->id().ToInt();
+  return os << function()->debug_name()->ToCString().get();
 }
 
 
 static bool IsInteger32(double value) {
-  double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
-  return bit_cast<int64_t>(roundtrip_value) == bit_cast<int64_t>(value);
+  if (value >= std::numeric_limits<int32_t>::min() &&
+      value <= std::numeric_limits<int32_t>::max()) {
+    double roundtrip_value = static_cast<double>(static_cast<int32_t>(value));
+    return bit_cast<int64_t>(roundtrip_value) == bit_cast<int64_t>(value);
+  }
+  return false;
 }
 
 
@@ -2775,7 +2783,7 @@ HConstant::HConstant(double double_value, Representation r,
                                            !std::isnan(double_value)) |
                  IsUndetectableField::encode(false) |
                  InstanceTypeField::encode(kUnknownInstanceType)),
-      int32_value_(DoubleToInt32(double_value)),
+      int32_value_(HasInteger32Value() ? DoubleToInt32(double_value) : 0),
       double_value_(double_value) {
   bit_field_ = HasSmiValueField::update(
       bit_field_, HasInteger32Value() && Smi::IsValid(int32_value_));
@@ -2834,6 +2842,10 @@ void HConstant::Initialize(Representation r) {
     // could cause heap object checks not to get emitted.
     object_ = Unique<Object>(Handle<Object>::null());
   }
+  if (r.IsSmiOrInteger32()) {
+    // If it's not a heap object, it can't be in new space.
+    bit_field_ = IsNotInNewSpaceField::update(bit_field_, true);
+  }
   set_representation(r);
   SetFlag(kUseGVN);
 }
@@ -4489,18 +4501,24 @@ void HPhi::SimplifyConstantInputs() {
 
 void HPhi::InferRepresentation(HInferRepresentationPhase* h_infer) {
   DCHECK(CheckFlag(kFlexibleRepresentation));
-  Representation new_rep = RepresentationFromInputs();
-  UpdateRepresentation(new_rep, h_infer, "inputs");
-  new_rep = RepresentationFromUses();
+  Representation new_rep = RepresentationFromUses();
   UpdateRepresentation(new_rep, h_infer, "uses");
+  new_rep = RepresentationFromInputs();
+  UpdateRepresentation(new_rep, h_infer, "inputs");
   new_rep = RepresentationFromUseRequirements();
   UpdateRepresentation(new_rep, h_infer, "use requirements");
 }
 
 
 Representation HPhi::RepresentationFromInputs() {
-  Representation r = Representation::None();
+  bool has_type_feedback =
+      smi_non_phi_uses() + int32_non_phi_uses() + double_non_phi_uses() > 0;
+  Representation r = representation();
   for (int i = 0; i < OperandCount(); ++i) {
+    // Ignore the conservative Tagged assumption for parameters when type
+    // feedback gives us reason to believe it is too conservative.
+    if (has_type_feedback && OperandAt(i)->IsParameter()) continue;
+
     r = r.generalize(OperandAt(i)->KnownOptimalRepresentation());
   }
   return r;
@@ -4641,6 +4659,14 @@ HObjectAccess HObjectAccess::ForContextSlot(int index) {
 }
 
 
+HObjectAccess HObjectAccess::ForScriptContext(int index) {
+  DCHECK(index >= 0);
+  Portion portion = kInobject;
+  int offset = ScriptContextTable::GetContextOffset(index);
+  return HObjectAccess(portion, offset, Representation::Tagged());
+}
+
+
 HObjectAccess HObjectAccess::ForJSArrayOffset(int offset) {
   DCHECK(offset >= 0);
   Portion portion = kInobject;
index 233ca42..74f2711 100644 (file)
@@ -190,24 +190,21 @@ class LChunkBuilder;
   V(TypedArrayElements)
 
 
-#define DECLARE_ABSTRACT_INSTRUCTION(type)                              \
-  virtual bool Is##type() const FINAL OVERRIDE { return true; }   \
-  static H##type* cast(HValue* value) {                                 \
-    DCHECK(value->Is##type());                                          \
-    return reinterpret_cast<H##type*>(value);                           \
+#define DECLARE_ABSTRACT_INSTRUCTION(type)     \
+  bool Is##type() const FINAL { return true; } \
+  static H##type* cast(HValue* value) {        \
+    DCHECK(value->Is##type());                 \
+    return reinterpret_cast<H##type*>(value);  \
   }
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type)              \
-  virtual LInstruction* CompileToLithium(               \
-     LChunkBuilder* builder) FINAL OVERRIDE;      \
-  static H##type* cast(HValue* value) {                 \
-    DCHECK(value->Is##type());                          \
-    return reinterpret_cast<H##type*>(value);           \
-  }                                                     \
-  virtual Opcode opcode() const FINAL OVERRIDE {  \
-    return HValue::k##type;                             \
-  }
+#define DECLARE_CONCRETE_INSTRUCTION(type)                      \
+  LInstruction* CompileToLithium(LChunkBuilder* builder) FINAL; \
+  static H##type* cast(HValue* value) {                         \
+    DCHECK(value->Is##type());                                  \
+    return reinterpret_cast<H##type*>(value);                   \
+  }                                                             \
+  Opcode opcode() const FINAL { return HValue::k##type; }
 
 
 enum PropertyAccessType { LOAD, STORE };
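
The macro cleanup above drops the now-redundant `virtual` and `OVERRIDE`
tokens: a member function declared final must be virtual, and these functions
necessarily override a base declaration, so FINAL alone carries the full
contract. In standard C++ terms:

    struct HValueLike {
      virtual ~HValueLike() {}
      virtual int opcode() const = 0;
    };

    struct HGotoLike : HValueLike {
      // Before: virtual int opcode() const OVERRIDE FINAL { ... }
      // After: `final` alone -- it requires the function to be virtual, and
      // matching the base signature makes it an override automatically.
      int opcode() const final { return 42; }
    };
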
@@ -1147,7 +1144,7 @@ class HInstruction : public HValue {
   HInstruction* next() const { return next_; }
   HInstruction* previous() const { return previous_; }
 
-  virtual std::ostream& PrintTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintTo(std::ostream& os) const OVERRIDE;          // NOLINT
   virtual std::ostream& PrintDataTo(std::ostream& os) const;       // NOLINT
 
   bool IsLinked() const { return block() != NULL; }
@@ -1168,7 +1165,7 @@ class HInstruction : public HValue {
   }
 
   // The position is a write-once variable.
-  virtual HSourcePosition position() const OVERRIDE {
+  HSourcePosition position() const OVERRIDE {
     return HSourcePosition(position_.position());
   }
   bool has_position() const {
@@ -1180,7 +1177,7 @@ class HInstruction : public HValue {
     position_.set_position(position);
   }
 
-  virtual HSourcePosition operand_position(int index) const OVERRIDE {
+  HSourcePosition operand_position(int index) const OVERRIDE {
     const HSourcePosition pos = position_.operand_position(index);
     return pos.IsUnknown() ? position() : pos;
   }
@@ -1197,7 +1194,7 @@ class HInstruction : public HValue {
   virtual LInstruction* CompileToLithium(LChunkBuilder* builder) = 0;
 
 #ifdef DEBUG
-  virtual void Verify() OVERRIDE;
+  void Verify() OVERRIDE;
 #endif
 
   bool CanDeoptimize();
@@ -1215,7 +1212,7 @@ class HInstruction : public HValue {
     SetDependsOnFlag(kOsrEntries);
   }
 
-  virtual void DeleteFromGraph() OVERRIDE { Unlink(); }
+  void DeleteFromGraph() OVERRIDE { Unlink(); }
 
  private:
   void InitializeAsFirst(HBasicBlock* block) {
@@ -1234,18 +1231,14 @@ class HInstruction : public HValue {
 template<int V>
 class HTemplateInstruction : public HInstruction {
  public:
-  virtual int OperandCount() const FINAL OVERRIDE { return V; }
-  virtual HValue* OperandAt(int i) const FINAL OVERRIDE {
-    return inputs_[i];
-  }
+  int OperandCount() const FINAL { return V; }
+  HValue* OperandAt(int i) const FINAL { return inputs_[i]; }
 
  protected:
   explicit HTemplateInstruction(HType type = HType::Tagged())
       : HInstruction(type) {}
 
-  virtual void InternalSetOperandAt(int i, HValue* value) FINAL OVERRIDE {
-    inputs_[i] = value;
-  }
+  void InternalSetOperandAt(int i, HValue* value) FINAL { inputs_[i] = value; }
 
  private:
   EmbeddedContainer<HValue*, V> inputs_;
@@ -1258,7 +1251,7 @@ class HControlInstruction : public HInstruction {
   virtual int SuccessorCount() const = 0;
   virtual void SetSuccessorAt(int i, HBasicBlock* block) = 0;
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   virtual bool KnownSuccessorBlock(HBasicBlock** block) {
     *block = NULL;
@@ -1323,7 +1316,7 @@ class HTemplateControlInstruction : public HControlInstruction {
 
 class HBlockEntry FINAL : public HTemplateInstruction<0> {
  public:
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1343,12 +1336,12 @@ class HDummyUse FINAL : public HTemplateInstruction<1> {
 
   HValue* value() const { return OperandAt(0); }
 
-  virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(DummyUse);
 };
@@ -1359,7 +1352,7 @@ class HDebugBreak FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P0(HDebugBreak);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1373,16 +1366,16 @@ class HGoto FINAL : public HTemplateControlInstruction<1, 0> {
     SetSuccessorAt(0, target);
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
     *block = FirstSuccessor();
     return true;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(Goto)
 };
@@ -1398,12 +1391,12 @@ class HDeoptimize FINAL : public HTemplateControlInstruction<1, 0> {
     return new(zone) HDeoptimize(reason, type, unreachable_continuation);
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
     *block = NULL;
     return true;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1435,7 +1428,7 @@ class HUnaryControlInstruction : public HTemplateControlInstruction<2, 1> {
     SetSuccessorAt(1, false_target);
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   HValue* value() const { return OperandAt(0); }
 };
@@ -1450,14 +1443,14 @@ class HBranch FINAL : public HUnaryControlInstruction {
                                  ToBooleanStub::Types,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
-  virtual Representation observed_input_representation(int index) OVERRIDE;
+  Representation observed_input_representation(int index) OVERRIDE;
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   ToBooleanStub::Types expected_input_types() const {
     return expected_input_types_;
@@ -1485,7 +1478,7 @@ class HCompareMap FINAL : public HUnaryControlInstruction {
   DECLARE_INSTRUCTION_FACTORY_P4(HCompareMap, HValue*, Handle<Map>,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE {
     if (known_successor_index() != kNoKnownSuccessorIndex) {
       *block = SuccessorAt(known_successor_index());
       return true;
@@ -1494,7 +1487,7 @@ class HCompareMap FINAL : public HUnaryControlInstruction {
     return false;
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   static const int kNoKnownSuccessorIndex = -1;
   int known_successor_index() const {
@@ -1510,14 +1503,14 @@ class HCompareMap FINAL : public HUnaryControlInstruction {
   Unique<Map> map() const { return map_; }
   bool map_is_stable() const { return MapIsStableField::decode(bit_field_); }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CompareMap)
 
  protected:
-  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+  int RedefinedOperandIndex() OVERRIDE { return 0; }
 
  private:
   HCompareMap(HValue* value, Handle<Map> map, HBasicBlock* true_target = NULL,
@@ -1549,14 +1542,14 @@ class HContext FINAL : public HTemplateInstruction<0> {
     return new(zone) HContext();
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Context)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HContext() {
@@ -1564,7 +1557,7 @@ class HContext FINAL : public HTemplateInstruction<0> {
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -1573,13 +1566,13 @@ class HReturn FINAL : public HTemplateControlInstruction<0, 3> {
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HReturn, HValue*, HValue*);
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HReturn, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     // TODO(titzer): require an Int32 input for faster returns.
     if (index == 2) return Representation::Smi();
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   HValue* value() const { return OperandAt(0); }
   HValue* context() const { return OperandAt(1); }
@@ -1600,7 +1593,7 @@ class HAbnormalExit FINAL : public HTemplateControlInstruction<0, 0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P0(HAbnormalExit);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1622,7 +1615,7 @@ class HUnaryOperation : public HTemplateInstruction<1> {
   }
 
   HValue* value() const { return OperandAt(0); }
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 };
 
 
@@ -1630,7 +1623,7 @@ class HUseConst FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HUseConst, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1648,11 +1641,16 @@ class HForceRepresentation FINAL : public HTemplateInstruction<1> {
 
   HValue* value() const { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation observed_input_representation(int index) OVERRIDE {
+    // We haven't actually *observed* this, but it's closer to the truth
+    // than 'None'.
+    return representation();  // Same as the output representation.
+  }
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();  // Same as the output representation.
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(ForceRepresentation)
 
@@ -1694,29 +1692,29 @@ class HChange FINAL : public HUnaryOperation {
     return CheckUsesForFlag(kAllowUndefinedAsNaN);
   }
 
-  virtual HType CalculateInferredType() OVERRIDE;
-  virtual HValue* Canonicalize() OVERRIDE;
+  HType CalculateInferredType() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
   Representation from() const { return value()->representation(); }
   Representation to() const { return representation(); }
   bool deoptimize_on_minus_zero() const {
     return CheckFlag(kBailoutOnMinusZero);
   }
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return from();
   }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(Change)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
-  virtual bool IsDeletable() const OVERRIDE {
+  bool IsDeletable() const OVERRIDE {
     return !from().IsTagged() || value()->type().IsSmi();
   }
 };
@@ -1726,14 +1724,14 @@ class HClampToUint8 FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HClampToUint8, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ClampToUint8)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HClampToUint8(HValue* value)
@@ -1743,7 +1741,7 @@ class HClampToUint8 FINAL : public HUnaryOperation {
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -1752,7 +1750,7 @@ class HDoubleBits FINAL : public HUnaryOperation {
   enum Bits { HIGH, LOW };
   DECLARE_INSTRUCTION_FACTORY_P2(HDoubleBits, HValue*, Bits);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Double();
   }
 
@@ -1761,7 +1759,7 @@ class HDoubleBits FINAL : public HUnaryOperation {
   Bits bits() { return bits_; }
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     return other->IsDoubleBits() && HDoubleBits::cast(other)->bits() == bits();
   }
 
@@ -1772,7 +1770,7 @@ class HDoubleBits FINAL : public HUnaryOperation {
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 
   Bits bits_;
 };
@@ -1782,7 +1780,7 @@ class HConstructDouble FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HConstructDouble, HValue*, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Integer32();
   }
 
@@ -1792,7 +1790,7 @@ class HConstructDouble FINAL : public HTemplateInstruction<2> {
   HValue* lo() { return OperandAt(1); }
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HConstructDouble(HValue* hi, HValue* lo) {
@@ -1802,7 +1800,7 @@ class HConstructDouble FINAL : public HTemplateInstruction<2> {
     SetOperandAt(1, lo);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -1825,7 +1823,7 @@ class HSimulate FINAL : public HInstruction {
                    DoneWithReplayField::encode(false)) {}
   ~HSimulate() {}
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   bool HasAstId() const { return !ast_id_.IsNone(); }
   BailoutId ast_id() const { return ast_id_; }
@@ -1855,13 +1853,11 @@ class HSimulate FINAL : public HInstruction {
     }
     return -1;
   }
-  virtual int OperandCount() const OVERRIDE { return values_.length(); }
-  virtual HValue* OperandAt(int index) const OVERRIDE {
-    return values_[index];
-  }
+  int OperandCount() const OVERRIDE { return values_.length(); }
+  HValue* OperandAt(int index) const OVERRIDE { return values_[index]; }
 
-  virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -1876,13 +1872,13 @@ class HSimulate FINAL : public HInstruction {
   DECLARE_CONCRETE_INSTRUCTION(Simulate)
 
 #ifdef DEBUG
-  virtual void Verify() OVERRIDE;
+  void Verify() OVERRIDE;
   void set_closure(Handle<JSFunction> closure) { closure_ = closure; }
   Handle<JSFunction> closure() const { return closure_; }
 #endif
 
  protected:
-  virtual void InternalSetOperandAt(int index, HValue* value) OVERRIDE {
+  void InternalSetOperandAt(int index, HValue* value) OVERRIDE {
     values_[index] = value;
   }
 
@@ -1938,11 +1934,11 @@ class HEnvironmentMarker FINAL : public HTemplateInstruction<1> {
     next_simulate_ = simulate;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
 #ifdef DEBUG
   void set_closure(Handle<JSFunction> closure) {
@@ -1980,7 +1976,7 @@ class HStackCheck FINAL : public HTemplateInstruction<1> {
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -2035,7 +2031,7 @@ class HEnterInlined FINAL : public HTemplateInstruction<0> {
   void RegisterReturnTarget(HBasicBlock* return_target, Zone* zone);
   ZoneList<HBasicBlock*>* return_targets() { return &return_targets_; }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   Handle<JSFunction> closure() const { return closure_; }
   HConstant* closure_context() const { return closure_context_; }
@@ -2046,7 +2042,7 @@ class HEnterInlined FINAL : public HTemplateInstruction<0> {
   InliningKind inlining_kind() const { return inlining_kind_; }
   BailoutId ReturnId() const { return return_id_; }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -2092,11 +2088,11 @@ class HLeaveInlined FINAL : public HTemplateInstruction<0> {
       : entry_(entry),
         drop_count_(drop_count) { }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual int argument_delta() const OVERRIDE {
+  int argument_delta() const OVERRIDE {
     return entry_->arguments_pushed() ? -drop_count_ : 0;
   }
 
@@ -2143,28 +2139,22 @@ class HPushArguments FINAL : public HInstruction {
     return instr;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual int argument_delta() const OVERRIDE { return inputs_.length(); }
+  int argument_delta() const OVERRIDE { return inputs_.length(); }
   HValue* argument(int i) { return OperandAt(i); }
 
-  virtual int OperandCount() const FINAL OVERRIDE {
-    return inputs_.length();
-  }
-  virtual HValue* OperandAt(int i) const FINAL OVERRIDE {
-    return inputs_[i];
-  }
+  int OperandCount() const FINAL { return inputs_.length(); }
+  HValue* OperandAt(int i) const FINAL { return inputs_[i]; }
 
   void AddInput(HValue* value);
 
   DECLARE_CONCRETE_INSTRUCTION(PushArguments)
 
  protected:
-  virtual void InternalSetOperandAt(int i, HValue* value) FINAL OVERRIDE {
-    inputs_[i] = value;
-  }
+  void InternalSetOperandAt(int i, HValue* value) FINAL { inputs_[i] = value; }
 
  private:
   explicit HPushArguments(Zone* zone)
@@ -2180,14 +2170,14 @@ class HThisFunction FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P0(HThisFunction);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ThisFunction)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HThisFunction() {
@@ -2195,7 +2185,7 @@ class HThisFunction FINAL : public HTemplateInstruction<0> {
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -2211,7 +2201,7 @@ class HDeclareGlobals FINAL : public HUnaryOperation {
 
   DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals)
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -2240,17 +2230,13 @@ class HCall : public HTemplateInstruction<V> {
     this->SetAllSideEffects();
   }
 
-  virtual HType CalculateInferredType() FINAL OVERRIDE {
-    return HType::Tagged();
-  }
+  HType CalculateInferredType() FINAL { return HType::Tagged(); }
 
   virtual int argument_count() const {
     return argument_count_;
   }
 
-  virtual int argument_delta() const OVERRIDE {
-    return -argument_count();
-  }
+  int argument_delta() const OVERRIDE { return -argument_count(); }
 
  private:
   int argument_count_;
@@ -2264,12 +2250,11 @@ class HUnaryCall : public HCall<1> {
     SetOperandAt(0, value);
   }
 
-  virtual Representation RequiredInputRepresentation(
-      int index) FINAL OVERRIDE {
+  Representation RequiredInputRepresentation(int index) FINAL {
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   HValue* value() const { return OperandAt(0); }
 };
@@ -2283,10 +2268,9 @@ class HBinaryCall : public HCall<2> {
     SetOperandAt(1, second);
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(
-      int index) FINAL OVERRIDE {
+  Representation RequiredInputRepresentation(int index) FINAL {
     return Representation::Tagged();
   }
 
@@ -2305,19 +2289,16 @@ class HCallJSFunction FINAL : public HCall<1> {
 
   HValue* function() const { return OperandAt(0); }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(
-      int index) FINAL OVERRIDE {
+  Representation RequiredInputRepresentation(int index) FINAL {
     DCHECK(index == 0);
     return Representation::Tagged();
   }
 
   bool pass_argument_count() const { return pass_argument_count_; }
 
-  virtual bool HasStackCheck() FINAL OVERRIDE {
-    return has_stack_check_;
-  }
+  bool HasStackCheck() FINAL { return has_stack_check_; }
 
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction)
 
@@ -2338,27 +2319,26 @@ class HCallJSFunction FINAL : public HCall<1> {
 };
 
 
+enum CallMode { NORMAL_CALL, TAIL_CALL };
+
+
 class HCallWithDescriptor FINAL : public HInstruction {
  public:
   static HCallWithDescriptor* New(Zone* zone, HValue* context, HValue* target,
                                   int argument_count,
                                   CallInterfaceDescriptor descriptor,
-                                  const Vector<HValue*>& operands) {
+                                  const Vector<HValue*>& operands,
+                                  CallMode call_mode = NORMAL_CALL) {
     DCHECK(operands.length() == descriptor.GetEnvironmentLength());
-    HCallWithDescriptor* res = new (zone)
-        HCallWithDescriptor(target, argument_count, descriptor, operands, zone);
+    HCallWithDescriptor* res = new (zone) HCallWithDescriptor(
+        target, argument_count, descriptor, operands, call_mode, zone);
     return res;
   }
 
-  virtual int OperandCount() const FINAL OVERRIDE {
-    return values_.length();
-  }
-  virtual HValue* OperandAt(int index) const FINAL OVERRIDE {
-    return values_[index];
-  }
+  int OperandCount() const FINAL { return values_.length(); }
+  HValue* OperandAt(int index) const FINAL { return values_[index]; }
 
-  virtual Representation RequiredInputRepresentation(
-      int index) FINAL OVERRIDE {
+  Representation RequiredInputRepresentation(int index) FINAL {
     if (index == 0) {
       return Representation::Tagged();
     } else {
@@ -2370,17 +2350,15 @@ class HCallWithDescriptor FINAL : public HInstruction {
 
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor)
 
-  virtual HType CalculateInferredType() FINAL OVERRIDE {
-    return HType::Tagged();
-  }
+  HType CalculateInferredType() FINAL { return HType::Tagged(); }
+
+  bool IsTailCall() const { return call_mode_ == TAIL_CALL; }
 
   virtual int argument_count() const {
     return argument_count_;
   }
 
-  virtual int argument_delta() const OVERRIDE {
-    return -argument_count_;
-  }
+  int argument_delta() const OVERRIDE { return -argument_count_; }
 
   CallInterfaceDescriptor descriptor() const { return descriptor_; }
 
@@ -2388,16 +2366,20 @@ class HCallWithDescriptor FINAL : public HInstruction {
     return OperandAt(0);
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
  private:
   // The argument count includes the receiver.
   HCallWithDescriptor(HValue* target, int argument_count,
                       CallInterfaceDescriptor descriptor,
-                      const Vector<HValue*>& operands, Zone* zone)
+                      const Vector<HValue*>& operands, CallMode call_mode,
+                      Zone* zone)
       : descriptor_(descriptor),
-        values_(descriptor.GetEnvironmentLength() + 1, zone) {
-    argument_count_ = argument_count;
+        values_(descriptor.GetEnvironmentLength() + 1, zone),
+        argument_count_(argument_count),
+        call_mode_(call_mode) {
+    // We can only tail call without any stack arguments.
+    DCHECK(call_mode != TAIL_CALL || argument_count == 0);
     AddOperand(target, zone);
     for (int i = 0; i < operands.length(); i++) {
       AddOperand(operands[i], zone);
@@ -2411,14 +2393,14 @@ class HCallWithDescriptor FINAL : public HInstruction {
     SetOperandAt(values_.length() - 1, v);
   }
 
-  void InternalSetOperandAt(int index,
-                            HValue* value) FINAL OVERRIDE {
+  void InternalSetOperandAt(int index, HValue* value) FINAL {
     values_[index] = value;
   }
 
   CallInterfaceDescriptor descriptor_;
   ZoneList<HValue*> values_;
   int argument_count_;
+  CallMode call_mode_;
 };
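
 The TAIL_CALL mode added above is only valid when no arguments are passed on
 the stack, which the constructor enforces with a DCHECK. A minimal sketch of
 building a tail call through the new factory method, assuming a zone(), a
 populated CallInterfaceDescriptor 'descriptor', and an operand vector 'ops'
 sized to descriptor.GetEnvironmentLength() (these names are illustrative and
 not part of this patch):

   // Hypothetical usage sketch; 'descriptor' and 'ops' are assumed to exist.
   // Tail calls must not push stack arguments, so argument_count is 0.
   HCallWithDescriptor* call = HCallWithDescriptor::New(
       zone(), context, target, 0, descriptor, ops, TAIL_CALL);
   DCHECK(call->IsTailCall());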
 
 
@@ -2453,9 +2435,7 @@ class HInvokeFunction FINAL : public HBinaryCall {
   Handle<JSFunction> known_function() { return known_function_; }
   int formal_parameter_count() const { return formal_parameter_count_; }
 
-  virtual bool HasStackCheck() FINAL OVERRIDE {
-    return has_stack_check_;
-  }
+  bool HasStackCheck() FINAL { return has_stack_check_; }
 
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction)
 
@@ -2483,7 +2463,7 @@ class HCallFunction FINAL : public HBinaryCall {
 
   DECLARE_CONCRETE_INSTRUCTION(CallFunction)
 
-  virtual int argument_delta() const OVERRIDE { return -argument_count(); }
+  int argument_delta() const OVERRIDE { return -argument_count(); }
 
  private:
   HCallFunction(HValue* context,
@@ -2521,7 +2501,7 @@ class HCallNewArray FINAL : public HBinaryCall {
   HValue* context() { return first(); }
   HValue* constructor() { return second(); }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   ElementsKind elements_kind() const { return elements_kind_; }
 
@@ -2544,7 +2524,7 @@ class HCallRuntime FINAL : public HCall<1> {
                                               const Runtime::Function*,
                                               int);
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   HValue* context() { return OperandAt(0); }
   const Runtime::Function* function() const { return c_function_; }
@@ -2554,7 +2534,7 @@ class HCallRuntime FINAL : public HCall<1> {
     save_doubles_ = save_doubles;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -2580,14 +2560,14 @@ class HMapEnumLength FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HMapEnumLength, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(MapEnumLength)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HMapEnumLength(HValue* value)
@@ -2597,7 +2577,7 @@ class HMapEnumLength FINAL : public HUnaryOperation {
     SetDependsOnFlag(kMaps);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -2611,9 +2591,9 @@ class HUnaryMathOperation FINAL : public HTemplateInstruction<2> {
   HValue* context() const { return OperandAt(0); }
   HValue* value() const { return OperandAt(1); }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 0) {
       return Representation::Tagged();
     } else {
@@ -2637,11 +2617,11 @@ class HUnaryMathOperation FINAL : public HTemplateInstruction<2> {
     }
   }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
-  virtual HValue* Canonicalize() OVERRIDE;
-  virtual Representation RepresentationFromUses() OVERRIDE;
-  virtual Representation RepresentationFromInputs() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
+  Representation RepresentationFromUses() OVERRIDE;
+  Representation RepresentationFromInputs() OVERRIDE;
 
   BuiltinFunctionId op() const { return op_; }
   const char* OpName() const;
@@ -2649,7 +2629,7 @@ class HUnaryMathOperation FINAL : public HTemplateInstruction<2> {
   DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     HUnaryMathOperation* b = HUnaryMathOperation::cast(other);
     return op_ == b->op();
   }
@@ -2701,7 +2681,7 @@ class HUnaryMathOperation FINAL : public HTemplateInstruction<2> {
     SetFlag(kAllowUndefinedAsNaN);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 
   HValue* SimplifiedDividendForMathFloorOfDiv(HDiv* hdiv);
   HValue* SimplifiedDivisorForMathFloorOfDiv(HDiv* hdiv);
@@ -2715,7 +2695,7 @@ class HLoadRoot FINAL : public HTemplateInstruction<0> {
   DECLARE_INSTRUCTION_FACTORY_P1(HLoadRoot, Heap::RootListIndex);
   DECLARE_INSTRUCTION_FACTORY_P2(HLoadRoot, Heap::RootListIndex, HType);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -2724,7 +2704,7 @@ class HLoadRoot FINAL : public HTemplateInstruction<0> {
   DECLARE_CONCRETE_INSTRUCTION(LoadRoot)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     HLoadRoot* b = HLoadRoot::cast(other);
     return index_ == b->index_;
   }
@@ -2739,7 +2719,7 @@ class HLoadRoot FINAL : public HTemplateInstruction<0> {
     set_representation(Representation::Tagged());
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 
   const Heap::RootListIndex index_;
 };
@@ -2774,17 +2754,17 @@ class HCheckMaps FINAL : public HTemplateInstruction<2> {
     ClearDependsOnFlag(kMaps);
   }
 
-  virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual HType CalculateInferredType() OVERRIDE {
+  HType CalculateInferredType() OVERRIDE {
     if (value()->type().IsHeapObject()) return value()->type();
     return HType::HeapObject();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   HValue* value() const { return OperandAt(0); }
   HValue* typecheck() const { return OperandAt(1); }
@@ -2800,7 +2780,7 @@ class HCheckMaps FINAL : public HTemplateInstruction<2> {
     return HasMigrationTargetField::decode(bit_field_);
   }
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
   static HCheckMaps* CreateAndInsertAfter(Zone* zone,
                                           HValue* value,
@@ -2822,11 +2802,11 @@ class HCheckMaps FINAL : public HTemplateInstruction<2> {
   DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     return this->maps()->Equals(HCheckMaps::cast(other)->maps());
   }
 
-  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+  int RedefinedOperandIndex() OVERRIDE { return 0; }
 
  private:
   HCheckMaps(HValue* value, const UniqueSet<Map>* maps, bool maps_are_stable)
@@ -2899,19 +2879,19 @@ class HCheckValue FINAL : public HUnaryOperation {
     return new(zone) HCheckValue(value, target, object_in_new_space);
   }
 
-  virtual void FinalizeUniqueness() OVERRIDE {
+  void FinalizeUniqueness() OVERRIDE {
     object_ = Unique<HeapObject>(object_.handle());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
 #ifdef DEBUG
-  virtual void Verify() OVERRIDE;
+  void Verify() OVERRIDE;
 #endif
 
   Unique<HeapObject> object() const { return object_; }
@@ -2920,7 +2900,7 @@ class HCheckValue FINAL : public HUnaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(CheckValue)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     HCheckValue* b = HCheckValue::cast(other);
     return object_ == b->object_;
   }
@@ -2952,13 +2932,13 @@ class HCheckInstanceType FINAL : public HUnaryOperation {
 
   DECLARE_INSTRUCTION_FACTORY_P2(HCheckInstanceType, HValue*, Check);
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual HType CalculateInferredType() OVERRIDE {
+  HType CalculateInferredType() OVERRIDE {
     switch (check_) {
       case IS_SPEC_OBJECT: return HType::JSObject();
       case IS_JS_ARRAY: return HType::JSArray();
@@ -2969,7 +2949,7 @@ class HCheckInstanceType FINAL : public HUnaryOperation {
     return HType::Tagged();
   }
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
   bool is_interval_check() const { return check_ <= LAST_INTERVAL_CHECK; }
   void GetCheckInterval(InstanceType* first, InstanceType* last);
@@ -2983,12 +2963,12 @@ class HCheckInstanceType FINAL : public HUnaryOperation {
  // TODO(ager): It could be nice to allow the omission of instance
   // type checks if we have already performed an instance type check
   // with a larger range.
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     HCheckInstanceType* b = HCheckInstanceType::cast(other);
     return check_ == b->check_;
   }
 
-  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+  int RedefinedOperandIndex() OVERRIDE { return 0; }
 
  private:
   const char* GetCheckName() const;
@@ -3007,11 +2987,11 @@ class HCheckSmi FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HCheckSmi, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual HValue* Canonicalize() OVERRIDE {
+  HValue* Canonicalize() OVERRIDE {
     HType value_type = value()->type();
     if (value_type.IsSmi()) {
       return NULL;
@@ -3022,7 +3002,7 @@ class HCheckSmi FINAL : public HUnaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(CheckSmi)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HCheckSmi(HValue* value) : HUnaryOperation(value, HType::Smi()) {
@@ -3036,28 +3016,28 @@ class HCheckHeapObject FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HCheckHeapObject, HValue*);
 
-  virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual HType CalculateInferredType() OVERRIDE {
+  HType CalculateInferredType() OVERRIDE {
     if (value()->type().IsHeapObject()) return value()->type();
     return HType::HeapObject();
   }
 
 #ifdef DEBUG
-  virtual void Verify() OVERRIDE;
+  void Verify() OVERRIDE;
 #endif
 
-  virtual HValue* Canonicalize() OVERRIDE {
+  HValue* Canonicalize() OVERRIDE {
     return value()->type().IsHeapObject() ? NULL : this;
   }
 
   DECLARE_CONCRETE_INSTRUCTION(CheckHeapObject)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HCheckHeapObject(HValue* value) : HUnaryOperation(value) {
@@ -3303,22 +3283,20 @@ class HPhi FINAL : public HValue {
     SetFlag(kAllowUndefinedAsNaN);
   }
 
-  virtual Representation RepresentationFromInputs() OVERRIDE;
+  Representation RepresentationFromInputs() OVERRIDE;
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
   virtual void InferRepresentation(
       HInferRepresentationPhase* h_infer) OVERRIDE;
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
-  virtual Representation KnownOptimalRepresentation() OVERRIDE {
+  Representation KnownOptimalRepresentation() OVERRIDE {
     return representation();
   }
-  virtual HType CalculateInferredType() OVERRIDE;
-  virtual int OperandCount() const OVERRIDE { return inputs_.length(); }
-  virtual HValue* OperandAt(int index) const OVERRIDE {
-    return inputs_[index];
-  }
+  HType CalculateInferredType() OVERRIDE;
+  int OperandCount() const OVERRIDE { return inputs_.length(); }
+  HValue* OperandAt(int index) const OVERRIDE { return inputs_[index]; }
   HValue* GetRedundantReplacement();
   void AddInput(HValue* value);
   bool HasRealUses();
@@ -3326,7 +3304,7 @@ class HPhi FINAL : public HValue {
   bool IsReceiver() const { return merged_index_ == 0; }
   bool HasMergedIndex() const { return merged_index_ != kInvalidMergedIndex; }
 
-  virtual HSourcePosition position() const OVERRIDE;
+  HSourcePosition position() const OVERRIDE;
 
   int merged_index() const { return merged_index_; }
 
@@ -3345,10 +3323,10 @@ class HPhi FINAL : public HValue {
     induction_variable_data_ = InductionVariableData::ExaminePhi(this);
   }
 
-  virtual std::ostream& PrintTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
 #ifdef DEBUG
-  virtual void Verify() OVERRIDE;
+  void Verify() OVERRIDE;
 #endif
 
   void InitRealUses(int id);
@@ -3385,7 +3363,7 @@ class HPhi FINAL : public HValue {
     DCHECK(value->IsPhi());
     return reinterpret_cast<HPhi*>(value);
   }
-  virtual Opcode opcode() const OVERRIDE { return HValue::kPhi; }
+  Opcode opcode() const OVERRIDE { return HValue::kPhi; }
 
   void SimplifyConstantInputs();
 
@@ -3393,8 +3371,8 @@ class HPhi FINAL : public HValue {
   static const int kInvalidMergedIndex = -1;
 
  protected:
-  virtual void DeleteFromGraph() OVERRIDE;
-  virtual void InternalSetOperandAt(int index, HValue* value) OVERRIDE {
+  void DeleteFromGraph() OVERRIDE;
+  void InternalSetOperandAt(int index, HValue* value) OVERRIDE {
     inputs_[index] = value;
   }
 
@@ -3408,7 +3386,7 @@ class HPhi FINAL : public HValue {
   InductionVariableData* induction_variable_data_;
 
  // TODO(titzer): we can't eliminate the receiver; it's needed for generating backtraces
-  virtual bool IsDeletable() const OVERRIDE { return !IsReceiver(); }
+  bool IsDeletable() const OVERRIDE { return !IsReceiver(); }
 };
 
 
@@ -3417,24 +3395,16 @@ class HDematerializedObject : public HInstruction {
  public:
   HDematerializedObject(int count, Zone* zone) : values_(count, zone) {}
 
-  virtual int OperandCount() const FINAL OVERRIDE {
-    return values_.length();
-  }
-  virtual HValue* OperandAt(int index) const FINAL OVERRIDE {
-    return values_[index];
-  }
+  int OperandCount() const FINAL { return values_.length(); }
+  HValue* OperandAt(int index) const FINAL { return values_[index]; }
 
-  virtual bool HasEscapingOperandAt(int index) FINAL OVERRIDE {
-    return false;
-  }
-  virtual Representation RequiredInputRepresentation(
-      int index) FINAL OVERRIDE {
+  bool HasEscapingOperandAt(int index) FINAL { return false; }
+  Representation RequiredInputRepresentation(int index) FINAL {
     return Representation::None();
   }
 
  protected:
-  virtual void InternalSetOperandAt(int index,
-                                    HValue* value) FINAL OVERRIDE {
+  void InternalSetOperandAt(int index, HValue* value) FINAL {
     values_[index] = value;
   }
 
@@ -3497,7 +3467,7 @@ class HCapturedObject FINAL : public HDematerializedObject {
   // Replay effects of this instruction on the given environment.
   void ReplayEnvironment(HEnvironment* env);
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(CapturedObject)
 
@@ -3507,7 +3477,7 @@ class HCapturedObject FINAL : public HDematerializedObject {
   // Note that we cannot DCE captured objects as they are used to replay
   // the environment. This method is here as an explicit reminder.
   // TODO(mstarzinger): Turn HSimulates into full snapshots maybe?
-  virtual bool IsDeletable() const FINAL OVERRIDE { return false; }
+  bool IsDeletable() const FINAL { return false; }
 };
 
 
@@ -3528,7 +3498,7 @@ class HConstant FINAL : public HTemplateInstruction<0> {
         zone, context, value, representation));
   }
 
-  virtual Handle<Map> GetMonomorphicJSObjectMap() OVERRIDE {
+  Handle<Map> GetMonomorphicJSObjectMap() OVERRIDE {
     Handle<Object> object = object_.handle();
     if (!object.is_null() && object->IsHeapObject()) {
       return v8::internal::handle(HeapObject::cast(*object)->map());
@@ -3595,11 +3565,11 @@ class HConstant FINAL : public HTemplateInstruction<0> {
     return instance_type == CELL_TYPE || instance_type == PROPERTY_CELL_TYPE;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual Representation KnownOptimalRepresentation() OVERRIDE {
+  Representation KnownOptimalRepresentation() OVERRIDE {
     if (HasSmiValue() && SmiValuesAre31Bits()) return Representation::Smi();
     if (HasInteger32Value()) return Representation::Integer32();
     if (HasNumberValue()) return Representation::Double();
@@ -3607,8 +3577,8 @@ class HConstant FINAL : public HTemplateInstruction<0> {
     return Representation::Tagged();
   }
 
-  virtual bool EmitAtUses() OVERRIDE;
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  bool EmitAtUses() OVERRIDE;
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
   HConstant* CopyToRepresentation(Representation r, Zone* zone) const;
   Maybe<HConstant*> CopyToTruncatedInt32(Zone* zone);
   Maybe<HConstant*> CopyToTruncatedNumber(Zone* zone);
@@ -3687,7 +3657,7 @@ class HConstant FINAL : public HTemplateInstruction<0> {
     return object_map_;
   }
 
-  virtual intptr_t Hashcode() OVERRIDE {
+  intptr_t Hashcode() OVERRIDE {
     if (HasInteger32Value()) {
       return static_cast<intptr_t>(int32_value_);
     } else if (HasDoubleValue()) {
@@ -3700,7 +3670,7 @@ class HConstant FINAL : public HTemplateInstruction<0> {
     }
   }
 
-  virtual void FinalizeUniqueness() OVERRIDE {
+  void FinalizeUniqueness() OVERRIDE {
     if (!HasDoubleValue() && !HasExternalReferenceValue()) {
       DCHECK(!object_.handle().is_null());
       object_ = Unique<Object>(object_.handle());
@@ -3715,7 +3685,7 @@ class HConstant FINAL : public HTemplateInstruction<0> {
     return object_.IsInitialized() && object_ == other;
   }
 
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     HConstant* other_constant = HConstant::cast(other);
     if (HasInteger32Value()) {
       return other_constant->HasInteger32Value() &&
@@ -3740,13 +3710,13 @@ class HConstant FINAL : public HTemplateInstruction<0> {
   }
 
 #ifdef DEBUG
-  virtual void Verify() OVERRIDE { }
+  void Verify() OVERRIDE {}
 #endif
 
   DECLARE_CONCRETE_INSTRUCTION(Constant)
 
  protected:
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   friend class HGraph;
@@ -3774,7 +3744,7 @@ class HConstant FINAL : public HTemplateInstruction<0> {
 
   void Initialize(Representation r);
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 
   // If object_ is a map, this indicates whether the map is stable.
   class HasStableMapValueField : public BitField<bool, 0, 1> {};
@@ -3862,7 +3832,7 @@ class HBinaryOperation : public HTemplateInstruction<3> {
     observed_output_representation_ = observed;
   }
 
-  virtual Representation observed_input_representation(int index) OVERRIDE {
+  Representation observed_input_representation(int index) OVERRIDE {
     if (index == 0) return Representation::Tagged();
     return observed_input_representation_[index - 1];
   }
@@ -3877,15 +3847,15 @@ class HBinaryOperation : public HTemplateInstruction<3> {
 
   virtual void InferRepresentation(
       HInferRepresentationPhase* h_infer) OVERRIDE;
-  virtual Representation RepresentationFromInputs() OVERRIDE;
+  Representation RepresentationFromInputs() OVERRIDE;
   Representation RepresentationFromOutput();
-  virtual void AssumeRepresentation(Representation r) OVERRIDE;
+  void AssumeRepresentation(Representation r) OVERRIDE;
 
   virtual bool IsCommutative() const { return false; }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 0) return Representation::Tagged();
     return representation();
   }
@@ -3920,18 +3890,18 @@ class HWrapReceiver FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HWrapReceiver, HValue*, HValue*);
 
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   HValue* receiver() const { return OperandAt(0); }
   HValue* function() const { return OperandAt(1); }
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
   bool known_function() const { return known_function_; }
 
   DECLARE_CONCRETE_INSTRUCTION(WrapReceiver)
@@ -3955,7 +3925,7 @@ class HApplyArguments FINAL : public HTemplateInstruction<4> {
   DECLARE_INSTRUCTION_FACTORY_P4(HApplyArguments, HValue*, HValue*, HValue*,
                                  HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
    // The length is untagged; all other inputs are tagged.
     return (index == 2)
         ? Representation::Integer32()
@@ -3990,14 +3960,14 @@ class HArgumentsElements FINAL : public HTemplateInstruction<0> {
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements)
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
   bool from_inlined() const { return from_inlined_; }
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HArgumentsElements(bool from_inlined) : from_inlined_(from_inlined) {
@@ -4007,7 +3977,7 @@ class HArgumentsElements FINAL : public HTemplateInstruction<0> {
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 
   bool from_inlined_;
 };
@@ -4017,14 +3987,14 @@ class HArgumentsLength FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HArgumentsLength, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HArgumentsLength(HValue* value) : HUnaryOperation(value) {
@@ -4032,7 +4002,7 @@ class HArgumentsLength FINAL : public HUnaryOperation {
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -4040,9 +4010,9 @@ class HAccessArgumentsAt FINAL : public HTemplateInstruction<3> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P3(HAccessArgumentsAt, HValue*, HValue*, HValue*);
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
    // The arguments elements array is considered tagged.
     return index == 0
         ? Representation::Tagged()
@@ -4064,7 +4034,7 @@ class HAccessArgumentsAt FINAL : public HTemplateInstruction<3> {
     SetOperandAt(2, index);
   }
 
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 };
 
 
@@ -4100,11 +4070,11 @@ class HBoundsCheck FINAL : public HTemplateInstruction<2> {
     }
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
   virtual void InferRepresentation(
       HInferRepresentationPhase* h_infer) OVERRIDE;
 
@@ -4113,19 +4083,17 @@ class HBoundsCheck FINAL : public HTemplateInstruction<2> {
   bool allow_equality() const { return allow_equality_; }
   void set_allow_equality(bool v) { allow_equality_ = v; }
 
-  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
-  virtual bool IsPurelyInformativeDefinition() OVERRIDE {
-    return skip_check();
-  }
+  int RedefinedOperandIndex() OVERRIDE { return 0; }
+  bool IsPurelyInformativeDefinition() OVERRIDE { return skip_check(); }
 
   DECLARE_CONCRETE_INSTRUCTION(BoundsCheck)
 
  protected:
   friend class HBoundsCheckBaseIndexInformation;
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
   bool skip_check_;
   HValue* base_;
   int offset_;
@@ -4147,9 +4115,7 @@ class HBoundsCheck FINAL : public HTemplateInstruction<2> {
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const OVERRIDE {
-    return skip_check() && !FLAG_debug_code;
-  }
+  bool IsDeletable() const OVERRIDE { return skip_check() && !FLAG_debug_code; }
 };
 
 
@@ -4171,14 +4137,14 @@ class HBoundsCheckBaseIndexInformation FINAL
 
   DECLARE_CONCRETE_INSTRUCTION(BoundsCheckBaseIndexInformation)
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
-  virtual bool IsPurelyInformativeDefinition() OVERRIDE { return true; }
+  int RedefinedOperandIndex() OVERRIDE { return 0; }
+  bool IsPurelyInformativeDefinition() OVERRIDE { return true; }
 };
 
 
@@ -4193,7 +4159,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
     SetAllSideEffects();
   }
 
-  virtual void RepresentationChanged(Representation to) OVERRIDE {
+  void RepresentationChanged(Representation to) OVERRIDE {
     if (to.IsTagged() &&
         (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
       SetAllSideEffects();
@@ -4213,7 +4179,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
     HBinaryOperation::UpdateRepresentation(new_rep, h_infer, reason);
   }
 
-  virtual Representation observed_input_representation(int index) OVERRIDE {
+  Representation observed_input_representation(int index) OVERRIDE {
     Representation r = HBinaryOperation::observed_input_representation(index);
     if (r.IsDouble()) return Representation::Integer32();
     return r;
@@ -4228,7 +4194,7 @@ class HBitwiseBinaryOperation : public HBinaryOperation {
   DECLARE_ABSTRACT_INSTRUCTION(BitwiseBinaryOperation)
 
  private:
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -4241,7 +4207,7 @@ class HMathFloorOfDiv FINAL : public HBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(MathFloorOfDiv)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HMathFloorOfDiv(HValue* context, HValue* left, HValue* right)
@@ -4256,9 +4222,9 @@ class HMathFloorOfDiv FINAL : public HBinaryOperation {
     SetFlag(kAllowUndefinedAsNaN);
   }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -4271,7 +4237,7 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
     SetFlag(kAllowUndefinedAsNaN);
   }
 
-  virtual void RepresentationChanged(Representation to) OVERRIDE {
+  void RepresentationChanged(Representation to) OVERRIDE {
     if (to.IsTagged() &&
         (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved())) {
       SetAllSideEffects();
@@ -4286,7 +4252,7 @@ class HArithmeticBinaryOperation : public HBinaryOperation {
   DECLARE_ABSTRACT_INSTRUCTION(ArithmeticBinaryOperation)
 
  private:
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -4295,14 +4261,14 @@ class HCompareGeneric FINAL : public HBinaryOperation {
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HCompareGeneric, HValue*,
                                               HValue*, Token::Value);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return index == 0
         ? Representation::Tagged()
         : representation();
   }
 
   Token::Value token() const { return token_; }
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(CompareGeneric)
 
@@ -4343,16 +4309,16 @@ class HCompareNumericAndBranch : public HTemplateControlInstruction<2, 2> {
   virtual void InferRepresentation(
       HInferRepresentationPhase* h_infer) OVERRIDE;
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
-  virtual Representation observed_input_representation(int index) OVERRIDE {
+  Representation observed_input_representation(int index) OVERRIDE {
     return observed_input_representation_[index];
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   void SetOperandPositions(Zone* zone,
                            HSourcePosition left_pos,
@@ -4392,7 +4358,7 @@ class HCompareHoleAndBranch FINAL : public HUnaryControlInstruction {
   virtual void InferRepresentation(
       HInferRepresentationPhase* h_infer) OVERRIDE;
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
 
@@ -4416,11 +4382,11 @@ class HCompareMinusZeroAndBranch FINAL : public HUnaryControlInstruction {
   virtual void InferRepresentation(
       HInferRepresentationPhase* h_infer) OVERRIDE;
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return representation();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch)
 
@@ -4437,7 +4403,7 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
   DECLARE_INSTRUCTION_FACTORY_P4(HCompareObjectEqAndBranch, HValue*, HValue*,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   static const int kNoKnownSuccessorIndex = -1;
   int known_successor_index() const { return known_successor_index_; }
@@ -4448,13 +4414,13 @@ class HCompareObjectEqAndBranch : public HTemplateControlInstruction<2, 2> {
   HValue* left() const { return OperandAt(0); }
   HValue* right() const { return OperandAt(1); }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual Representation observed_input_representation(int index) OVERRIDE {
+  Representation observed_input_representation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -4482,11 +4448,11 @@ class HIsObjectAndBranch FINAL : public HUnaryControlInstruction {
   DECLARE_INSTRUCTION_FACTORY_P3(HIsObjectAndBranch, HValue*,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch)
 
@@ -4504,11 +4470,11 @@ class HIsStringAndBranch FINAL : public HUnaryControlInstruction {
   DECLARE_INSTRUCTION_FACTORY_P3(HIsStringAndBranch, HValue*,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   static const int kNoKnownSuccessorIndex = -1;
   int known_successor_index() const { return known_successor_index_; }
@@ -4519,7 +4485,7 @@ class HIsStringAndBranch FINAL : public HUnaryControlInstruction {
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch)
 
  protected:
-  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+  int RedefinedOperandIndex() OVERRIDE { return 0; }
 
  private:
   HIsStringAndBranch(HValue* value, HBasicBlock* true_target = NULL,
@@ -4541,13 +4507,13 @@ class HIsSmiAndBranch FINAL : public HUnaryControlInstruction {
 
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
-  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
+  int RedefinedOperandIndex() OVERRIDE { return 0; }
 
  private:
   HIsSmiAndBranch(HValue* value,
@@ -4565,11 +4531,11 @@ class HIsUndetectableAndBranch FINAL : public HUnaryControlInstruction {
   DECLARE_INSTRUCTION_FACTORY_P3(HIsUndetectableAndBranch, HValue*,
                                  HBasicBlock*, HBasicBlock*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch)
 
@@ -4593,9 +4559,9 @@ class HStringCompareAndBranch : public HTemplateControlInstruction<2, 3> {
   HValue* right() { return OperandAt(2); }
   Token::Value token() const { return token_; }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -4627,7 +4593,7 @@ class HIsConstructCallAndBranch : public HTemplateControlInstruction<2, 0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P0(HIsConstructCallAndBranch);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -4647,13 +4613,13 @@ class HHasInstanceTypeAndBranch FINAL : public HUnaryControlInstruction {
   InstanceType from() { return from_; }
   InstanceType to() { return to_; }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch)
 
@@ -4674,7 +4640,7 @@ class HHasCachedArrayIndexAndBranch FINAL : public HUnaryControlInstruction {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HHasCachedArrayIndexAndBranch, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -4689,14 +4655,14 @@ class HGetCachedArrayIndex FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HGetCachedArrayIndex, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HGetCachedArrayIndex(HValue* value) : HUnaryOperation(value) {
@@ -4704,7 +4670,7 @@ class HGetCachedArrayIndex FINAL : public HUnaryOperation {
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -4715,11 +4681,11 @@ class HClassOfTestAndBranch FINAL : public HUnaryControlInstruction {
 
   DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   Handle<String> class_name() const { return class_name_; }
 
@@ -4737,17 +4703,17 @@ class HTypeofIsAndBranch FINAL : public HUnaryControlInstruction {
   DECLARE_INSTRUCTION_FACTORY_P2(HTypeofIsAndBranch, HValue*, Handle<String>);
 
   Handle<String> type_literal() const { return type_literal_.handle(); }
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch)
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
+  bool KnownSuccessorBlock(HBasicBlock** block) OVERRIDE;
 
-  virtual void FinalizeUniqueness() OVERRIDE {
+  void FinalizeUniqueness() OVERRIDE {
     type_literal_ = Unique<String>(type_literal_.handle());
   }
 
@@ -4764,11 +4730,11 @@ class HInstanceOf FINAL : public HBinaryOperation {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HInstanceOf, HValue*, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(InstanceOf)
 
@@ -4791,7 +4757,7 @@ class HInstanceOfKnownGlobal FINAL : public HTemplateInstruction<2> {
   HValue* left() { return OperandAt(1); }
   Handle<JSFunction> function() { return function_; }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -4822,19 +4788,19 @@ class HPower FINAL : public HTemplateInstruction<2> {
   HValue* left() { return OperandAt(0); }
   HValue* right() const { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return index == 0
       ? Representation::Double()
       : Representation::None();
   }
-  virtual Representation observed_input_representation(int index) OVERRIDE {
+  Representation observed_input_representation(int index) OVERRIDE {
     return RequiredInputRepresentation(index);
   }
 
   DECLARE_CONCRETE_INSTRUCTION(Power)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HPower(HValue* left, HValue* right) {
@@ -4845,7 +4811,7 @@ class HPower FINAL : public HTemplateInstruction<2> {
     SetChangesFlag(kNewSpacePromotion);
   }
 
-  virtual bool IsDeletable() const OVERRIDE {
+  bool IsDeletable() const OVERRIDE {
     return !right()->representation().IsTagged();
   }
 };
@@ -4861,13 +4827,13 @@ class HAdd FINAL : public HArithmeticBinaryOperation {
   // Add is only commutative if two integer values are added and not if two
   // tagged values are added (because it might be a String concatenation).
   // We also do not commute (pointer + offset).
-  virtual bool IsCommutative() const OVERRIDE {
+  bool IsCommutative() const OVERRIDE {
     return !representation().IsTagged() && !representation().IsExternal();
   }
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
-  virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
+  bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
     if (left()->IsInteger32Constant()) {
       decomposition->Apply(right(), left()->GetInteger32Constant());
       return true;
@@ -4879,7 +4845,7 @@ class HAdd FINAL : public HArithmeticBinaryOperation {
     }
   }
 
-  virtual void RepresentationChanged(Representation to) OVERRIDE {
+  void RepresentationChanged(Representation to) OVERRIDE {
     if (to.IsTagged() &&
         (left()->ToNumberCanBeObserved() || right()->ToNumberCanBeObserved() ||
          left()->ToStringCanBeObserved() || right()->ToStringCanBeObserved())) {
@@ -4895,16 +4861,16 @@ class HAdd FINAL : public HArithmeticBinaryOperation {
     }
   }
 
-  virtual Representation RepresentationFromInputs() OVERRIDE;
+  Representation RepresentationFromInputs() OVERRIDE;
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE;
+  Representation RequiredInputRepresentation(int index) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(Add)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HAdd(HValue* context, HValue* left, HValue* right)
@@ -4921,9 +4887,9 @@ class HSub FINAL : public HArithmeticBinaryOperation {
                            HValue* left,
                            HValue* right);
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
-  virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
+  bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
     if (right()->IsInteger32Constant()) {
       decomposition->Apply(left(), -right()->GetInteger32Constant());
       return true;
@@ -4935,9 +4901,9 @@ class HSub FINAL : public HArithmeticBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Sub)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HSub(HValue* context, HValue* left, HValue* right)
@@ -4967,12 +4933,10 @@ class HMul FINAL : public HArithmeticBinaryOperation {
     return mul;
   }
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
  // Only commutative if it is certain that no two objects are multiplied.
-  virtual bool IsCommutative() const OVERRIDE {
-    return !representation().IsTagged();
-  }
+  bool IsCommutative() const OVERRIDE { return !representation().IsTagged(); }
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
@@ -4985,9 +4949,9 @@ class HMul FINAL : public HArithmeticBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Mul)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HMul(HValue* context, HValue* left, HValue* right)
@@ -5004,7 +4968,7 @@ class HMod FINAL : public HArithmeticBinaryOperation {
                            HValue* left,
                            HValue* right);
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
@@ -5016,9 +4980,9 @@ class HMod FINAL : public HArithmeticBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Mod)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HMod(HValue* context,
@@ -5038,7 +5002,7 @@ class HDiv FINAL : public HArithmeticBinaryOperation {
                            HValue* left,
                            HValue* right);
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
@@ -5050,9 +5014,9 @@ class HDiv FINAL : public HArithmeticBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Div)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HDiv(HValue* context, HValue* left, HValue* right)
@@ -5073,14 +5037,14 @@ class HMathMinMax FINAL : public HArithmeticBinaryOperation {
                            HValue* right,
                            Operation op);
 
-  virtual Representation observed_input_representation(int index) OVERRIDE {
+  Representation observed_input_representation(int index) OVERRIDE {
     return RequiredInputRepresentation(index);
   }
 
   virtual void InferRepresentation(
       HInferRepresentationPhase* h_infer) OVERRIDE;
 
-  virtual Representation RepresentationFromInputs() OVERRIDE {
+  Representation RepresentationFromInputs() OVERRIDE {
     Representation left_rep = left()->representation();
     Representation right_rep = right()->representation();
     Representation result = Representation::Smi();
@@ -5090,19 +5054,19 @@ class HMathMinMax FINAL : public HArithmeticBinaryOperation {
     return result;
   }
 
-  virtual bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const OVERRIDE { return true; }
 
   Operation operation() { return operation_; }
 
   DECLARE_CONCRETE_INSTRUCTION(MathMinMax)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     return other->IsMathMinMax() &&
         HMathMinMax::cast(other)->operation_ == operation_;
   }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HMathMinMax(HValue* context, HValue* left, HValue* right, Operation op)
@@ -5123,20 +5087,20 @@ class HBitwise FINAL : public HBitwiseBinaryOperation {
 
   Token::Value op() const { return op_; }
 
-  virtual bool IsCommutative() const OVERRIDE { return true; }
+  bool IsCommutative() const OVERRIDE { return true; }
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(Bitwise)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     return op() == HBitwise::cast(other)->op();
   }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
  private:
   HBitwise(HValue* context,
@@ -5182,7 +5146,7 @@ class HShl FINAL : public HBitwiseBinaryOperation {
                            HValue* left,
                            HValue* right);
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
@@ -5198,7 +5162,7 @@ class HShl FINAL : public HBitwiseBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Shl)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HShl(HValue* context, HValue* left, HValue* right)
@@ -5213,7 +5177,7 @@ class HShr FINAL : public HBitwiseBinaryOperation {
                            HValue* left,
                            HValue* right);
 
-  virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
+  bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
     if (right()->IsInteger32Constant()) {
       if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
         // This is intended to look for HAdd and HSub, to handle compounds
@@ -5225,7 +5189,7 @@ class HShr FINAL : public HBitwiseBinaryOperation {
     return false;
   }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
@@ -5237,7 +5201,7 @@ class HShr FINAL : public HBitwiseBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Shr)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HShr(HValue* context, HValue* left, HValue* right)
@@ -5252,7 +5216,7 @@ class HSar FINAL : public HBitwiseBinaryOperation {
                            HValue* left,
                            HValue* right);
 
-  virtual bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
+  bool TryDecompose(DecompositionResult* decomposition) OVERRIDE {
     if (right()->IsInteger32Constant()) {
       if (decomposition->Apply(left(), 0, right()->GetInteger32Constant())) {
         // This is intended to look for HAdd and HSub, to handle compounds
@@ -5264,7 +5228,7 @@ class HSar FINAL : public HBitwiseBinaryOperation {
     return false;
   }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
   virtual void UpdateRepresentation(Representation new_rep,
                                     HInferRepresentationPhase* h_infer,
@@ -5276,7 +5240,7 @@ class HSar FINAL : public HBitwiseBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Sar)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HSar(HValue* context, HValue* left, HValue* right)
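
For context on the two TryDecompose overrides above: when the shift amount is an integer constant, the instruction describes itself to the bounds-check machinery as ((base + offset) >> shift), so compounds like ((rel + 32) >> 5) built from HAdd/HSub can still be reasoned about as array indices. A minimal standalone sketch of the shape being recorded (Decomposition here is illustrative, not the real DecompositionResult API):

#include <cstdint>
#include <iostream>

// Illustrative stand-in for DecompositionResult: an index expressed
// as ((base + offset) >> shift), which is what TryDecompose records
// when the right operand is an integer constant.
struct Decomposition {
  int64_t base;  // non-constant part of the index expression
  int offset;    // constant added before the shift
  int shift;     // constant right-shift amount
  int64_t Value() const { return (base + offset) >> shift; }
};

int main() {
  // The compound ((rel + 32) >> 5) with rel = 100: (132 >> 5) == 4.
  Decomposition d{100, 32, 5};
  std::cout << d.Value() << "\n";  // prints 4
}
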
@@ -5303,7 +5267,7 @@ class HRor FINAL : public HBitwiseBinaryOperation {
   DECLARE_CONCRETE_INSTRUCTION(Ror)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HRor(HValue* context, HValue* left, HValue* right)
@@ -5319,7 +5283,7 @@ class HOsrEntry FINAL : public HTemplateInstruction<0> {
 
   BailoutId ast_id() const { return ast_id_; }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -5350,13 +5314,13 @@ class HParameter FINAL : public HTemplateInstruction<0> {
   unsigned index() const { return index_; }
   ParameterKind kind() const { return kind_; }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
-  virtual Representation KnownOptimalRepresentation() OVERRIDE {
+  Representation KnownOptimalRepresentation() OVERRIDE {
     // If a parameter is an input to a phi, that phi should not
     // choose any more optimistic representation than Tagged.
     return Representation::Tagged();
@@ -5392,7 +5356,7 @@ class HCallStub FINAL : public HUnaryCall {
 
   HValue* context() { return value(); }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(CallStub)
 
@@ -5406,34 +5370,90 @@ class HCallStub FINAL : public HUnaryCall {
 };
 
 
-class HTailCallThroughMegamorphicCache FINAL : public HTemplateInstruction<3> {
+class HTailCallThroughMegamorphicCache FINAL : public HInstruction {
  public:
-  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P3(HTailCallThroughMegamorphicCache,
-                                              HValue*, HValue*, Code::Flags);
+  enum Flags {
+    NONE = 0,
+    CALLED_FROM_KEYED_LOAD = 1 << 0,
+    PERFORM_MISS_ONLY = 1 << 1
+  };
+
+  static Flags ComputeFlags(bool called_from_keyed_load,
+                            bool perform_miss_only) {
+    Flags flags = NONE;
+    if (called_from_keyed_load) {
+      flags = static_cast<Flags>(flags | CALLED_FROM_KEYED_LOAD);
+    }
+    if (perform_miss_only) {
+      flags = static_cast<Flags>(flags | PERFORM_MISS_ONLY);
+    }
+    return flags;
+  }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P5(
+      HTailCallThroughMegamorphicCache, HValue*, HValue*, HValue*, HValue*,
+      HTailCallThroughMegamorphicCache::Flags);
+
+  DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P2(HTailCallThroughMegamorphicCache,
+                                              HValue*, HValue*);
+
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
+  virtual int OperandCount() const FINAL OVERRIDE {
+    return FLAG_vector_ics ? 5 : 3;
+  }
+  virtual HValue* OperandAt(int i) const FINAL OVERRIDE { return inputs_[i]; }
+
   HValue* context() const { return OperandAt(0); }
   HValue* receiver() const { return OperandAt(1); }
   HValue* name() const { return OperandAt(2); }
-  Code::Flags flags() const { return flags_; }
+  HValue* slot() const {
+    DCHECK(FLAG_vector_ics);
+    return OperandAt(3);
+  }
+  HValue* vector() const {
+    DCHECK(FLAG_vector_ics);
+    return OperandAt(4);
+  }
+  Code::Flags flags() const;
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  bool is_keyed_load() const { return flags_ & CALLED_FROM_KEYED_LOAD; }
+  bool is_just_miss() const { return flags_ & PERFORM_MISS_ONLY; }
+
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache)
 
+ protected:
+  virtual void InternalSetOperandAt(int i, HValue* value) FINAL OVERRIDE {
+    inputs_[i] = value;
+  }
+
  private:
   HTailCallThroughMegamorphicCache(HValue* context, HValue* receiver,
-                                   HValue* name, Code::Flags flags)
+                                   HValue* name, HValue* slot, HValue* vector,
+                                   Flags flags)
       : flags_(flags) {
+    DCHECK(FLAG_vector_ics);
+    SetOperandAt(0, context);
+    SetOperandAt(1, receiver);
+    SetOperandAt(2, name);
+    SetOperandAt(3, slot);
+    SetOperandAt(4, vector);
+  }
+
+  HTailCallThroughMegamorphicCache(HValue* context, HValue* receiver,
+                                   HValue* name)
+      : flags_(NONE) {
     SetOperandAt(0, context);
     SetOperandAt(1, receiver);
     SetOperandAt(2, name);
   }
 
-  Code::Flags flags_;
+  EmbeddedContainer<HValue*, 5> inputs_;
+  Flags flags_;
 };
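
The Flags enum introduced above is the usual bit-set idiom: each option takes one bit, and ComputeFlags ORs the chosen bits together (the static_casts are needed because bitwise OR on an unscoped enum yields int). The is_keyed_load()/is_just_miss() accessors then test individual bits. A self-contained sketch of the same pattern:

#include <cassert>

enum Flags {
  NONE = 0,
  CALLED_FROM_KEYED_LOAD = 1 << 0,
  PERFORM_MISS_ONLY = 1 << 1
};

inline Flags ComputeFlags(bool keyed_load, bool miss_only) {
  int flags = NONE;  // accumulate as int, cast back once at the end
  if (keyed_load) flags |= CALLED_FROM_KEYED_LOAD;
  if (miss_only) flags |= PERFORM_MISS_ONLY;
  return static_cast<Flags>(flags);
}

int main() {
  Flags f = ComputeFlags(true, false);
  assert(f & CALLED_FROM_KEYED_LOAD);  // mirrors is_keyed_load()
  assert(!(f & PERFORM_MISS_ONLY));    // mirrors !is_just_miss()
}
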
 
 
@@ -5441,9 +5461,9 @@ class HUnknownOSRValue FINAL : public HTemplateInstruction<0> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HUnknownOSRValue, HEnvironment*, int);
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
@@ -5452,7 +5472,7 @@ class HUnknownOSRValue FINAL : public HTemplateInstruction<0> {
   HEnvironment *environment() { return environment_; }
   int index() { return index_; }
 
-  virtual Representation KnownOptimalRepresentation() OVERRIDE {
+  Representation KnownOptimalRepresentation() OVERRIDE {
     if (incoming_value_ == NULL) return Representation::None();
     return incoming_value_->KnownOptimalRepresentation();
   }
@@ -5481,24 +5501,20 @@ class HLoadGlobalCell FINAL : public HTemplateInstruction<0> {
   Unique<Cell> cell() const { return cell_; }
   bool RequiresHoleCheck() const;
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual intptr_t Hashcode() OVERRIDE {
-    return cell_.Hashcode();
-  }
+  intptr_t Hashcode() OVERRIDE { return cell_.Hashcode(); }
 
-  virtual void FinalizeUniqueness() OVERRIDE {
-    cell_ = Unique<Cell>(cell_.handle());
-  }
+  void FinalizeUniqueness() OVERRIDE { cell_ = Unique<Cell>(cell_.handle()); }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::None();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     return cell_ == HLoadGlobalCell::cast(other)->cell_;
   }
 
@@ -5510,7 +5526,7 @@ class HLoadGlobalCell FINAL : public HTemplateInstruction<0> {
     SetDependsOnFlag(kGlobalVars);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return !RequiresHoleCheck(); }
+  bool IsDeletable() const OVERRIDE { return !RequiresHoleCheck(); }
 
   Unique<Cell> cell_;
   PropertyDetails details_;
@@ -5526,13 +5542,11 @@ class HLoadGlobalGeneric FINAL : public HTemplateInstruction<2> {
   HValue* global_object() { return OperandAt(1); }
   Handle<String> name() const { return name_; }
   bool for_typeof() const { return for_typeof_; }
-  FeedbackVectorICSlot slot() const {
-    DCHECK(FLAG_vector_ics && !slot_.IsInvalid());
-    return slot_;
-  }
+  FeedbackVectorICSlot slot() const { return slot_; }
   Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
   }
+  bool HasVectorAndSlot() const { return FLAG_vector_ics; }
   void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
                         FeedbackVectorICSlot slot) {
     DCHECK(FLAG_vector_ics);
@@ -5540,9 +5554,9 @@ class HLoadGlobalGeneric FINAL : public HTemplateInstruction<2> {
     slot_ = slot;
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
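
A note on the vector-IC plumbing that recurs in this and later hunks: under FLAG_vector_ics each load site owns a slot in the function's TypeFeedbackVector, and SetVectorAndSlot/HasVectorAndSlot thread that (vector, slot) pair through the graph builder to the generated IC. A rough picture of the pairing, using hypothetical stand-in types rather than V8's real classes:

#include <vector>

// Hypothetical stand-ins showing the shape of the (vector, slot) pair:
// one feedback array per function, one slot index per load site.
struct FeedbackEntry { /* recorded maps, handlers, ... */ };
using TypeFeedbackVector = std::vector<FeedbackEntry>;

struct LoadSite {
  TypeFeedbackVector* vector = nullptr;  // shared by the whole function
  int slot = -1;                         // this site's index into it

  void SetVectorAndSlot(TypeFeedbackVector* v, int s) {
    vector = v;
    slot = s;
  }
  bool HasVectorAndSlot() const { return vector != nullptr; }
};
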
 
@@ -5600,7 +5614,7 @@ class HAllocate FINAL : public HTemplateInstruction<2> {
     size_upper_bound_ = value;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 0) {
       return Representation::Tagged();
     } else {
@@ -5608,7 +5622,7 @@ class HAllocate FINAL : public HTemplateInstruction<2> {
     }
   }
 
-  virtual Handle<Map> GetMonomorphicJSObjectMap() OVERRIDE {
+  Handle<Map> GetMonomorphicJSObjectMap() OVERRIDE {
     return known_initial_map_;
   }
 
@@ -5651,7 +5665,7 @@ class HAllocate FINAL : public HTemplateInstruction<2> {
   virtual bool HandleSideEffectDominator(GVNFlag side_effect,
                                          HValue* dominator) OVERRIDE;
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(Allocate)
 
@@ -5764,7 +5778,7 @@ class HStoreCodeEntry FINAL: public HTemplateInstruction<2> {
     return new(zone) HStoreCodeEntry(function, code);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -5794,11 +5808,11 @@ class HInnerAllocatedObject FINAL : public HTemplateInstruction<2> {
   HValue* base_object() const { return OperandAt(0); }
   HValue* offset() const { return OperandAt(1); }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return index == 0 ? Representation::Tagged() : Representation::Integer32();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject)
 
@@ -5889,14 +5903,14 @@ class HStoreGlobalCell FINAL : public HUnaryOperation {
     return StoringValueNeedsWriteBarrier(value());
   }
 
-  virtual void FinalizeUniqueness() OVERRIDE {
+  void FinalizeUniqueness() OVERRIDE {
     cell_ = Unique<PropertyCell>(cell_.handle());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell)
 
@@ -5948,22 +5962,22 @@ class HLoadContextSlot FINAL : public HUnaryOperation {
     return mode_ != kNoCheck;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     HLoadContextSlot* b = HLoadContextSlot::cast(other);
     return (slot_index() == b->slot_index());
   }
 
  private:
-  virtual bool IsDeletable() const OVERRIDE { return !RequiresHoleCheck(); }
+  bool IsDeletable() const OVERRIDE { return !RequiresHoleCheck(); }
 
   int slot_index_;
   Mode mode_;
@@ -6005,11 +6019,11 @@ class HStoreContextSlot FINAL : public HTemplateInstruction<2> {
     return mode_ != kNoCheck;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot)
 
@@ -6179,6 +6193,10 @@ class HObjectAccess FINAL {
     return HObjectAccess(kMaps, JSObject::kMapOffset);
   }
 
+  static HObjectAccess ForPrototype() {
+    return HObjectAccess(kMaps, Map::kPrototypeOffset);
+  }
+
   static HObjectAccess ForMapAsInteger32() {
     return HObjectAccess(kMaps, JSObject::kMapOffset,
                          Representation::Integer32());
@@ -6238,6 +6256,10 @@ class HObjectAccess FINAL {
     return HObjectAccess(kInobject, Cell::kValueOffset);
   }
 
+  static HObjectAccess ForWeakCellValue() {
+    return HObjectAccess(kInobject, WeakCell::kValueOffset);
+  }
+
   static HObjectAccess ForAllocationMementoSite() {
     return HObjectAccess(kInobject, AllocationMemento::kAllocationSiteOffset);
   }
@@ -6276,6 +6298,8 @@ class HObjectAccess FINAL {
 
   static HObjectAccess ForContextSlot(int index);
 
+  static HObjectAccess ForScriptContext(int index);
+
   // Create an access to the backing store of an object.
   static HObjectAccess ForBackingStoreOffset(int offset,
       Representation representation = Representation::Tagged());
@@ -6337,6 +6361,51 @@ class HObjectAccess FINAL {
     return HObjectAccess(kInobject, GlobalObject::kNativeContextOffset);
   }
 
+  static HObjectAccess ForJSCollectionTable() {
+    return HObjectAccess::ForObservableJSObjectOffset(
+        JSCollection::kTableOffset);
+  }
+
+  template <typename CollectionType>
+  static HObjectAccess ForOrderedHashTableNumberOfBuckets() {
+    return HObjectAccess(kInobject, CollectionType::kNumberOfBucketsOffset,
+                         Representation::Smi());
+  }
+
+  template <typename CollectionType>
+  static HObjectAccess ForOrderedHashTableNumberOfElements() {
+    return HObjectAccess(kInobject, CollectionType::kNumberOfElementsOffset,
+                         Representation::Smi());
+  }
+
+  template <typename CollectionType>
+  static HObjectAccess ForOrderedHashTableNumberOfDeletedElements() {
+    return HObjectAccess(kInobject,
+                         CollectionType::kNumberOfDeletedElementsOffset,
+                         Representation::Smi());
+  }
+
+  template <typename CollectionType>
+  static HObjectAccess ForOrderedHashTableNextTable() {
+    return HObjectAccess(kInobject, CollectionType::kNextTableOffset);
+  }
+
+  template <typename CollectionType>
+  static HObjectAccess ForOrderedHashTableBucket(int bucket) {
+    return HObjectAccess(kInobject, CollectionType::kHashTableStartOffset +
+                                        (bucket * kPointerSize),
+                         Representation::Smi());
+  }
+
+  // Access into the data table of an OrderedHashTable with a
+  // known-at-compile-time bucket count.
+  template <typename CollectionType, int kBucketCount>
+  static HObjectAccess ForOrderedHashTableDataTableIndex(int index) {
+    return HObjectAccess(kInobject, CollectionType::kHashTableStartOffset +
+                                        (kBucketCount * kPointerSize) +
+                                        (index * kPointerSize));
+  }
+
   inline bool Equals(HObjectAccess that) const {
     return value_ == that.value_;  // portion and offset must match
   }
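
The OrderedHashTable accessors added above are pure offset arithmetic over a flat backing store: the bucket array begins at kHashTableStartOffset, each bucket is one pointer wide, and the data table starts immediately after the last bucket. A sketch of the byte arithmetic, with illustrative constants rather than V8's actual values; note that the first data slot of a table with N buckets lands exactly where bucket N would have been:

#include <iostream>

// Illustrative layout constants; the real values live on the
// OrderedHashTable class.
const int kPointerSize = 8;
const int kHashTableStartOffset = 24;  // header fields come first

int BucketOffset(int bucket) {
  return kHashTableStartOffset + bucket * kPointerSize;
}

// Mirrors ForOrderedHashTableDataTableIndex<Type, kBucketCount>: data
// entries follow the compile-time-known bucket array.
int DataTableOffset(int bucket_count, int index) {
  return kHashTableStartOffset + bucket_count * kPointerSize +
         index * kPointerSize;
}

int main() {
  std::cout << BucketOffset(2) << "\n";        // 24 + 2*8 = 40
  std::cout << DataTableOffset(2, 0) << "\n";  // also 40: data starts after buckets
}
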
@@ -6424,11 +6493,11 @@ class HLoadNamedField FINAL : public HTemplateInstruction<2> {
 
   const UniqueSet<Map>* maps() const { return maps_; }
 
-  virtual bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
-  virtual bool HasOutOfBoundsAccess(int size) OVERRIDE {
+  bool HasEscapingOperandAt(int index) OVERRIDE { return false; }
+  bool HasOutOfBoundsAccess(int size) OVERRIDE {
     return !access().IsInobject() || access().offset() >= size;
   }
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 0) {
       // object must be external in case of external memory access
       return access().IsExternalMemory() ? Representation::External()
@@ -6437,8 +6506,8 @@ class HLoadNamedField FINAL : public HTemplateInstruction<2> {
     DCHECK(index == 1);
     return Representation::None();
   }
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  Range* InferRange(Zone* zone) OVERRIDE;
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   bool CanBeReplacedWith(HValue* other) const {
     if (!CheckFlag(HValue::kCantBeReplaced)) return false;
@@ -6454,7 +6523,7 @@ class HLoadNamedField FINAL : public HTemplateInstruction<2> {
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedField)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     HLoadNamedField* that = HLoadNamedField::cast(other);
     if (!this->access_.Equals(that->access_)) return false;
     if (this->maps_ == that->maps_) return true;
@@ -6518,7 +6587,7 @@ class HLoadNamedField FINAL : public HTemplateInstruction<2> {
     access.SetGVNFlags(this, LOAD);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 
   HObjectAccess access_;
   const UniqueSet<Map>* maps_;
@@ -6534,13 +6603,11 @@ class HLoadNamedGeneric FINAL : public HTemplateInstruction<2> {
   HValue* object() const { return OperandAt(1); }
   Handle<Object> name() const { return name_; }
 
-  FeedbackVectorICSlot slot() const {
-    DCHECK(FLAG_vector_ics && !slot_.IsInvalid());
-    return slot_;
-  }
+  FeedbackVectorICSlot slot() const { return slot_; }
   Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
   }
+  bool HasVectorAndSlot() const { return FLAG_vector_ics; }
   void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
                         FeedbackVectorICSlot slot) {
     DCHECK(FLAG_vector_ics);
@@ -6548,11 +6615,11 @@ class HLoadNamedGeneric FINAL : public HTemplateInstruction<2> {
     slot_ = slot;
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric)
 
@@ -6577,14 +6644,14 @@ class HLoadFunctionPrototype FINAL : public HUnaryOperation {
 
   HValue* function() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   explicit HLoadFunctionPrototype(HValue* function)
@@ -6657,14 +6724,14 @@ class HLoadKeyed FINAL
   void SetDehoisted(bool is_dehoisted) OVERRIDE {
     bit_field_ = IsDehoistedField::update(bit_field_, is_dehoisted);
   }
-  virtual ElementsKind elements_kind() const OVERRIDE {
+  ElementsKind elements_kind() const OVERRIDE {
     return ElementsKindField::decode(bit_field_);
   }
   LoadKeyedHoleMode hole_mode() const {
     return HoleModeField::decode(bit_field_);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     // kind_fast:                 tagged[int32] (none)
     // kind_double:               tagged[int32] (none)
     // kind_fixed_typed_array:    tagged[int32] (none)
@@ -6680,27 +6747,26 @@ class HLoadKeyed FINAL
     return Representation::None();
   }
 
-  virtual Representation observed_input_representation(int index) OVERRIDE {
+  Representation observed_input_representation(int index) OVERRIDE {
     return RequiredInputRepresentation(index);
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   bool UsesMustHandleHole() const;
   bool AllUsesCanTreatHoleAsNaN() const;
   bool RequiresHoleCheck() const;
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE;
+  Range* InferRange(Zone* zone) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     if (!other->IsLoadKeyed()) return false;
     HLoadKeyed* other_load = HLoadKeyed::cast(other);
 
-    if (IsDehoisted() && base_offset() != other_load->base_offset())
-      return false;
+    if (base_offset() != other_load->base_offset()) return false;
     return elements_kind() == other_load->elements_kind();
   }
 
@@ -6772,9 +6838,7 @@ class HLoadKeyed FINAL
     SetFlag(kUseGVN);
   }
 
-  virtual bool IsDeletable() const OVERRIDE {
-    return !RequiresHoleCheck();
-  }
+  bool IsDeletable() const OVERRIDE { return !RequiresHoleCheck(); }
 
   // Establish some checks around our packed fields
   enum LoadKeyedBits {
@@ -6815,13 +6879,11 @@ class HLoadKeyedGeneric FINAL : public HTemplateInstruction<3> {
   HValue* object() const { return OperandAt(0); }
   HValue* key() const { return OperandAt(1); }
   HValue* context() const { return OperandAt(2); }
-  FeedbackVectorICSlot slot() const {
-    DCHECK(FLAG_vector_ics && !slot_.IsInvalid());
-    return slot_;
-  }
+  FeedbackVectorICSlot slot() const { return slot_; }
   Handle<TypeFeedbackVector> feedback_vector() const {
     return feedback_vector_;
   }
+  bool HasVectorAndSlot() const { return FLAG_vector_ics; }
   void SetVectorAndSlot(Handle<TypeFeedbackVector> vector,
                         FeedbackVectorICSlot slot) {
     DCHECK(FLAG_vector_ics);
@@ -6829,14 +6891,14 @@ class HLoadKeyedGeneric FINAL : public HTemplateInstruction<3> {
     slot_ = slot;
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     // tagged[tagged]
     return Representation::Tagged();
   }
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric)
 
@@ -6875,13 +6937,11 @@ class HStoreNamedField FINAL : public HTemplateInstruction<3> {
 
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField)
 
-  virtual bool HasEscapingOperandAt(int index) OVERRIDE {
-    return index == 1;
-  }
-  virtual bool HasOutOfBoundsAccess(int size) OVERRIDE {
+  bool HasEscapingOperandAt(int index) OVERRIDE { return index == 1; }
+  bool HasOutOfBoundsAccess(int size) OVERRIDE {
     return !access().IsInobject() || access().offset() >= size;
   }
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 0 && access().IsExternalMemory()) {
       // object must be external in case of external memory access
       return Representation::External();
@@ -6913,7 +6973,7 @@ class HStoreNamedField FINAL : public HTemplateInstruction<3> {
     dominator_ = dominator;
     return false;
   }
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   HValue* object() const { return OperandAt(0); }
   HValue* value() const { return OperandAt(1); }
@@ -6943,7 +7003,9 @@ class HStoreNamedField FINAL : public HTemplateInstruction<3> {
   }
 
   bool NeedsWriteBarrier() const {
-    DCHECK(!field_representation().IsDouble() || !has_transition());
+    DCHECK(!field_representation().IsDouble() ||
+           (FLAG_unbox_double_fields && access_.IsInobject()) ||
+           !has_transition());
     if (field_representation().IsDouble()) return false;
     if (field_representation().IsSmi()) return false;
     if (field_representation().IsInteger32()) return false;
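
The relaxed DCHECK above accounts for FLAG_unbox_double_fields: an in-object double field stores raw bits rather than a tagged pointer, so a transitioning store of a double no longer contradicts the no-write-barrier rule. The predicate itself boils down to: only tagged stores that may hold heap pointers need a barrier. A condensed sketch, where Rep and the bool parameter are stand-ins for the real representation and value queries:

// Raw numeric representations never store heap pointers, so the GC
// needs no notification; only a tagged store of a possible heap
// object does.
enum class Rep { kTagged, kDouble, kSmi, kInteger32 };

bool NeedsWriteBarrier(Rep field_rep, bool value_may_be_heap_object) {
  if (field_rep == Rep::kDouble) return false;     // raw 64-bit payload
  if (field_rep == Rep::kSmi) return false;        // immediate integer
  if (field_rep == Rep::kInteger32) return false;  // raw 32-bit payload
  return value_may_be_heap_object;
}
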
@@ -7025,9 +7087,9 @@ class HStoreNamedGeneric FINAL : public HTemplateInstruction<3> {
   Handle<String> name() const { return name_; }
   StrictMode strict_mode() const { return strict_mode_; }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7062,7 +7124,7 @@ class HStoreKeyed FINAL
   DECLARE_INSTRUCTION_FACTORY_P6(HStoreKeyed, HValue*, HValue*, HValue*,
                                  ElementsKind, StoreFieldOrKeyedMode, int);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     // kind_fast:               tagged[int32] = tagged
     // kind_double:             tagged[int32] = double
     // kind_smi   :             tagged[int32] = smi
@@ -7113,7 +7175,7 @@ class HStoreKeyed FINAL
     return is_external() || is_fixed_typed_array();
   }
 
-  virtual Representation observed_input_representation(int index) OVERRIDE {
+  Representation observed_input_representation(int index) OVERRIDE {
     if (index < 2) return RequiredInputRepresentation(index);
     if (IsUninitialized()) {
       return Representation::None();
@@ -7178,7 +7240,7 @@ class HStoreKeyed FINAL
 
   bool NeedsCanonicalization();
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed)
 
@@ -7247,12 +7309,12 @@ class HStoreKeyedGeneric FINAL : public HTemplateInstruction<4> {
   HValue* context() const { return OperandAt(3); }
   StrictMode strict_mode() const { return strict_mode_; }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     // tagged[tagged] = tagged
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric)
 
@@ -7285,7 +7347,7 @@ class HTransitionElementsKind FINAL : public HTemplateInstruction<2> {
                                              original_map, transitioned_map);
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7296,18 +7358,18 @@ class HTransitionElementsKind FINAL : public HTemplateInstruction<2> {
   ElementsKind from_kind() const { return from_kind_; }
   ElementsKind to_kind() const { return to_kind_; }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
     return original_map_ == instr->original_map_ &&
            transitioned_map_ == instr->transitioned_map_;
   }
 
-  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+  int RedefinedOperandIndex() OVERRIDE { return 0; }
 
  private:
   HTransitionElementsKind(HValue* context,
@@ -7350,16 +7412,16 @@ class HStringAdd FINAL : public HBinaryOperation {
   StringAddFlags flags() const { return flags_; }
   PretenureFlag pretenure_flag() const { return pretenure_flag_; }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(StringAdd)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     return flags_ == HStringAdd::cast(other)->flags_ &&
         pretenure_flag_ == HStringAdd::cast(other)->pretenure_flag_;
   }
@@ -7387,7 +7449,7 @@ class HStringAdd FINAL : public HBinaryOperation {
   }
 
   // No side-effects except possible allocation:
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 
   const StringAddFlags flags_;
   const PretenureFlag pretenure_flag_;
@@ -7400,7 +7462,7 @@ class HStringCharCodeAt FINAL : public HTemplateInstruction<3> {
                                               HValue*,
                                               HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     // The index is supposed to be Integer32.
     return index == 2
         ? Representation::Integer32()
@@ -7414,9 +7476,9 @@ class HStringCharCodeAt FINAL : public HTemplateInstruction<3> {
   DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE {
+  Range* InferRange(Zone* zone) OVERRIDE {
     return new(zone) Range(0, String::kMaxUtf16CodeUnit);
   }
 
@@ -7433,7 +7495,7 @@ class HStringCharCodeAt FINAL : public HTemplateInstruction<3> {
   }
 
   // No side effects: runtime function assumes string + number inputs.
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -7443,7 +7505,7 @@ class HStringCharFromCode FINAL : public HTemplateInstruction<2> {
                            HValue* context,
                            HValue* char_code);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return index == 0
         ? Representation::Tagged()
         : Representation::Integer32();
@@ -7452,7 +7514,7 @@ class HStringCharFromCode FINAL : public HTemplateInstruction<2> {
   HValue* context() const { return OperandAt(0); }
   HValue* value() const { return OperandAt(1); }
 
-  virtual bool DataEquals(HValue* other) OVERRIDE { return true; }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
   DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode)
 
@@ -7466,7 +7528,7 @@ class HStringCharFromCode FINAL : public HTemplateInstruction<2> {
     SetChangesFlag(kNewSpacePromotion);
   }
 
-  virtual bool IsDeletable() const OVERRIDE {
+  bool IsDeletable() const OVERRIDE {
     return !value()->ToNumberCanBeObserved();
   }
 };
@@ -7493,7 +7555,7 @@ class HMaterializedLiteral : public HTemplateInstruction<V> {
   }
 
  private:
-  virtual bool IsDeletable() const FINAL OVERRIDE { return true; }
+  bool IsDeletable() const FINAL { return true; }
 
   int literal_index_;
   int depth_;
@@ -7514,7 +7576,7 @@ class HRegExpLiteral FINAL : public HMaterializedLiteral<1> {
   Handle<String> pattern() { return pattern_; }
   Handle<String> flags() { return flags_; }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7548,7 +7610,7 @@ class HFunctionLiteral FINAL : public HTemplateInstruction<1> {
                                               bool);
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7562,6 +7624,7 @@ class HFunctionLiteral FINAL : public HTemplateInstruction<1> {
   bool is_arrow() const { return IsArrowFunction(kind()); }
   bool is_generator() const { return IsGeneratorFunction(kind()); }
   bool is_concise_method() const { return IsConciseMethod(kind()); }
+  bool is_default_constructor() const { return IsDefaultConstructor(kind()); }
   FunctionKind kind() const { return FunctionKindField::decode(bit_field_); }
   StrictMode strict_mode() const { return StrictModeField::decode(bit_field_); }
 
@@ -7579,12 +7642,12 @@ class HFunctionLiteral FINAL : public HTemplateInstruction<1> {
     SetChangesFlag(kNewSpacePromotion);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 
-  class FunctionKindField : public BitField<FunctionKind, 0, 3> {};
-  class PretenureField : public BitField<bool, 3, 1> {};
-  class HasNoLiteralsField : public BitField<bool, 4, 1> {};
-  class StrictModeField : public BitField<StrictMode, 5, 1> {};
+  class FunctionKindField : public BitField<FunctionKind, 0, 4> {};
+  class PretenureField : public BitField<bool, 5, 1> {};
+  class HasNoLiteralsField : public BitField<bool, 6, 1> {};
+  class StrictModeField : public BitField<StrictMode, 7, 1> {};
 
   Handle<SharedFunctionInfo> shared_info_;
   uint32_t bit_field_;
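
The field widening above follows V8's BitField pattern: BitField<T, shift, size> packs a value into bits [shift, shift + size) of an integer, so growing FunctionKind from 3 to 4 bits forces every later field to a higher shift. A minimal sketch of encode/decode in the same spirit (this BitField is a simplified stand-in for V8's template):

#include <cassert>
#include <cstdint>

// Packs T into bits [kShift, kShift + kSize) of a uint32_t.
template <typename T, int kShift, int kSize>
struct BitField {
  static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static T decode(uint32_t field) {
    return static_cast<T>((field & kMask) >> kShift);
  }
};

using KindField = BitField<int, 0, 4>;        // now 4 bits wide
using PretenureField = BitField<bool, 5, 1>;  // moved past the wider field

int main() {
  uint32_t bits = KindField::encode(9) | PretenureField::encode(true);
  assert(KindField::decode(bits) == 9);
  assert(PretenureField::decode(bits) == true);
}
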
@@ -7598,9 +7661,9 @@ class HTypeof FINAL : public HTemplateInstruction<2> {
   HValue* context() const { return OperandAt(0); }
   HValue* value() const { return OperandAt(1); }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7613,7 +7676,7 @@ class HTypeof FINAL : public HTemplateInstruction<2> {
     set_representation(Representation::Tagged());
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -7621,7 +7684,7 @@ class HTrapAllocationMemento FINAL : public HTemplateInstruction<1> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HTrapAllocationMemento, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7640,7 +7703,7 @@ class HToFastProperties FINAL : public HUnaryOperation {
  public:
   DECLARE_INSTRUCTION_FACTORY_P1(HToFastProperties, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7661,7 +7724,7 @@ class HToFastProperties FINAL : public HUnaryOperation {
 #endif
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -7671,7 +7734,7 @@ class HDateField FINAL : public HUnaryOperation {
 
   Smi* index() const { return index_; }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7695,7 +7758,7 @@ class HSeqStringGetChar FINAL : public HTemplateInstruction<2> {
                            HValue* string,
                            HValue* index);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return (index == 0) ? Representation::Tagged()
                         : Representation::Integer32();
   }
@@ -7707,11 +7770,11 @@ class HSeqStringGetChar FINAL : public HTemplateInstruction<2> {
   DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar)
 
  protected:
-  virtual bool DataEquals(HValue* other) OVERRIDE {
+  bool DataEquals(HValue* other) OVERRIDE {
     return encoding() == HSeqStringGetChar::cast(other)->encoding();
   }
 
-  virtual Range* InferRange(Zone* zone) OVERRIDE {
+  Range* InferRange(Zone* zone) OVERRIDE {
     if (encoding() == String::ONE_BYTE_ENCODING) {
       return new(zone) Range(0, String::kMaxOneByteCharCode);
     } else {
@@ -7731,7 +7794,7 @@ class HSeqStringGetChar FINAL : public HTemplateInstruction<2> {
     SetDependsOnFlag(kStringChars);
   }
 
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 
   String::Encoding encoding_;
 };
@@ -7749,7 +7812,7 @@ class HSeqStringSetChar FINAL : public HTemplateInstruction<4> {
   HValue* index() { return OperandAt(2); }
   HValue* value() { return OperandAt(3); }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return (index <= 1) ? Representation::Tagged()
                         : Representation::Integer32();
   }
@@ -7778,13 +7841,13 @@ class HCheckMapValue FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P2(HCheckMapValue, HValue*, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual HType CalculateInferredType() OVERRIDE {
+  HType CalculateInferredType() OVERRIDE {
     if (value()->type().IsHeapObject()) return value()->type();
     return HType::HeapObject();
   }
@@ -7792,16 +7855,14 @@ class HCheckMapValue FINAL : public HTemplateInstruction<2> {
   HValue* value() const { return OperandAt(0); }
   HValue* map() const { return OperandAt(1); }
 
-  virtual HValue* Canonicalize() OVERRIDE;
+  HValue* Canonicalize() OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(CheckMapValue)
 
  protected:
-  virtual int RedefinedOperandIndex() OVERRIDE { return 0; }
+  int RedefinedOperandIndex() OVERRIDE { return 0; }
 
-  virtual bool DataEquals(HValue* other) OVERRIDE {
-    return true;
-  }
+  bool DataEquals(HValue* other) OVERRIDE { return true; }
 
  private:
   HCheckMapValue(HValue* value, HValue* map)
@@ -7820,18 +7881,16 @@ class HForInPrepareMap FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_WITH_CONTEXT_FACTORY_P1(HForInPrepareMap, HValue*);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
   HValue* context() const { return OperandAt(0); }
   HValue* enumerable() const { return OperandAt(1); }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual HType CalculateInferredType() OVERRIDE {
-    return HType::Tagged();
-  }
+  HType CalculateInferredType() OVERRIDE { return HType::Tagged(); }
 
   DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap);
 
@@ -7850,7 +7909,7 @@ class HForInCacheArray FINAL : public HTemplateInstruction<2> {
  public:
   DECLARE_INSTRUCTION_FACTORY_P3(HForInCacheArray, HValue*, HValue*, int);
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7866,11 +7925,9 @@ class HForInCacheArray FINAL : public HTemplateInstruction<2> {
     index_cache_ = index_cache;
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual HType CalculateInferredType() OVERRIDE {
-    return HType::Tagged();
-  }
+  HType CalculateInferredType() OVERRIDE { return HType::Tagged(); }
 
   DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray);
 
@@ -7900,7 +7957,7 @@ class HLoadFieldByIndex FINAL : public HTemplateInstruction<2> {
     set_representation(Representation::Tagged());
   }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     if (index == 1) {
       return Representation::Smi();
     } else {
@@ -7911,16 +7968,14 @@ class HLoadFieldByIndex FINAL : public HTemplateInstruction<2> {
   HValue* object() const { return OperandAt(0); }
   HValue* index() const { return OperandAt(1); }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
-  virtual HType CalculateInferredType() OVERRIDE {
-    return HType::Tagged();
-  }
+  HType CalculateInferredType() OVERRIDE { return HType::Tagged(); }
 
   DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex);
 
  private:
-  virtual bool IsDeletable() const OVERRIDE { return true; }
+  bool IsDeletable() const OVERRIDE { return true; }
 };
 
 
@@ -7930,7 +7985,7 @@ class HStoreFrameContext: public HUnaryOperation {
 
   HValue* context() { return OperandAt(0); }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
@@ -7952,11 +8007,11 @@ class HAllocateBlockContext: public HTemplateInstruction<2> {
   HValue* function() const { return OperandAt(1); }
   Handle<ScopeInfo> scope_info() const { return scope_info_; }
 
-  virtual Representation RequiredInputRepresentation(int index) OVERRIDE {
+  Representation RequiredInputRepresentation(int index) OVERRIDE {
     return Representation::Tagged();
   }
 
-  virtual std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
+  std::ostream& PrintDataTo(std::ostream& os) const OVERRIDE;  // NOLINT
 
   DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext)
 
index 6184bb9..a684311 100644

@@ -1217,8 +1217,8 @@ void HGraphBuilder::FinishExitCurrentBlock(HControlInstruction* instruction) {
 void HGraphBuilder::AddIncrementCounter(StatsCounter* counter) {
   if (FLAG_native_code_counters && counter->Enabled()) {
     HValue* reference = Add<HConstant>(ExternalReference(counter));
-    HValue* old_value = Add<HLoadNamedField>(
-        reference, static_cast<HValue*>(NULL), HObjectAccess::ForCounter());
+    HValue* old_value =
+        Add<HLoadNamedField>(reference, nullptr, HObjectAccess::ForCounter());
     HValue* new_value = AddUncasted<HAdd>(old_value, graph()->GetConstant1());
     new_value->ClearFlag(HValue::kCanOverflow);  // Ignore counter overflow
     Add<HStoreNamedField>(reference, HObjectAccess::ForCounter(),
@@ -1252,11 +1252,10 @@ HBasicBlock* HGraphBuilder::CreateLoopHeaderBlock() {
 
 
 HValue* HGraphBuilder::BuildGetElementsKind(HValue* object) {
-  HValue* map = Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
-                                     HObjectAccess::ForMap());
+  HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
 
-  HValue* bit_field2 = Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
-                                            HObjectAccess::ForMapBitField2());
+  HValue* bit_field2 =
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField2());
   return BuildDecodeField<Map::ElementsKindBits>(bit_field2);
 }
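
The mechanical change running through these hunks swaps static_cast<HValue*>(NULL) for nullptr. The cast existed only to give the null a concrete pointer type for overload resolution and template-argument matching; C++11's nullptr has its own type, std::nullptr_t, which converts to any object pointer type, making the cast redundant. A tiny illustration of the problem plain NULL/0 causes (Add here is a hypothetical pair of overloads, not V8's builder API):

#include <iostream>

struct HValue;

void Add(int dependency) { std::cout << "int overload\n"; }
void Add(HValue* dependency) { std::cout << "pointer overload\n"; }

int main() {
  // Add(NULL);                  // ambiguous or wrong: NULL is often just 0
  Add(static_cast<HValue*>(0));  // the old, noisy workaround
  Add(nullptr);                  // unambiguously the pointer overload
}
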
 
@@ -1415,10 +1414,11 @@ void HGraphBuilder::BuildTransitionElementsKind(HValue* object,
 
     HInstruction* elements_length = AddLoadFixedArrayLength(elements);
 
-    HInstruction* array_length = is_jsarray
-        ? Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
-                               HObjectAccess::ForArrayLength(from_kind))
-        : elements_length;
+    HInstruction* array_length =
+        is_jsarray
+            ? Add<HLoadNamedField>(object, nullptr,
+                                   HObjectAccess::ForArrayLength(from_kind))
+            : elements_length;
 
     BuildGrowElementsCapacity(object, elements, from_kind, to_kind,
                               array_length, elements_length);
@@ -1436,14 +1436,14 @@ void HGraphBuilder::BuildJSObjectCheck(HValue* receiver,
   Add<HCheckHeapObject>(receiver);
 
   // Get the map of the receiver.
-  HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
-                                     HObjectAccess::ForMap());
+  HValue* map =
+      Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
 
   // Check the instance type and if an access check is needed, this can be
   // done with a single load, since both bytes are adjacent in the map.
   HObjectAccess access(HObjectAccess::ForMapInstanceTypeAndBitField());
   HValue* instance_type_and_bit_field =
-      Add<HLoadNamedField>(map, static_cast<HValue*>(NULL), access);
+      Add<HLoadNamedField>(map, nullptr, access);
 
   HValue* mask = Add<HConstant>(0x00FF | (bit_field_mask << 8));
   HValue* and_result = AddUncasted<HBitwise>(Token::BIT_AND,
@@ -1472,11 +1472,9 @@ void HGraphBuilder::BuildKeyedIndexCheck(HValue* key,
   }
   key_smi_if.Else();
   {
-    HValue* map = Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
-                                       HObjectAccess::ForMap());
+    HValue* map = Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForMap());
     HValue* instance_type =
-        Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
-                             HObjectAccess::ForMapInstanceType());
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
 
     // Non-unique string, check for a string with a hash code that is actually
     // an index.
@@ -1508,9 +1506,8 @@ void HGraphBuilder::BuildKeyedIndexCheck(HValue* key,
       {
         // String: check whether the String is a String of an index. If it is,
         // extract the index value from the hash.
-        HValue* hash =
-            Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
-                                 HObjectAccess::ForNameHashField());
+        HValue* hash = Add<HLoadNamedField>(key, nullptr,
+                                            HObjectAccess::ForNameHashField());
         HValue* not_index_mask = Add<HConstant>(static_cast<int>(
             String::kContainsCachedArrayIndexMask));
 
@@ -1569,11 +1566,10 @@ void HGraphBuilder::BuildKeyedIndexCheck(HValue* key,
 void HGraphBuilder::BuildNonGlobalObjectCheck(HValue* receiver) {
   // Get the instance type of the receiver, and make sure that it is
   // not one of the global object types.
-  HValue* map = Add<HLoadNamedField>(receiver, static_cast<HValue*>(NULL),
-                                     HObjectAccess::ForMap());
+  HValue* map =
+      Add<HLoadNamedField>(receiver, nullptr, HObjectAccess::ForMap());
   HValue* instance_type =
-    Add<HLoadNamedField>(map, static_cast<HValue*>(NULL),
-                         HObjectAccess::ForMapInstanceType());
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
   STATIC_ASSERT(JS_BUILTINS_OBJECT_TYPE == JS_GLOBAL_OBJECT_TYPE + 1);
   HValue* min_global_type = Add<HConstant>(JS_GLOBAL_OBJECT_TYPE);
   HValue* max_global_type = Add<HConstant>(JS_BUILTINS_OBJECT_TYPE);
@@ -1595,11 +1591,9 @@ void HGraphBuilder::BuildTestForDictionaryProperties(
     HValue* object,
     HIfContinuation* continuation) {
   HValue* properties = Add<HLoadNamedField>(
-      object, static_cast<HValue*>(NULL),
-      HObjectAccess::ForPropertiesPointer());
+      object, nullptr, HObjectAccess::ForPropertiesPointer());
   HValue* properties_map =
-      Add<HLoadNamedField>(properties, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForMap());
+      Add<HLoadNamedField>(properties, nullptr, HObjectAccess::ForMap());
   HValue* hash_map = Add<HLoadRoot>(Heap::kHashTableMapRootIndex);
   IfBuilder builder(this);
   builder.If<HCompareObjectEqAndBranch>(properties_map, hash_map);
@@ -1612,13 +1606,11 @@ HValue* HGraphBuilder::BuildKeyedLookupCacheHash(HValue* object,
   // Load the map of the receiver, compute the keyed lookup cache hash
   // based on 32 bits of the map pointer and the string hash.
   HValue* object_map =
-      Add<HLoadNamedField>(object, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForMapAsInteger32());
+      Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMapAsInteger32());
   HValue* shifted_map = AddUncasted<HShr>(
       object_map, Add<HConstant>(KeyedLookupCache::kMapHashShift));
   HValue* string_hash =
-      Add<HLoadNamedField>(key, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForStringHashField());
+      Add<HLoadNamedField>(key, nullptr, HObjectAccess::ForStringHashField());
   HValue* shifted_hash = AddUncasted<HShr>(
       string_hash, Add<HConstant>(String::kHashShift));
   HValue* xor_result = AddUncasted<HBitwise>(Token::BIT_XOR, shifted_map,
@@ -1666,11 +1658,9 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
                                                            HValue* elements,
                                                            HValue* key,
                                                            HValue* hash) {
-  HValue* capacity = Add<HLoadKeyed>(
-      elements,
-      Add<HConstant>(NameDictionary::kCapacityIndex),
-      static_cast<HValue*>(NULL),
-      FAST_ELEMENTS);
+  HValue* capacity =
+      Add<HLoadKeyed>(elements, Add<HConstant>(NameDictionary::kCapacityIndex),
+                      nullptr, FAST_ELEMENTS);
 
   HValue* mask = AddUncasted<HSub>(capacity, graph()->GetConstant1());
   mask->ChangeRepresentation(Representation::Integer32());
@@ -1700,8 +1690,8 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
       AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset));
   key_index->ClearFlag(HValue::kCanOverflow);
 
-  HValue* candidate_key = Add<HLoadKeyed>(
-      elements, key_index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+  HValue* candidate_key =
+      Add<HLoadKeyed>(elements, key_index, nullptr, FAST_ELEMENTS);
   IfBuilder if_undefined(this);
   if_undefined.If<HCompareObjectEqAndBranch>(candidate_key,
                                              graph()->GetConstantUndefined());
@@ -1727,8 +1717,8 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
         if_update_with_internalized.IfNot<HIsSmiAndBranch>(candidate_key);
     if_update_with_internalized.And();
     HValue* map = AddLoadMap(candidate_key, smi_check);
-    HValue* instance_type = Add<HLoadNamedField>(
-        map, static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType());
+    HValue* instance_type =
+        Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
     HValue* not_internalized_bit = AddUncasted<HBitwise>(
         Token::BIT_AND, instance_type,
         Add<HConstant>(static_cast<int>(kIsNotInternalizedMask)));
@@ -1755,8 +1745,8 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
     HValue* details_index =
         AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 2));
     details_index->ClearFlag(HValue::kCanOverflow);
-    HValue* details = Add<HLoadKeyed>(
-        elements, details_index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+    HValue* details =
+        Add<HLoadKeyed>(elements, details_index, nullptr, FAST_ELEMENTS);
     int details_mask = PropertyDetails::TypeField::kMask |
                        PropertyDetails::DeletedField::kMask;
     details = AddUncasted<HBitwise>(Token::BIT_AND, details,
@@ -1768,8 +1758,7 @@ HValue* HGraphBuilder::BuildUncheckedDictionaryElementLoad(HValue* receiver,
     HValue* result_index =
         AddUncasted<HAdd>(base_index, Add<HConstant>(start_offset + 1));
     result_index->ClearFlag(HValue::kCanOverflow);
-    Push(Add<HLoadKeyed>(elements, result_index, static_cast<HValue*>(NULL),
-                         FAST_ELEMENTS));
+    Push(Add<HLoadKeyed>(elements, result_index, nullptr, FAST_ELEMENTS));
     details_compare.Else();
     Add<HPushArguments>(receiver, key);
     Push(Add<HCallRuntime>(isolate()->factory()->empty_string(),
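
BuildUncheckedDictionaryElementLoad, reworked in the hunks above, walks a NameDictionary laid out as one flat array: after a small header, each entry occupies three consecutive slots (key, value, details), and probing starts at hash & mask with mask = capacity - 1. Stripped of the graph-building plumbing, the index arithmetic looks roughly like this (the constants are illustrative of the shape, not NameDictionary's exact values):

#include <iostream>

// Each entry is (key, value, details) stored contiguously after a
// few header slots; capacity is a power of two so & mask wraps.
const int kEntrySize = 3;
const int kElementsStartIndex = 3;  // illustrative header size

int KeyIndex(int hash, int probe, int capacity) {
  int mask = capacity - 1;
  int entry = (hash + probe) & mask;  // probe sequence wraps around
  return kElementsStartIndex + entry * kEntrySize;
}

int main() {
  // value lives at key_index + 1, details at key_index + 2,
  // matching the start_offset + 1 / + 2 loads above.
  int key_index = KeyIndex(/*hash=*/0x2a, /*probe=*/0, /*capacity=*/16);
  std::cout << key_index << " " << key_index + 1 << " "
            << key_index + 2 << "\n";
}
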
@@ -1821,15 +1810,14 @@ HValue* HGraphBuilder::BuildRegExpConstructResult(HValue* length,
 
   // Initialize the JSRegExpResult header.
   HValue* global_object = Add<HLoadNamedField>(
-      context(), static_cast<HValue*>(NULL),
+      context(), nullptr,
       HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
   HValue* native_context = Add<HLoadNamedField>(
-      global_object, static_cast<HValue*>(NULL),
-      HObjectAccess::ForGlobalObjectNativeContext());
+      global_object, nullptr, HObjectAccess::ForGlobalObjectNativeContext());
   Add<HStoreNamedField>(
       result, HObjectAccess::ForMap(),
       Add<HLoadNamedField>(
-          native_context, static_cast<HValue*>(NULL),
+          native_context, nullptr,
           HObjectAccess::ForContextSlot(Context::REGEXP_RESULT_MAP_INDEX)));
   HConstant* empty_fixed_array =
       Add<HConstant>(isolate()->factory()->empty_fixed_array());
@@ -1908,8 +1896,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
 
     // Load the key.
     HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
-    HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
-                                  static_cast<HValue*>(NULL),
+    HValue* key = Add<HLoadKeyed>(number_string_cache, key_index, nullptr,
                                   FAST_ELEMENTS, ALLOW_RETURN_HOLE);
 
     // Check if object == key.
@@ -1945,8 +1932,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
 
         // Load the key.
         HValue* key_index = AddUncasted<HShl>(hash, graph()->GetConstant1());
-        HValue* key = Add<HLoadKeyed>(number_string_cache, key_index,
-                                      static_cast<HValue*>(NULL),
+        HValue* key = Add<HLoadKeyed>(number_string_cache, key_index, nullptr,
                                       FAST_ELEMENTS, ALLOW_RETURN_HOLE);
 
         // Check if the key is a heap number and compare it with the object.
@@ -1999,8 +1985,7 @@ HValue* HGraphBuilder::BuildNumberToString(HValue* object, Type* type) {
     // Load the value in case of cache hit.
     HValue* key_index = Pop();
     HValue* value_index = AddUncasted<HAdd>(key_index, graph()->GetConstant1());
-    Push(Add<HLoadKeyed>(number_string_cache, value_index,
-                         static_cast<HValue*>(NULL),
+    Push(Add<HLoadKeyed>(number_string_cache, value_index, nullptr,
                          FAST_ELEMENTS, ALLOW_RETURN_HOLE));
   }
   if_found.Else();
@@ -2440,8 +2425,7 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
     HValue* backing_store;
     if (IsExternalArrayElementsKind(elements_kind)) {
       backing_store = Add<HLoadNamedField>(
-          elements, static_cast<HValue*>(NULL),
-          HObjectAccess::ForExternalArrayExternalPointer());
+          elements, nullptr, HObjectAccess::ForExternalArrayExternalPointer());
     } else {
       backing_store = elements;
     }
@@ -2847,8 +2831,7 @@ void HGraphBuilder::BuildCopyProperties(HValue* from_properties,
   key = AddUncasted<HSub>(key, graph()->GetConstant1());
   key->ClearFlag(HValue::kCanOverflow);
 
-  HValue* element =
-      Add<HLoadKeyed>(from_properties, key, static_cast<HValue*>(NULL), kind);
+  HValue* element = Add<HLoadKeyed>(from_properties, key, nullptr, kind);
 
   Add<HStoreKeyed>(to_properties, key, element, kind);
 
@@ -2888,8 +2871,7 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
     for (int i = 0; i < constant_capacity; i++) {
       HValue* key_constant = Add<HConstant>(i);
       HInstruction* value = Add<HLoadKeyed>(from_elements, key_constant,
-                                            static_cast<HValue*>(NULL),
-                                            from_elements_kind);
+                                            nullptr, from_elements_kind);
       Add<HStoreKeyed>(to_elements, key_constant, value, to_elements_kind);
     }
   } else {
@@ -2907,10 +2889,8 @@ void HGraphBuilder::BuildCopyElements(HValue* from_elements,
     key = AddUncasted<HSub>(key, graph()->GetConstant1());
     key->ClearFlag(HValue::kCanOverflow);
 
-    HValue* element = Add<HLoadKeyed>(from_elements, key,
-                                      static_cast<HValue*>(NULL),
-                                      from_elements_kind,
-                                      ALLOW_RETURN_HOLE);
+    HValue* element = Add<HLoadKeyed>(from_elements, key, nullptr,
+                                      from_elements_kind, ALLOW_RETURN_HOLE);
 
     ElementsKind kind = (IsHoleyElementsKind(from_elements_kind) &&
                          IsFastSmiElementsKind(to_elements_kind))
@@ -3021,9 +3001,9 @@ HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
   // Copy the elements array header.
   for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
     HObjectAccess access = HObjectAccess::ForFixedArrayHeader(i);
-    Add<HStoreNamedField>(elements, access,
-        Add<HLoadNamedField>(boilerplate_elements,
-                             static_cast<HValue*>(NULL), access));
+    Add<HStoreNamedField>(
+        elements, access,
+        Add<HLoadNamedField>(boilerplate_elements, nullptr, access));
   }
 
   // And copy the array length.
@@ -3036,10 +3016,9 @@ HValue* HGraphBuilder::BuildCloneShallowArrayNonEmpty(HValue* boilerplate,
 }
 
 
-void HGraphBuilder::BuildCompareNil(
-    HValue* value,
-    Type* type,
-    HIfContinuation* continuation) {
+void HGraphBuilder::BuildCompareNil(HValue* value, Type* type,
+                                    HIfContinuation* continuation,
+                                    MapEmbedding map_embedding) {
   IfBuilder if_nil(this);
   bool some_case_handled = false;
   bool some_case_missing = false;
@@ -3078,7 +3057,21 @@ void HGraphBuilder::BuildCompareNil(
       // the monomorphic map when the code is used as a template to generate a
       // new IC. For optimized functions, there is no sentinel map; the map
       // emitted below is the actual monomorphic map.
-      Add<HCheckMaps>(value, type->Classes().Current());
+      if (map_embedding == kEmbedMapsViaWeakCells) {
+        HValue* cell =
+            Add<HConstant>(Map::WeakCellForMap(type->Classes().Current()));
+        HValue* expected_map = Add<HLoadNamedField>(
+            cell, nullptr, HObjectAccess::ForWeakCellValue());
+        HValue* map =
+            Add<HLoadNamedField>(value, nullptr, HObjectAccess::ForMap());
+        IfBuilder map_check(this);
+        map_check.IfNot<HCompareObjectEqAndBranch>(expected_map, map);
+        map_check.ThenDeopt("Unknown map");
+        map_check.End();
+      } else {
+        DCHECK(map_embedding == kEmbedMapsDirectly);
+        Add<HCheckMaps>(value, type->Classes().Current());
+      }
     } else {
       if_nil.Deopt("Too many undetectable types");
     }
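
A minimal standalone sketch of the weak-cell map check emitted above. The
types here are simplified stand-ins, not V8's: the point is that optimized
code embeds a WeakCell rather than the map itself, reloads the cell's value
at run time, and deoptimizes when it no longer matches the object's map.

    struct Map;
    struct WeakCell { Map* value; };   // cleared by the GC when the map dies
    struct HeapObject { Map* map; };

    // Mirrors the IfNot<HCompareObjectEqAndBranch> + ThenDeopt sequence:
    // true means execution may continue, false means the "Unknown map" deopt.
    bool CheckMapViaWeakCell(const HeapObject* obj, const WeakCell* cell) {
      return cell->value != nullptr && obj->map == cell->value;
    }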
@@ -3102,10 +3095,10 @@ void HGraphBuilder::BuildCreateAllocationMemento(
       HObjectAccess::ForAllocationMementoSite(),
       allocation_site);
   if (FLAG_allocation_site_pretenuring) {
-    HValue* memento_create_count = Add<HLoadNamedField>(
-        allocation_site, static_cast<HValue*>(NULL),
-        HObjectAccess::ForAllocationSiteOffset(
-            AllocationSite::kPretenureCreateCountOffset));
+    HValue* memento_create_count =
+        Add<HLoadNamedField>(allocation_site, nullptr,
+                             HObjectAccess::ForAllocationSiteOffset(
+                                 AllocationSite::kPretenureCreateCountOffset));
     memento_create_count = AddUncasted<HAdd>(
         memento_create_count, graph()->GetConstant1());
     // This smi value is reset to zero after every gc, overflow isn't a problem
@@ -3119,29 +3112,36 @@ void HGraphBuilder::BuildCreateAllocationMemento(
 
 
 HInstruction* HGraphBuilder::BuildGetNativeContext(HValue* closure) {
-  // Get the global context, then the native context
-  HInstruction* context =
-      Add<HLoadNamedField>(closure, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForFunctionContextPointer());
+  // Get the global object, then the native context
+  HInstruction* context = Add<HLoadNamedField>(
+      closure, nullptr, HObjectAccess::ForFunctionContextPointer());
   HInstruction* global_object = Add<HLoadNamedField>(
-      context, static_cast<HValue*>(NULL),
+      context, nullptr,
       HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
   HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
       GlobalObject::kNativeContextOffset);
-  return Add<HLoadNamedField>(
-      global_object, static_cast<HValue*>(NULL), access);
+  return Add<HLoadNamedField>(global_object, nullptr, access);
+}
+
+
+HInstruction* HGraphBuilder::BuildGetScriptContext(int context_index) {
+  HValue* native_context = BuildGetNativeContext();
+  HValue* script_context_table = Add<HLoadNamedField>(
+      native_context, nullptr,
+      HObjectAccess::ForContextSlot(Context::SCRIPT_CONTEXT_TABLE_INDEX));
+  return Add<HLoadNamedField>(script_context_table, nullptr,
+                              HObjectAccess::ForScriptContext(context_index));
 }
 
 
 HInstruction* HGraphBuilder::BuildGetNativeContext() {
-  // Get the global context, then the native context
+  // Get the global object, then the native context
   HValue* global_object = Add<HLoadNamedField>(
-      context(), static_cast<HValue*>(NULL),
+      context(), nullptr,
       HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
-  return Add<HLoadNamedField>(
-      global_object, static_cast<HValue*>(NULL),
-      HObjectAccess::ForObservableJSObjectOffset(
-          GlobalObject::kNativeContextOffset));
+  return Add<HLoadNamedField>(global_object, nullptr,
+                              HObjectAccess::ForObservableJSObjectOffset(
+                                  GlobalObject::kNativeContextOffset));
 }
 
 
@@ -3149,8 +3149,7 @@ HInstruction* HGraphBuilder::BuildGetArrayFunction() {
   HInstruction* native_context = BuildGetNativeContext();
   HInstruction* index =
       Add<HConstant>(static_cast<int32_t>(Context::ARRAY_FUNCTION_INDEX));
-  return Add<HLoadKeyed>(
-      native_context, index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+  return Add<HLoadKeyed>(native_context, index, nullptr, FAST_ELEMENTS);
 }
 
 
@@ -3195,8 +3194,8 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
     // No need for a context lookup if the kind_ matches the initial
     // map, because we can just load the map in that case.
     HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
-    return builder()->Add<HLoadNamedField>(
-        constructor_function_, static_cast<HValue*>(NULL), access);
+    return builder()->Add<HLoadNamedField>(constructor_function_, nullptr,
+                                           access);
   }
 
   // TODO(mvstanton): we should always have a constructor function if we
@@ -3208,21 +3207,21 @@ HValue* HGraphBuilder::JSArrayBuilder::EmitMapCode() {
   HInstruction* index = builder()->Add<HConstant>(
       static_cast<int32_t>(Context::JS_ARRAY_MAPS_INDEX));
 
-  HInstruction* map_array = builder()->Add<HLoadKeyed>(
-      native_context, index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+  HInstruction* map_array =
+      builder()->Add<HLoadKeyed>(native_context, index, nullptr, FAST_ELEMENTS);
 
   HInstruction* kind_index = builder()->Add<HConstant>(kind_);
 
-  return builder()->Add<HLoadKeyed>(
-      map_array, kind_index, static_cast<HValue*>(NULL), FAST_ELEMENTS);
+  return builder()->Add<HLoadKeyed>(map_array, kind_index, nullptr,
+                                    FAST_ELEMENTS);
 }
 
 
 HValue* HGraphBuilder::JSArrayBuilder::EmitInternalMapCode() {
   // Load the initial map stored on the constructor function.
   HObjectAccess access = HObjectAccess::ForPrototypeOrInitialMap();
-  return builder()->Add<HLoadNamedField>(
-      constructor_function_, static_cast<HValue*>(NULL), access);
+  return builder()->Add<HLoadNamedField>(constructor_function_, nullptr,
+                                         access);
 }
 
 
@@ -3321,16 +3320,14 @@ HAllocate* HGraphBuilder::JSArrayBuilder::AllocateArray(
 
 HValue* HGraphBuilder::AddLoadJSBuiltin(Builtins::JavaScript builtin) {
   HValue* global_object = Add<HLoadNamedField>(
-      context(), static_cast<HValue*>(NULL),
+      context(), nullptr,
       HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
   HObjectAccess access = HObjectAccess::ForObservableJSObjectOffset(
       GlobalObject::kBuiltinsOffset);
-  HValue* builtins = Add<HLoadNamedField>(
-      global_object, static_cast<HValue*>(NULL), access);
+  HValue* builtins = Add<HLoadNamedField>(global_object, nullptr, access);
   HObjectAccess function_access = HObjectAccess::ForObservableJSObjectOffset(
           JSBuiltinsObject::OffsetOfFunctionWithId(builtin));
-  return Add<HLoadNamedField>(
-      builtins, static_cast<HValue*>(NULL), function_access);
+  return Add<HLoadNamedField>(builtins, nullptr, function_access);
 }
 
 
@@ -3346,7 +3343,7 @@ HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info)
   // This is not initialized in the initializer list because the
   // constructor for the initial state relies on function_state_ == NULL
   // to know it's the initial state.
-  function_state_= &initial_function_state_;
+  function_state_ = &initial_function_state_;
   InitializeAstVisitor(info->zone());
   if (FLAG_hydrogen_track_positions) {
     SetSourcePosition(info->shared_info()->start_position());
@@ -4040,8 +4037,12 @@ void EffectContext::ReturnValue(HValue* value) {
 void ValueContext::ReturnValue(HValue* value) {
   // The value is tracked in the bailout environment, and communicated
   // through the environment as the result of the expression.
-  if (!arguments_allowed() && value->CheckFlag(HValue::kIsArguments)) {
-    owner()->Bailout(kBadValueContextForArgumentsValue);
+  if (value->CheckFlag(HValue::kIsArguments)) {
+    if (flag_ == ARGUMENTS_FAKED) {
+      value = owner()->graph()->GetConstantUndefined();
+    } else if (!arguments_allowed()) {
+      owner()->Bailout(kBadValueContextForArgumentsValue);
+    }
   }
   owner()->Push(value);
 }
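
With ARGUMENTS_FAKED, a value carrying the kIsArguments flag is replaced by
undefined instead of triggering a bailout. This covers call shapes like
"f.apply(this, arguments)" where f is not a known constant; it stays sound
only because the code that requests faking (VisitCall, further down in this
patch) first emits an eager HDeoptimize, so the faked undefined is never
observed by running code.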
@@ -4267,20 +4268,16 @@ void HOptimizedGraphBuilder::VisitExpressions(
 }
 
 
-bool HOptimizedGraphBuilder::BuildGraph() {
-  if (current_info()->function()->is_generator()) {
-    Bailout(kFunctionIsAGenerator);
-    return false;
+void HOptimizedGraphBuilder::VisitExpressions(ZoneList<Expression*>* exprs,
+                                              ArgumentsAllowedFlag flag) {
+  for (int i = 0; i < exprs->length(); ++i) {
+    CHECK_ALIVE(VisitForValue(exprs->at(i), flag));
   }
+}
+
+
+bool HOptimizedGraphBuilder::BuildGraph() {
   Scope* scope = current_info()->scope();
-  if (scope->HasIllegalRedeclaration()) {
-    Bailout(kFunctionWithIllegalRedeclaration);
-    return false;
-  }
-  if (scope->calls_eval()) {
-    Bailout(kFunctionCallsEval);
-    return false;
-  }
   SetUpScope(scope);
 
   // Add an edge to the body entry.  This is warty: the graph's start
@@ -4516,10 +4513,6 @@ void HOptimizedGraphBuilder::SetUpScope(Scope* scope) {
   // Handle the arguments and arguments shadow variables specially (they do
   // not have declarations).
   if (scope->arguments() != NULL) {
-    if (!scope->arguments()->IsStackAllocated()) {
-      return Bailout(kContextAllocatedArguments);
-    }
-
     environment()->Bind(scope->arguments(),
                         graph()->GetArgumentsObject());
   }
@@ -4555,7 +4548,7 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
       Scope* declaration_scope = scope->DeclarationScope();
       HInstruction* function;
       HValue* outer_context = environment()->context();
-      if (declaration_scope->is_global_scope() ||
+      if (declaration_scope->is_script_scope() ||
           declaration_scope->is_eval_scope()) {
         function = new(zone()) HLoadContextSlot(
             outer_context, Context::CLOSURE_INDEX, HLoadContextSlot::kNoCheck);
@@ -4567,11 +4560,11 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
       HInstruction* inner_context = Add<HAllocateBlockContext>(
           outer_context, function, scope->GetScopeInfo());
       HInstruction* instr = Add<HStoreFrameContext>(inner_context);
+      set_scope(scope);
+      environment()->BindContext(inner_context);
       if (instr->HasObservableSideEffects()) {
         AddSimulate(stmt->EntryId(), REMOVABLE_SIMULATE);
       }
-      set_scope(scope);
-      environment()->BindContext(inner_context);
       VisitDeclarations(scope->declarations());
       AddSimulate(stmt->DeclsId(), REMOVABLE_SIMULATE);
     }
@@ -4581,14 +4574,14 @@ void HOptimizedGraphBuilder::VisitBlock(Block* stmt) {
   if (scope != NULL && current_block() != NULL) {
     HValue* inner_context = environment()->context();
     HValue* outer_context = Add<HLoadNamedField>(
-        inner_context, static_cast<HValue*>(NULL),
+        inner_context, nullptr,
         HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
 
     HInstruction* instr = Add<HStoreFrameContext>(outer_context);
+    environment()->BindContext(outer_context);
     if (instr->HasObservableSideEffects()) {
       AddSimulate(stmt->ExitId(), REMOVABLE_SIMULATE);
     }
-    environment()->BindContext(outer_context);
   }
   HBasicBlock* break_block = break_info.break_block();
   if (break_block != NULL) {
@@ -4712,7 +4705,7 @@ void HOptimizedGraphBuilder::VisitContinueStatement(
   if (context_pop_count > 0) {
     while (context_pop_count-- > 0) {
       HInstruction* context_instruction = Add<HLoadNamedField>(
-          context, static_cast<HValue*>(NULL),
+          context, nullptr,
           HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
       context = context_instruction;
     }
@@ -4744,7 +4737,7 @@ void HOptimizedGraphBuilder::VisitBreakStatement(BreakStatement* stmt) {
   if (context_pop_count > 0) {
     while (context_pop_count-- > 0) {
       HInstruction* context_instruction = Add<HLoadNamedField>(
-          context, static_cast<HValue*>(NULL),
+          context, nullptr,
           HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
       context = context_instruction;
     }
@@ -5344,7 +5337,7 @@ HValue* HOptimizedGraphBuilder::BuildContextChainWalk(Variable* var) {
   int length = scope()->ContextChainLength(var->scope());
   while (length-- > 0) {
     context = Add<HLoadNamedField>(
-        context, static_cast<HValue*>(NULL),
+        context, nullptr,
         HObjectAccess::ForContextSlot(Context::PREVIOUS_INDEX));
   }
   return context;
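
The loop above resolves a variable declared N scopes out with N chained
loads: each context's PREVIOUS_INDEX slot holds its lexical parent. A
standalone paraphrase, using a simplified Context type rather than V8's
layout:

    struct Context { Context* previous; };

    Context* WalkContextChain(Context* context, int length) {
      while (length-- > 0) context = context->previous;  // one load per hop
      return context;
    }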
@@ -5376,6 +5369,22 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
       }
 
       Handle<GlobalObject> global(current_info()->global_object());
+
+      if (FLAG_harmony_scoping) {
+        Handle<ScriptContextTable> script_contexts(
+            global->native_context()->script_context_table());
+        ScriptContextTable::LookupResult lookup;
+        if (ScriptContextTable::Lookup(script_contexts, variable->name(),
+                                       &lookup)) {
+          Handle<Context> script_context = ScriptContextTable::GetContext(
+              script_contexts, lookup.context_index);
+          HInstruction* result = New<HLoadNamedField>(
+              Add<HConstant>(script_context), nullptr,
+              HObjectAccess::ForContextSlot(lookup.slot_index));
+          return ast_context()->ReturnInstruction(result, expr->id());
+        }
+      }
+
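
The fast path added above: under --harmony-scoping, top-level let/const
bindings live in script contexts registered in the native context's
ScriptContextTable, so a global read that resolves there becomes a single
field load from a constant context. A simplified paraphrase of the table
lookup, with stand-in types rather than V8's:

    #include <string>
    #include <vector>

    struct ScriptContext {
      std::vector<std::string> names;  // one slot per top-level binding
    };
    struct LookupResult { int context_index; int slot_index; };

    // Paraphrase of ScriptContextTable::Lookup: report which script context,
    // and which slot within it, holds the named binding.
    bool Lookup(const std::vector<ScriptContext>& table,
                const std::string& name, LookupResult* out) {
      for (size_t c = 0; c < table.size(); ++c) {
        for (size_t s = 0; s < table[c].names.size(); ++s) {
          if (table[c].names[s] != name) continue;
          out->context_index = static_cast<int>(c);
          out->slot_index = static_cast<int>(s);
          return true;
        }
      }
      return false;
    }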
       LookupIterator it(global, variable->name(),
                         LookupIterator::OWN_SKIP_INTERCEPTOR);
       GlobalPropertyAccess type = LookupGlobalProperty(variable, &it, LOAD);
@@ -5398,7 +5407,7 @@ void HOptimizedGraphBuilder::VisitVariableProxy(VariableProxy* expr) {
         }
       } else {
         HValue* global_object = Add<HLoadNamedField>(
-            context(), static_cast<HValue*>(NULL),
+            context(), nullptr,
             HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
         HLoadGlobalGeneric* instr =
             New<HLoadGlobalGeneric>(global_object,
@@ -5503,7 +5512,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
   Handle<FixedArrayBase> elements(boilerplate->elements());
   if (elements->length() > 0 &&
       elements->map() != isolate->heap()->fixed_cow_array_map()) {
-    if (boilerplate->HasFastObjectElements()) {
+    if (boilerplate->HasFastSmiOrObjectElements()) {
       Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
       int length = elements->length();
       for (int i = 0; i < length; i++) {
@@ -5533,9 +5542,11 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
     for (int i = 0; i < limit; i++) {
       PropertyDetails details = descriptors->GetDetails(i);
       if (details.type() != FIELD) continue;
-      int index = descriptors->GetFieldIndex(i);
       if ((*max_properties)-- == 0) return false;
-      Handle<Object> value(boilerplate->InObjectPropertyAt(index), isolate);
+      FieldIndex field_index = FieldIndex::ForDescriptor(boilerplate->map(), i);
+      if (boilerplate->IsUnboxedDoubleField(field_index)) continue;
+      Handle<Object> value(boilerplate->RawFastPropertyAt(field_index),
+                           isolate);
       if (value->IsJSObject()) {
         Handle<JSObject> value_object = Handle<JSObject>::cast(value);
         if (!IsFastLiteral(value_object,
@@ -5625,6 +5636,17 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
           if (property->emit_store()) {
             CHECK_ALIVE(VisitForValue(value));
             HValue* value = Pop();
+
+            // Add [[HomeObject]] to function literals.
+            if (FunctionLiteral::NeedsHomeObject(property->value())) {
+              Handle<Symbol> sym = isolate()->factory()->home_object_symbol();
+              HInstruction* store_home = BuildKeyedGeneric(
+                  STORE, NULL, value, Add<HConstant>(sym), literal);
+              AddInstruction(store_home);
+              DCHECK(store_home->HasObservableSideEffects());
+              Add<HSimulate>(property->value()->id(), REMOVABLE_SIMULATE);
+            }
+
             Handle<Map> map = property->GetReceiverType();
             Handle<String> name = property->key()->AsPropertyName();
             HInstruction* store;
@@ -5646,9 +5668,8 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
               }
             }
             AddInstruction(store);
-            if (store->HasObservableSideEffects()) {
-              Add<HSimulate>(key->id(), REMOVABLE_SIMULATE);
-            }
+            DCHECK(store->HasObservableSideEffects());
+            Add<HSimulate>(key->id(), REMOVABLE_SIMULATE);
           } else {
             CHECK_ALIVE(VisitForEffect(value));
           }
@@ -5752,17 +5773,13 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
                         Add<HConstant>(constants),
                         Add<HConstant>(flags));
 
-    // TODO(mvstanton): Consider a flag to turn off creation of any
-    // AllocationMementos for this call: we are in crankshaft and should have
-    // learned enough about transition behavior to stop emitting mementos.
     Runtime::FunctionId function_id = Runtime::kCreateArrayLiteral;
     literal = Add<HCallRuntime>(isolate()->factory()->empty_string(),
                                 Runtime::FunctionForId(function_id),
                                 4);
 
-    // De-opt if elements kind changed from boilerplate_elements_kind.
-    Handle<Map> map = Handle<Map>(boilerplate_object->map(), isolate());
-    literal = Add<HCheckMaps>(literal, map);
+    // Register to deopt if the boilerplate ElementsKind changes.
+    AllocationSite::RegisterForDeoptOnTransitionChange(site, top_info());
   }
 
   // The array is expected in the bailout environment during computation
@@ -5838,10 +5855,11 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedField(
   }
 
   HObjectAccess access = info->access();
-  if (access.representation().IsDouble()) {
+  if (access.representation().IsDouble() &&
+      (!FLAG_unbox_double_fields || !access.IsInobject())) {
     // Load the heap number.
     checked_object = Add<HLoadNamedField>(
-        checked_object, static_cast<HValue*>(NULL),
+        checked_object, nullptr,
         access.WithRepresentation(Representation::Tagged()));
     // Load the double value from it.
     access = HObjectAccess::ForHeapNumberValue();
@@ -5870,7 +5888,8 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
   HObjectAccess field_access = info->access();
 
   HStoreNamedField *instr;
-  if (field_access.representation().IsDouble()) {
+  if (field_access.representation().IsDouble() &&
+      (!FLAG_unbox_double_fields || !field_access.IsInobject())) {
     HObjectAccess heap_number_access =
         field_access.WithRepresentation(Representation::Tagged());
     if (transition_to_field) {
@@ -5892,8 +5911,8 @@ HInstruction* HOptimizedGraphBuilder::BuildStoreNamedField(
                                     heap_number);
     } else {
       // Already holds a HeapNumber; load the box and write its value field.
-      HInstruction* heap_number = Add<HLoadNamedField>(
-          checked_object, static_cast<HValue*>(NULL), heap_number_access);
+      HInstruction* heap_number =
+          Add<HLoadNamedField>(checked_object, nullptr, heap_number_access);
       instr = New<HStoreNamedField>(heap_number,
                                     HObjectAccess::ForHeapNumberValue(),
                                     value, STORE_TO_INITIALIZED_ENTRY);
@@ -6502,6 +6521,27 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
     HValue* value,
     BailoutId ast_id) {
   Handle<GlobalObject> global(current_info()->global_object());
+
+  if (FLAG_harmony_scoping) {
+    Handle<ScriptContextTable> script_contexts(
+        global->native_context()->script_context_table());
+    ScriptContextTable::LookupResult lookup;
+    if (ScriptContextTable::Lookup(script_contexts, var->name(), &lookup)) {
+      if (lookup.mode == CONST) {
+        return Bailout(kNonInitializerAssignmentToConst);
+      }
+      Handle<Context> script_context =
+          ScriptContextTable::GetContext(script_contexts, lookup.context_index);
+      HStoreNamedField* instr = Add<HStoreNamedField>(
+          Add<HConstant>(script_context),
+          HObjectAccess::ForContextSlot(lookup.slot_index), value);
+      USE(instr);
+      DCHECK(instr->HasObservableSideEffects());
+      Add<HSimulate>(ast_id, REMOVABLE_SIMULATE);
+      return;
+    }
+  }
+
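
This is the store-side twin of the load path added in VisitVariableProxy: a
binding found in the ScriptContextTable becomes a direct HStoreNamedField
into the script context slot, and an assignment to a CONST binding now bails
out with kNonInitializerAssignmentToConst instead of falling through to the
generic global store.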
   LookupIterator it(global, var->name(), LookupIterator::OWN_SKIP_INTERCEPTOR);
   GlobalPropertyAccess type = LookupGlobalProperty(var, &it, STORE);
   if (type == kUseCell) {
@@ -6536,7 +6576,7 @@ void HOptimizedGraphBuilder::HandleGlobalVariableAssignment(
     }
   } else {
     HValue* global_object = Add<HLoadNamedField>(
-        context(), static_cast<HValue*>(NULL),
+        context(), nullptr,
         HObjectAccess::ForContextSlot(Context::GLOBAL_OBJECT_INDEX));
     HStoreNamedGeneric* instr =
         Add<HStoreNamedGeneric>(global_object, var->name(),
@@ -6578,6 +6618,9 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
         if (var->mode() == CONST_LEGACY)  {
           return Bailout(kUnsupportedConstCompoundAssignment);
         }
+        if (var->mode() == CONST) {
+          return Bailout(kNonInitializerAssignmentToConst);
+        }
         BindIfLive(var, Top());
         break;
 
@@ -6604,9 +6647,7 @@ void HOptimizedGraphBuilder::HandleCompoundAssignment(Assignment* expr) {
             mode = HStoreContextSlot::kCheckDeoptimize;
             break;
           case CONST:
-            // This case is checked statically so no need to
-            // perform checks here
-            UNREACHABLE();
+            return Bailout(kNonInitializerAssignmentToConst);
           case CONST_LEGACY:
             return ast_context()->ReturnValue(Pop());
           default:
@@ -6819,9 +6860,8 @@ HInstruction* HGraphBuilder::AddLoadStringInstanceType(HValue* string) {
     }
   }
   return Add<HLoadNamedField>(
-      Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
-                           HObjectAccess::ForMap()),
-      static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType());
+      Add<HLoadNamedField>(string, nullptr, HObjectAccess::ForMap()), nullptr,
+      HObjectAccess::ForMapInstanceType());
 }
 
 
@@ -6832,7 +6872,7 @@ HInstruction* HGraphBuilder::AddLoadStringLength(HValue* string) {
       return Add<HConstant>(c_string->StringValue()->length());
     }
   }
-  return Add<HLoadNamedField>(string, static_cast<HValue*>(NULL),
+  return Add<HLoadNamedField>(string, nullptr,
                               HObjectAccess::ForStringLength());
 }
 
@@ -6853,9 +6893,10 @@ HInstruction* HOptimizedGraphBuilder::BuildNamedGeneric(
     if (FLAG_vector_ics) {
       Handle<SharedFunctionInfo> current_shared =
           function_state()->compilation_info()->shared_info();
-      result->SetVectorAndSlot(
-          handle(current_shared->feedback_vector(), isolate()),
-          expr->AsProperty()->PropertyFeedbackSlot());
+      Handle<TypeFeedbackVector> vector =
+          handle(current_shared->feedback_vector(), isolate());
+      FeedbackVectorICSlot slot = expr->AsProperty()->PropertyFeedbackSlot();
+      result->SetVectorAndSlot(vector, slot);
     }
     return result;
   } else {
@@ -6876,9 +6917,10 @@ HInstruction* HOptimizedGraphBuilder::BuildKeyedGeneric(
     if (FLAG_vector_ics) {
       Handle<SharedFunctionInfo> current_shared =
           function_state()->compilation_info()->shared_info();
-      result->SetVectorAndSlot(
-          handle(current_shared->feedback_vector(), isolate()),
-          expr->AsProperty()->PropertyFeedbackSlot());
+      Handle<TypeFeedbackVector> vector =
+          handle(current_shared->feedback_vector(), isolate());
+      FeedbackVectorICSlot slot = expr->AsProperty()->PropertyFeedbackSlot();
+      result->SetVectorAndSlot(vector, slot);
     }
     return result;
   } else {
@@ -7040,7 +7082,9 @@ HValue* HOptimizedGraphBuilder::HandlePolymorphicElementAccess(
   MapHandleList possible_transitioned_maps(maps->length());
   for (int i = 0; i < maps->length(); ++i) {
     Handle<Map> map = maps->at(i);
-    DCHECK(!map->IsStringMap());
+    // Loads from strings or loads with a mix of string and non-string maps
+    // shouldn't be handled polymorphically.
+    DCHECK(access_type != LOAD || !map->IsStringMap());
     ElementsKind elements_kind = map->elements_kind();
     if (CanInlineElementAccess(map) && IsFastElementsKind(elements_kind) &&
         elements_kind != GetInitialFastElementsKind()) {
@@ -7152,7 +7196,9 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
     HValue* obj, HValue* key, HValue* val, Expression* expr, BailoutId ast_id,
     BailoutId return_id, PropertyAccessType access_type,
     bool* has_side_effects) {
-  if (key->ActualValue()->IsConstant()) {
+  // TODO(mvstanton): This optimization causes trouble for vector-based
+  // KeyedLoadICs; turn it off for now.
+  if (!FLAG_vector_ics && key->ActualValue()->IsConstant()) {
     Handle<Object> constant =
         HConstant::cast(key->ActualValue())->handle(isolate());
     uint32_t array_index;
@@ -7182,7 +7228,7 @@ HValue* HOptimizedGraphBuilder::HandleKeyedElementAccess(
   bool monomorphic = ComputeReceiverTypes(expr, obj, &types, zone());
 
   bool force_generic = false;
-  if (access_type == STORE && expr->GetKeyType() == PROPERTY) {
+  if (expr->GetKeyType() == PROPERTY) {
     // Non-Generic accesses assume that elements are being accessed, and will
     // deopt for non-index keys, which the IC knows will occur.
     // TODO(jkummerow): Consider adding proper support for property accesses.
@@ -7517,8 +7563,7 @@ HInstruction* HOptimizedGraphBuilder::BuildCallConstantFunction(
   } else {
     HValue* param_count_value = Add<HConstant>(formal_parameter_count);
     HValue* context = Add<HLoadNamedField>(
-        target, static_cast<HValue*>(NULL),
-        HObjectAccess::ForFunctionContextPointer());
+        target, nullptr, HObjectAccess::ForFunctionContextPointer());
     return NewArgumentAdaptorCall(target, context,
         argument_count, param_count_value);
   }
@@ -7785,7 +7830,7 @@ int HOptimizedGraphBuilder::InliningAstSize(Handle<JSFunction> target) {
     TraceInline(target, caller, "target not inlineable");
     return kNotInlinable;
   }
-  if (target_shared->DisableOptimizationReason() != kNoReason) {
+  if (target_shared->disable_optimization_reason() != kNoReason) {
     TraceInline(target, caller, "target contains unsupported syntax [early]");
     return kNotInlinable;
   }
@@ -7885,13 +7930,6 @@ bool HOptimizedGraphBuilder::TryInline(Handle<JSFunction> target,
       TraceInline(target, caller, "target uses arguments object");
       return false;
     }
-
-    if (!function->scope()->arguments()->IsStackAllocated()) {
-      TraceInline(target,
-                  caller,
-                  "target uses non-stackallocated arguments object");
-      return false;
-    }
   }
 
   // All declarations must be inlineable.
@@ -8301,6 +8339,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
       if (receiver_map.is_null()) return false;
       if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
       ElementsKind elements_kind = receiver_map->elements_kind();
+      if (JSArray::IsReadOnlyLengthDescriptor(receiver_map)) return false;
       if (!IsFastElementsKind(elements_kind)) return false;
       if (receiver_map->is_observed()) return false;
       if (!receiver_map->is_extensible()) return false;
@@ -8311,9 +8350,9 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
       HValue* receiver = Pop();
 
       HValue* checked_object = AddCheckMap(receiver, receiver_map);
-      HValue* length = Add<HLoadNamedField>(
-          checked_object, static_cast<HValue*>(NULL),
-          HObjectAccess::ForArrayLength(elements_kind));
+      HValue* length =
+          Add<HLoadNamedField>(checked_object, nullptr,
+                               HObjectAccess::ForArrayLength(elements_kind));
 
       Drop(1);  // Function.
 
@@ -8394,8 +8433,8 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
       {
         NoObservableSideEffectsScope scope(this);
 
-        length = Add<HLoadNamedField>(array, static_cast<HValue*>(NULL),
-          HObjectAccess::ForArrayLength(elements_kind));
+        length = Add<HLoadNamedField>(
+            array, nullptr, HObjectAccess::ForArrayLength(elements_kind));
 
         new_size = AddUncasted<HAdd>(length, graph()->GetConstant1());
 
@@ -8418,6 +8457,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
       if (receiver_map.is_null()) return false;
       if (receiver_map->instance_type() != JS_ARRAY_TYPE) return false;
       ElementsKind kind = receiver_map->elements_kind();
+      if (JSArray::IsReadOnlyLengthDescriptor(receiver_map)) return false;
       if (!IsFastElementsKind(kind)) return false;
       if (receiver_map->is_observed()) return false;
       if (!receiver_map->is_extensible()) return false;
@@ -8447,8 +8487,7 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
         NoObservableSideEffectsScope scope(this);
 
         HValue* length = Add<HLoadNamedField>(
-            receiver, static_cast<HValue*>(NULL),
-            HObjectAccess::ForArrayLength(kind));
+            receiver, nullptr, HObjectAccess::ForArrayLength(kind));
 
         IfBuilder if_lengthiszero(this);
         HValue* lengthiszero = if_lengthiszero.If<HCompareNumericAndBranch>(
@@ -8490,10 +8529,12 @@ bool HOptimizedGraphBuilder::TryInlineBuiltinMethodCall(
                   graph()->GetConstant0(), new_length, Token::LT);
               HValue* key = AddUncasted<HAdd>(new_key, graph()->GetConstant1());
               key->ClearFlag(HValue::kCanOverflow);
+              ElementsKind copy_kind =
+                  kind == FAST_HOLEY_SMI_ELEMENTS ? FAST_HOLEY_ELEMENTS : kind;
               HValue* element = AddUncasted<HLoadKeyed>(
-                  elements, key, lengthiszero, kind, ALLOW_RETURN_HOLE);
-              HStoreKeyed* store = Add<HStoreKeyed>(
-                  elements, new_key, element, kind);
+                  elements, key, lengthiszero, copy_kind, ALLOW_RETURN_HOLE);
+              HStoreKeyed* store =
+                  Add<HStoreKeyed>(elements, new_key, element, copy_kind);
               store->SetFlag(HValue::kAllowUndefinedAsNaN);
             }
             loop.EndBody();
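
Why copy_kind widens FAST_HOLEY_SMI_ELEMENTS to FAST_HOLEY_ELEMENTS here (an
inference from the ALLOW_RETURN_HOLE flag, not spelled out in the patch): a
keyed load typed as holey-smi may not materialize the hole as a value, so the
element-by-element shift loads and stores with the generic tagged holey kind,
moving holes verbatim instead of deoptimizing on them.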
@@ -8807,13 +8848,8 @@ bool HOptimizedGraphBuilder::TryIndirectCall(Call* expr) {
       // is supported.
       if (current_info()->scope()->arguments() == NULL) return false;
 
-      ZoneList<Expression*>* args = expr->arguments();
-      if (args->length() != 2) return false;
+      if (!CanBeFunctionApplyArguments(expr)) return false;
 
-      VariableProxy* arg_two = args->at(1)->AsVariableProxy();
-      if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
-      HValue* arg_two_value = LookupAndMakeLive(arg_two->var());
-      if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
       BuildFunctionApply(expr);
       return true;
     }
@@ -8958,9 +8994,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
     LoopBuilder loop(this, context(), direction);
     {
       HValue* index = loop.BeginBody(initial, terminating, token);
-      HValue* element = AddUncasted<HLoadKeyed>(
-          elements, index, static_cast<HValue*>(NULL),
-          kind, ALLOW_RETURN_HOLE);
+      HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr, kind,
+                                                ALLOW_RETURN_HOLE);
       IfBuilder if_issame(this);
       if_issame.If<HCompareNumericAndBranch>(element, search_element,
                                              Token::EQ_STRICT);
@@ -8981,9 +9016,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
       LoopBuilder loop(this, context(), direction);
       {
         HValue* index = loop.BeginBody(initial, terminating, token);
-        HValue* element = AddUncasted<HLoadKeyed>(
-            elements, index, static_cast<HValue*>(NULL),
-            kind, ALLOW_RETURN_HOLE);
+        HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr,
+                                                  kind, ALLOW_RETURN_HOLE);
         IfBuilder if_issame(this);
         if_issame.If<HIsStringAndBranch>(element);
         if_issame.AndIf<HStringCompareAndBranch>(
@@ -9012,9 +9046,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
         LoopBuilder loop(this, context(), direction);
         {
           HValue* index = loop.BeginBody(initial, terminating, token);
-          HValue* element = AddUncasted<HLoadKeyed>(
-              elements, index, static_cast<HValue*>(NULL),
-              kind, ALLOW_RETURN_HOLE);
+          HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr,
+                                                    kind, ALLOW_RETURN_HOLE);
 
           IfBuilder if_element_isnumber(this);
           if_element_isnumber.If<HIsSmiAndBranch>(element);
@@ -9045,9 +9078,8 @@ HValue* HOptimizedGraphBuilder::BuildArrayIndexOf(HValue* receiver,
         LoopBuilder loop(this, context(), direction);
         {
           HValue* index = loop.BeginBody(initial, terminating, token);
-          HValue* element = AddUncasted<HLoadKeyed>(
-              elements, index, static_cast<HValue*>(NULL),
-              kind, ALLOW_RETURN_HOLE);
+          HValue* element = AddUncasted<HLoadKeyed>(elements, index, nullptr,
+                                                    kind, ALLOW_RETURN_HOLE);
           IfBuilder if_issame(this);
           if_issame.If<HCompareObjectEqAndBranch>(
               element, search_element);
@@ -9100,6 +9132,17 @@ bool HOptimizedGraphBuilder::TryHandleArrayCallNew(CallNew* expr,
 }
 
 
+bool HOptimizedGraphBuilder::CanBeFunctionApplyArguments(Call* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  if (args->length() != 2) return false;
+  VariableProxy* arg_two = args->at(1)->AsVariableProxy();
+  if (arg_two == NULL || !arg_two->var()->IsStackAllocated()) return false;
+  HValue* arg_two_value = LookupAndMakeLive(arg_two->var());
+  if (!arg_two_value->CheckFlag(HValue::kIsArguments)) return false;
+  return true;
+}
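
The predicate above recognizes the canonical forwarding pattern: a
two-argument call whose second argument is the enclosing function's own
stack-allocated arguments object, i.e. code shaped like
"return f.apply(this, arguments);". The two call sites that previously
duplicated these checks, TryIndirectCall and VisitCall, now share it.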
+
+
 void HOptimizedGraphBuilder::VisitCall(Call* expr) {
   DCHECK(!HasStackOverflow());
   DCHECK(current_block() != NULL);
@@ -9136,13 +9179,14 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
 
     if (FLAG_hydrogen_track_positions) SetSourcePosition(expr->position());
 
-    // Push the function under the receiver.
-    environment()->SetExpressionStackAt(0, function);
 
-    Push(receiver);
 
     if (function->IsConstant() &&
         HConstant::cast(function)->handle(isolate())->IsJSFunction()) {
+      // Push the function under the receiver.
+      environment()->SetExpressionStackAt(0, function);
+      Push(receiver);
+
       Handle<JSFunction> known_function = Handle<JSFunction>::cast(
           HConstant::cast(function)->handle(isolate()));
       expr->set_target(known_function);
@@ -9178,7 +9222,20 @@ void HOptimizedGraphBuilder::VisitCall(Call* expr) {
       }
 
     } else {
-      CHECK_ALIVE(VisitExpressions(expr->arguments()));
+      ArgumentsAllowedFlag arguments_flag = ARGUMENTS_NOT_ALLOWED;
+      if (CanBeFunctionApplyArguments(expr) && expr->is_uninitialized()) {
+        // We have to use EAGER deoptimization here because Deoptimizer::SOFT
+        // gets ignored by the always-opt flag, which leads to incorrect code.
+        Add<HDeoptimize>("Insufficient type feedback for call with arguments",
+                         Deoptimizer::EAGER);
+        arguments_flag = ARGUMENTS_FAKED;
+      }
+
+      // Push the function under the receiver.
+      environment()->SetExpressionStackAt(0, function);
+      Push(receiver);
+
+      CHECK_ALIVE(VisitExpressions(expr->arguments(), arguments_flag));
       CallFunctionFlags flags = receiver->type().IsJSObject()
           ? NO_CALL_FUNCTION_FLAGS : CALL_AS_METHOD;
       call = New<HCallFunction>(function, argument_count, flags);
@@ -9291,8 +9348,7 @@ void HOptimizedGraphBuilder::BuildInlinedCallArray(
   HValue* constructor = environment()->ExpressionStackAt(argument_count);
 
   // Register on the site for deoptimization if the transition feedback changes.
-  AllocationSite::AddDependentCompilationInfo(
-      site, AllocationSite::TRANSITIONS, top_info());
+  AllocationSite::RegisterForDeoptOnTransitionChange(site, top_info());
   ElementsKind kind = site->GetElementsKind();
   HInstruction* site_instruction = Add<HConstant>(site);
 
@@ -9422,9 +9478,8 @@ void HOptimizedGraphBuilder::VisitCallNew(CallNew* expr) {
         Handle<AllocationSite> allocation_site = expr->allocation_site();
         allocation_mode = HAllocationMode(allocation_site);
         // Take a dependency on allocation site.
-        AllocationSite::AddDependentCompilationInfo(allocation_site,
-                                                    AllocationSite::TENURING,
-                                                    top_info());
+        AllocationSite::RegisterForDeoptOnTenureChange(allocation_site,
+                                                       top_info());
       }
     }
 
@@ -9545,11 +9600,9 @@ void HGraphBuilder::BuildArrayBufferViewInitialization(
         HObjectAccess::ForJSArrayBufferViewBuffer(), buffer);
     HObjectAccess weak_first_view_access =
         HObjectAccess::ForJSArrayBufferWeakFirstView();
-    Add<HStoreNamedField>(obj,
-        HObjectAccess::ForJSArrayBufferViewWeakNext(),
-        Add<HLoadNamedField>(buffer,
-                             static_cast<HValue*>(NULL),
-                             weak_first_view_access));
+    Add<HStoreNamedField>(
+        obj, HObjectAccess::ForJSArrayBufferViewWeakNext(),
+        Add<HLoadNamedField>(buffer, nullptr, weak_first_view_access));
     Add<HStoreNamedField>(buffer, weak_first_view_access, obj);
   } else {
     Add<HStoreNamedField>(
@@ -9629,8 +9682,7 @@ HValue* HOptimizedGraphBuilder::BuildAllocateExternalElements(
       HObjectAccess::ForFixedArrayLength(), length);
 
   HValue* backing_store = Add<HLoadNamedField>(
-      buffer, static_cast<HValue*>(NULL),
-      HObjectAccess::ForJSArrayBufferBackingStore());
+      buffer, nullptr, HObjectAccess::ForJSArrayBufferBackingStore());
 
   HValue* typed_array_start;
   if (is_zero_byte_offset) {
@@ -9848,9 +9900,7 @@ void HOptimizedGraphBuilder::GenerateArrayBufferGetByteLength(
   CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
   HValue* buffer = Pop();
   HInstruction* result = New<HLoadNamedField>(
-    buffer,
-    static_cast<HValue*>(NULL),
-    HObjectAccess::ForJSArrayBufferByteLength());
+      buffer, nullptr, HObjectAccess::ForJSArrayBufferByteLength());
   return ast_context()->ReturnInstruction(result, expr->id());
 }
 
@@ -9861,9 +9911,7 @@ void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteLength(
   CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
   HValue* buffer = Pop();
   HInstruction* result = New<HLoadNamedField>(
-    buffer,
-    static_cast<HValue*>(NULL),
-    HObjectAccess::ForJSArrayBufferViewByteLength());
+      buffer, nullptr, HObjectAccess::ForJSArrayBufferViewByteLength());
   return ast_context()->ReturnInstruction(result, expr->id());
 }
 
@@ -9874,9 +9922,7 @@ void HOptimizedGraphBuilder::GenerateArrayBufferViewGetByteOffset(
   CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
   HValue* buffer = Pop();
   HInstruction* result = New<HLoadNamedField>(
-    buffer,
-    static_cast<HValue*>(NULL),
-    HObjectAccess::ForJSArrayBufferViewByteOffset());
+      buffer, nullptr, HObjectAccess::ForJSArrayBufferViewByteOffset());
   return ast_context()->ReturnInstruction(result, expr->id());
 }
 
@@ -9887,9 +9933,7 @@ void HOptimizedGraphBuilder::GenerateTypedArrayGetLength(
   CHECK_ALIVE(VisitForValue(expr->arguments()->at(0)));
   HValue* buffer = Pop();
   HInstruction* result = New<HLoadNamedField>(
-    buffer,
-    static_cast<HValue*>(NULL),
-    HObjectAccess::ForJSTypedArrayLength());
+      buffer, nullptr, HObjectAccess::ForJSTypedArrayLength());
   return ast_context()->ReturnInstruction(result, expr->id());
 }
 
@@ -10122,6 +10166,9 @@ void HOptimizedGraphBuilder::VisitCountOperation(CountOperation* expr) {
     if (var->mode() == CONST_LEGACY)  {
       return Bailout(kUnsupportedCountOperationWithConst);
     }
+    if (var->mode() == CONST) {
+      return Bailout(kNonInitializerAssignmentToConst);
+    }
     // Argument of the count operation is a variable, not a property.
     DCHECK(prop == NULL);
     CHECK_ALIVE(VisitForValue(target));
@@ -10271,7 +10318,7 @@ bool HGraphBuilder::MatchRotateRight(HValue* left,
       !ShiftAmountsAllowReplaceByRotate(shr->right(), shl->right())) {
     return false;
   }
-  *operand= shr->left();
+  *operand = shr->left();
   *shift_amount = shr->right();
   return true;
 }
@@ -10467,8 +10514,7 @@ HValue* HGraphBuilder::BuildBinaryOperation(
     if (!allocation_mode.feedback_site().is_null()) {
       DCHECK(!graph()->info()->IsStub());
       Handle<AllocationSite> site(allocation_mode.feedback_site());
-      AllocationSite::AddDependentCompilationInfo(
-          site, AllocationSite::TENURING, top_info());
+      AllocationSite::RegisterForDeoptOnTenureChange(site, top_info());
     }
 
     // Inline the string addition into the stub when creating allocation
@@ -11063,13 +11109,14 @@ HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
       boilerplate_object->map()->instance_size());
 
   PretenureFlag pretenure_flag = NOT_TENURED;
+  Handle<AllocationSite> site(site_context->current());
   if (FLAG_allocation_site_pretenuring) {
     pretenure_flag = site_context->current()->GetPretenureMode();
-    Handle<AllocationSite> site(site_context->current());
-    AllocationSite::AddDependentCompilationInfo(
-        site, AllocationSite::TENURING, top_info());
+    AllocationSite::RegisterForDeoptOnTenureChange(site, top_info());
   }
 
+  AllocationSite::RegisterForDeoptOnTransitionChange(site, top_info());
+
   HInstruction* object = Add<HAllocate>(object_size_constant, type,
       pretenure_flag, instance_type, site_context->current());
 
@@ -11186,18 +11233,27 @@ void HOptimizedGraphBuilder::BuildEmitInObjectProperties(
     PropertyDetails details = descriptors->GetDetails(i);
     if (details.type() != FIELD) continue;
     copied_fields++;
-    int index = descriptors->GetFieldIndex(i);
-    int property_offset = boilerplate_object->GetInObjectPropertyOffset(index);
+    FieldIndex field_index = FieldIndex::ForDescriptor(*boilerplate_map, i);
+
+    int property_offset = field_index.offset();
     Handle<Name> name(descriptors->GetKey(i));
-    Handle<Object> value =
-        Handle<Object>(boilerplate_object->InObjectPropertyAt(index),
-        isolate());
 
     // The access for the store depends on the type of the boilerplate.
     HObjectAccess access = boilerplate_object->IsJSArray() ?
         HObjectAccess::ForJSArrayOffset(property_offset) :
         HObjectAccess::ForMapAndOffset(boilerplate_map, property_offset);
 
+    if (boilerplate_object->IsUnboxedDoubleField(field_index)) {
+      CHECK(!boilerplate_object->IsJSArray());
+      double value = boilerplate_object->RawFastDoublePropertyAt(field_index);
+      access = access.WithRepresentation(Representation::Double());
+      Add<HStoreNamedField>(object, access, Add<HConstant>(value));
+      continue;
+    }
+    Handle<Object> value(boilerplate_object->RawFastPropertyAt(field_index),
+                         isolate());
+
     if (value->IsJSObject()) {
       Handle<JSObject> value_object = Handle<JSObject>::cast(value);
       Handle<AllocationSite> current_site = site_context->EnterNewScope();
@@ -11284,10 +11340,8 @@ void HOptimizedGraphBuilder::BuildEmitFixedDoubleArray(
   int elements_length = elements->length();
   for (int i = 0; i < elements_length; i++) {
     HValue* key_constant = Add<HConstant>(i);
-    HInstruction* value_instruction =
-        Add<HLoadKeyed>(boilerplate_elements, key_constant,
-                        static_cast<HValue*>(NULL), kind,
-                        ALLOW_RETURN_HOLE);
+    HInstruction* value_instruction = Add<HLoadKeyed>(
+        boilerplate_elements, key_constant, nullptr, kind, ALLOW_RETURN_HOLE);
     HInstruction* store = Add<HStoreKeyed>(object_elements, key_constant,
                                            value_instruction, kind);
     store->SetFlag(HValue::kAllowUndefinedAsNaN);
@@ -11314,11 +11368,13 @@ void HOptimizedGraphBuilder::BuildEmitFixedArray(
       site_context->ExitScope(current_site, value_object);
       Add<HStoreKeyed>(object_elements, key_constant, result, kind);
     } else {
+      ElementsKind copy_kind =
+          kind == FAST_HOLEY_SMI_ELEMENTS ? FAST_HOLEY_ELEMENTS : kind;
       HInstruction* value_instruction =
-          Add<HLoadKeyed>(boilerplate_elements, key_constant,
-                          static_cast<HValue*>(NULL), kind,
-                          ALLOW_RETURN_HOLE);
-      Add<HStoreKeyed>(object_elements, key_constant, value_instruction, kind);
+          Add<HLoadKeyed>(boilerplate_elements, key_constant, nullptr,
+                          copy_kind, ALLOW_RETURN_HOLE);
+      Add<HStoreKeyed>(object_elements, key_constant, value_instruction,
+                       copy_kind);
     }
   }
 }
@@ -11567,8 +11623,8 @@ void HOptimizedGraphBuilder::GenerateIsJSProxy(CallRuntime* call) {
   HValue* smicheck = if_proxy.IfNot<HIsSmiAndBranch>(value);
   if_proxy.And();
   HValue* map = Add<HLoadNamedField>(value, smicheck, HObjectAccess::ForMap());
-  HValue* instance_type = Add<HLoadNamedField>(
-      map, static_cast<HValue*>(NULL), HObjectAccess::ForMapInstanceType());
+  HValue* instance_type =
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapInstanceType());
   if_proxy.If<HCompareNumericAndBranch>(
       instance_type, Add<HConstant>(FIRST_JS_PROXY_TYPE), Token::GTE);
   if_proxy.And();
@@ -11580,6 +11636,35 @@ void HOptimizedGraphBuilder::GenerateIsJSProxy(CallRuntime* call) {
 }
 
 
+void HOptimizedGraphBuilder::GenerateHasFastPackedElements(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* object = Pop();
+  HIfContinuation continuation(graph()->CreateBasicBlock(),
+                               graph()->CreateBasicBlock());
+  IfBuilder if_not_smi(this);
+  if_not_smi.IfNot<HIsSmiAndBranch>(object);
+  if_not_smi.Then();
+  {
+    NoObservableSideEffectsScope no_effects(this);
+
+    IfBuilder if_fast_packed(this);
+    HValue* elements_kind = BuildGetElementsKind(object);
+    if_fast_packed.If<HCompareNumericAndBranch>(
+        elements_kind, Add<HConstant>(FAST_SMI_ELEMENTS), Token::EQ);
+    if_fast_packed.Or();
+    if_fast_packed.If<HCompareNumericAndBranch>(
+        elements_kind, Add<HConstant>(FAST_ELEMENTS), Token::EQ);
+    if_fast_packed.Or();
+    if_fast_packed.If<HCompareNumericAndBranch>(
+        elements_kind, Add<HConstant>(FAST_DOUBLE_ELEMENTS), Token::EQ);
+    if_fast_packed.JoinContinuation(&continuation);
+  }
+  if_not_smi.JoinContinuation(&continuation);
+  return ast_context()->ReturnContinuation(&continuation, call->id());
+}
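
The continuation built above computes, in essence, the predicate below; the
enum values are stand-ins mirroring V8's fast ElementsKind names:

    enum ElementsKind {
      FAST_SMI_ELEMENTS, FAST_HOLEY_SMI_ELEMENTS,
      FAST_ELEMENTS, FAST_HOLEY_ELEMENTS,
      FAST_DOUBLE_ELEMENTS, FAST_HOLEY_DOUBLE_ELEMENTS
    };

    // True only for the three packed fast kinds; holey and slower kinds
    // (and smis, which are filtered out before the kind is read) yield false.
    bool HasFastPackedElements(ElementsKind kind) {
      return kind == FAST_SMI_ELEMENTS || kind == FAST_ELEMENTS ||
             kind == FAST_DOUBLE_ELEMENTS;
    }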
+
+
 void HOptimizedGraphBuilder::GenerateIsNonNegativeSmi(CallRuntime* call) {
   return Bailout(kInlinedRuntimeFunctionIsNonNegativeSmi);
 }
@@ -12000,6 +12085,650 @@ void HOptimizedGraphBuilder::GenerateMathSqrtRT(CallRuntime* call) {
 }
 
 
+HValue* HOptimizedGraphBuilder::BuildOrderedHashTableHashToBucket(
+    HValue* hash, HValue* num_buckets) {
+  HValue* mask = AddUncasted<HSub>(num_buckets, graph()->GetConstant1());
+  mask->ChangeRepresentation(Representation::Integer32());
+  mask->ClearFlag(HValue::kCanOverflow);
+  return AddUncasted<HBitwise>(Token::BIT_AND, hash, mask);
+}
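
The mask trick above relies on OrderedHashTable keeping a power-of-two bucket
count (assumed here; the invariant itself is not visible in the patch), which
turns the modulo into a single AND:

    #include <cstdint>

    // Equivalent to hash % num_buckets when num_buckets is a power of two.
    uint32_t HashToBucket(uint32_t hash, uint32_t num_buckets) {
      return hash & (num_buckets - 1);
    }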
+
+
+template <typename CollectionType>
+HValue* HOptimizedGraphBuilder::BuildOrderedHashTableHashToEntry(
+    HValue* table, HValue* hash, HValue* num_buckets) {
+  HValue* bucket = BuildOrderedHashTableHashToBucket(hash, num_buckets);
+  HValue* entry_index = AddUncasted<HAdd>(
+      bucket, Add<HConstant>(CollectionType::kHashTableStartIndex));
+  entry_index->ClearFlag(HValue::kCanOverflow);
+  HValue* entry = Add<HLoadKeyed>(table, entry_index, nullptr, FAST_ELEMENTS);
+  entry->set_type(HType::Smi());
+  return entry;
+}
+
+
+template <typename CollectionType>
+HValue* HOptimizedGraphBuilder::BuildOrderedHashTableEntryToIndex(
+    HValue* entry, HValue* num_buckets) {
+  HValue* index =
+      AddUncasted<HMul>(entry, Add<HConstant>(CollectionType::kEntrySize));
+  index->ClearFlag(HValue::kCanOverflow);
+  index = AddUncasted<HAdd>(index, num_buckets);
+  index->ClearFlag(HValue::kCanOverflow);
+  index = AddUncasted<HAdd>(
+      index, Add<HConstant>(CollectionType::kHashTableStartIndex));
+  index->ClearFlag(HValue::kCanOverflow);
+  return index;
+}
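
The arithmetic above flattens the table layout
[ header | one head per bucket | kEntrySize fields per entry ] into a single
FixedArray index. As a sketch, with the constants passed in because their
values are not shown in the patch:

    // Flat index of the first field of `entry`:
    int EntryToIndex(int entry, int num_buckets, int entry_size,
                     int hash_table_start_index) {
      return hash_table_start_index + num_buckets + entry * entry_size;
    }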
+
+
+template <typename CollectionType>
+HValue* HOptimizedGraphBuilder::BuildOrderedHashTableFindEntry(HValue* table,
+                                                               HValue* key,
+                                                               HValue* hash) {
+  HValue* num_buckets = Add<HLoadNamedField>(
+      table, nullptr,
+      HObjectAccess::ForOrderedHashTableNumberOfBuckets<CollectionType>());
+
+  HValue* entry = BuildOrderedHashTableHashToEntry<CollectionType>(table, hash,
+                                                                   num_buckets);
+
+  Push(entry);
+
+  LoopBuilder loop(this);
+  loop.BeginBody(1);
+
+  entry = Pop();
+
+  {
+    IfBuilder if_not_found(this);
+    if_not_found.If<HCompareNumericAndBranch>(
+        entry, Add<HConstant>(CollectionType::kNotFound), Token::EQ);
+    if_not_found.Then();
+    Push(entry);
+    loop.Break();
+  }
+
+  HValue* key_index =
+      BuildOrderedHashTableEntryToIndex<CollectionType>(entry, num_buckets);
+  HValue* candidate_key =
+      Add<HLoadKeyed>(table, key_index, nullptr, FAST_ELEMENTS);
+
+  {
+    IfBuilder if_keys_equal(this);
+    if_keys_equal.If<HIsStringAndBranch>(candidate_key);
+    if_keys_equal.AndIf<HStringCompareAndBranch>(candidate_key, key,
+                                                 Token::EQ_STRICT);
+    if_keys_equal.Then();
+    Push(key_index);
+    loop.Break();
+  }
+
+  // Follow the bucket's collision chain to the next entry.
+  HValue* chain_index = AddUncasted<HAdd>(
+      key_index, Add<HConstant>(CollectionType::kChainOffset));
+  chain_index->ClearFlag(HValue::kCanOverflow);
+  entry = Add<HLoadKeyed>(table, chain_index, nullptr, FAST_ELEMENTS);
+  entry->set_type(HType::Smi());
+  Push(entry);
+
+  loop.EndBody();
+
+  return Pop();
+}
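
Taken together, the loop above is a standard chained-bucket probe. A
plain-C++ paraphrase over a flat int table, with integer keys standing in for
the string comparison and the same assumed layout constants as in the
sketches above:

    // Returns the flat index of the matching entry's key field, or not_found.
    int FindEntry(const int* table, int num_buckets, int entry_size,
                  int start_index, int chain_offset, int not_found,
                  int key, unsigned hash) {
      int entry = table[start_index + (hash & (num_buckets - 1))];
      while (entry != not_found) {
        int key_index = start_index + num_buckets + entry * entry_size;
        if (table[key_index] == key) return key_index;
        entry = table[key_index + chain_offset];  // next entry in this bucket
      }
      return not_found;
    }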
+
+
+void HOptimizedGraphBuilder::GenerateMapGet(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 2);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* key = Pop();
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+
+  HIfContinuation continuation;
+  HValue* hash =
+      BuildStringHashLoadIfIsStringAndHashComputed(key, &continuation);
+  {
+    IfBuilder string_checker(this, &continuation);
+    string_checker.Then();
+    {
+      HValue* table = Add<HLoadNamedField>(
+          receiver, nullptr, HObjectAccess::ForJSCollectionTable());
+      HValue* key_index =
+          BuildOrderedHashTableFindEntry<OrderedHashMap>(table, key, hash);
+      IfBuilder if_found(this);
+      if_found.If<HCompareNumericAndBranch>(
+          key_index, Add<HConstant>(OrderedHashMap::kNotFound), Token::NE);
+      if_found.Then();
+      {
+        HValue* value_index = AddUncasted<HAdd>(
+            key_index, Add<HConstant>(OrderedHashMap::kValueOffset));
+        value_index->ClearFlag(HValue::kCanOverflow);
+        Push(Add<HLoadKeyed>(table, value_index, nullptr, FAST_ELEMENTS));
+      }
+      if_found.Else();
+      Push(graph()->GetConstantUndefined());
+      if_found.End();
+    }
+    string_checker.Else();
+    {
+      Add<HPushArguments>(receiver, key);
+      Push(Add<HCallRuntime>(call->name(),
+                             Runtime::FunctionForId(Runtime::kMapGet), 2));
+    }
+  }
+
+  return ast_context()->ReturnValue(Pop());
+}
+
+
+HValue* HOptimizedGraphBuilder::BuildStringHashLoadIfIsStringAndHashComputed(
+    HValue* object, HIfContinuation* continuation) {
+  IfBuilder string_checker(this);
+  string_checker.If<HIsStringAndBranch>(object);
+  string_checker.And();
+  HValue* hash = Add<HLoadNamedField>(object, nullptr,
+                                      HObjectAccess::ForStringHashField());
+  HValue* hash_not_computed_mask = Add<HConstant>(String::kHashNotComputedMask);
+  HValue* hash_computed_test =
+      AddUncasted<HBitwise>(Token::BIT_AND, hash, hash_not_computed_mask);
+  string_checker.If<HCompareNumericAndBranch>(
+      hash_computed_test, graph()->GetConstant0(), Token::EQ);
+  string_checker.Then();
+  HValue* shifted_hash =
+      AddUncasted<HShr>(hash, Add<HConstant>(String::kHashShift));
+  string_checker.CaptureContinuation(continuation);
+  return shifted_hash;
+}
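+
+// The string hash field stores the hash above String::kHashShift and uses
+// kHashNotComputedMask as a validity bit, so the check above is roughly:
+//
+//   if (IsString(object) && (hash_field & kHashNotComputedMask) == 0)
+//     hash = hash_field >> kHashShift
+//   else
+//     <take the continuation's false branch, usually to the runtime>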
+
+
+template <typename CollectionType>
+void HOptimizedGraphBuilder::BuildJSCollectionHas(
+    CallRuntime* call, const Runtime::Function* c_function) {
+  DCHECK(call->arguments()->length() == 2);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* key = Pop();
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+
+  HIfContinuation continuation;
+  HValue* hash =
+      BuildStringHashLoadIfIsStringAndHashComputed(key, &continuation);
+  {
+    IfBuilder string_checker(this, &continuation);
+    string_checker.Then();
+    {
+      HValue* table = Add<HLoadNamedField>(
+          receiver, nullptr, HObjectAccess::ForJSCollectionTable());
+      HValue* key_index =
+          BuildOrderedHashTableFindEntry<CollectionType>(table, key, hash);
+      {
+        IfBuilder if_found(this);
+        if_found.If<HCompareNumericAndBranch>(
+            key_index, Add<HConstant>(CollectionType::kNotFound), Token::NE);
+        if_found.Then();
+        Push(graph()->GetConstantTrue());
+        if_found.Else();
+        Push(graph()->GetConstantFalse());
+      }
+    }
+    string_checker.Else();
+    {
+      Add<HPushArguments>(receiver, key);
+      Push(Add<HCallRuntime>(call->name(), c_function, 2));
+    }
+  }
+
+  return ast_context()->ReturnValue(Pop());
+}
+
+
+void HOptimizedGraphBuilder::GenerateMapHas(CallRuntime* call) {
+  BuildJSCollectionHas<OrderedHashMap>(
+      call, Runtime::FunctionForId(Runtime::kMapHas));
+}
+
+
+void HOptimizedGraphBuilder::GenerateSetHas(CallRuntime* call) {
+  BuildJSCollectionHas<OrderedHashSet>(
+      call, Runtime::FunctionForId(Runtime::kSetHas));
+}
+
+
+template <typename CollectionType>
+HValue* HOptimizedGraphBuilder::BuildOrderedHashTableAddEntry(
+    HValue* table, HValue* key, HValue* hash,
+    HIfContinuation* join_continuation) {
+  HValue* num_buckets = Add<HLoadNamedField>(
+      table, nullptr,
+      HObjectAccess::ForOrderedHashTableNumberOfBuckets<CollectionType>());
+  HValue* capacity = AddUncasted<HMul>(
+      num_buckets, Add<HConstant>(CollectionType::kLoadFactor));
+  capacity->ClearFlag(HValue::kCanOverflow);
+  HValue* num_elements = Add<HLoadNamedField>(
+      table, nullptr,
+      HObjectAccess::ForOrderedHashTableNumberOfElements<CollectionType>());
+  HValue* num_deleted = Add<HLoadNamedField>(
+      table, nullptr, HObjectAccess::ForOrderedHashTableNumberOfDeletedElements<
+                          CollectionType>());
+  HValue* used = AddUncasted<HAdd>(num_elements, num_deleted);
+  used->ClearFlag(HValue::kCanOverflow);
+  IfBuilder if_space_available(this);
+  if_space_available.If<HCompareNumericAndBranch>(capacity, used, Token::GT);
+  if_space_available.Then();
+  HValue* bucket = BuildOrderedHashTableHashToBucket(hash, num_buckets);
+  HValue* entry = used;
+  HValue* key_index =
+      BuildOrderedHashTableEntryToIndex<CollectionType>(entry, num_buckets);
+
+  HValue* bucket_index = AddUncasted<HAdd>(
+      bucket, Add<HConstant>(CollectionType::kHashTableStartIndex));
+  bucket_index->ClearFlag(HValue::kCanOverflow);
+  HValue* chain_entry =
+      Add<HLoadKeyed>(table, bucket_index, nullptr, FAST_ELEMENTS);
+  chain_entry->set_type(HType::Smi());
+
+  HValue* chain_index = AddUncasted<HAdd>(
+      key_index, Add<HConstant>(CollectionType::kChainOffset));
+  chain_index->ClearFlag(HValue::kCanOverflow);
+
+  Add<HStoreKeyed>(table, bucket_index, entry, FAST_ELEMENTS);
+  Add<HStoreKeyed>(table, chain_index, chain_entry, FAST_ELEMENTS);
+  Add<HStoreKeyed>(table, key_index, key, FAST_ELEMENTS);
+
+  HValue* new_num_elements =
+      AddUncasted<HAdd>(num_elements, graph()->GetConstant1());
+  new_num_elements->ClearFlag(HValue::kCanOverflow);
+  Add<HStoreNamedField>(
+      table,
+      HObjectAccess::ForOrderedHashTableNumberOfElements<CollectionType>(),
+      new_num_elements);
+  if_space_available.JoinContinuation(join_continuation);
+  return key_index;
+}
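+
+// Insertion appends the new entry at index `used` and splices it in at the
+// head of its bucket's chain; when the table is full (used == capacity) the
+// continuation's false branch is taken and the caller falls back to the
+// runtime, which grows the table. Roughly:
+//
+//   table[chain(used)] = head(bucket)   // old head becomes the next link
+//   head(bucket)       = used
+//   table[key(used)]   = key
+//   elementCount      += 1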
+
+
+void HOptimizedGraphBuilder::GenerateMapSet(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 3);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(2)));
+  HValue* value = Pop();
+  HValue* key = Pop();
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+
+  HIfContinuation return_or_call_runtime_continuation(
+      graph()->CreateBasicBlock(), graph()->CreateBasicBlock());
+  HIfContinuation got_string_hash;
+  HValue* hash =
+      BuildStringHashLoadIfIsStringAndHashComputed(key, &got_string_hash);
+  IfBuilder string_checker(this, &got_string_hash);
+  string_checker.Then();
+  {
+    HValue* table = Add<HLoadNamedField>(receiver, nullptr,
+                                         HObjectAccess::ForJSCollectionTable());
+    HValue* key_index =
+        BuildOrderedHashTableFindEntry<OrderedHashMap>(table, key, hash);
+    {
+      IfBuilder if_found(this);
+      if_found.If<HCompareNumericAndBranch>(
+          key_index, Add<HConstant>(OrderedHashMap::kNotFound), Token::NE);
+      if_found.Then();
+      {
+        HValue* value_index = AddUncasted<HAdd>(
+            key_index, Add<HConstant>(OrderedHashMap::kValueOffset));
+        value_index->ClearFlag(HValue::kCanOverflow);
+        Add<HStoreKeyed>(table, value_index, value, FAST_ELEMENTS);
+      }
+      if_found.Else();
+      {
+        HIfContinuation did_add(graph()->CreateBasicBlock(),
+                                graph()->CreateBasicBlock());
+        HValue* key_index = BuildOrderedHashTableAddEntry<OrderedHashMap>(
+            table, key, hash, &did_add);
+        IfBuilder if_did_add(this, &did_add);
+        if_did_add.Then();
+        {
+          HValue* value_index = AddUncasted<HAdd>(
+              key_index, Add<HConstant>(OrderedHashMap::kValueOffset));
+          value_index->ClearFlag(HValue::kCanOverflow);
+          Add<HStoreKeyed>(table, value_index, value, FAST_ELEMENTS);
+        }
+        if_did_add.JoinContinuation(&return_or_call_runtime_continuation);
+      }
+    }
+  }
+  string_checker.JoinContinuation(&return_or_call_runtime_continuation);
+
+  {
+    IfBuilder return_or_call_runtime(this,
+                                     &return_or_call_runtime_continuation);
+    return_or_call_runtime.Then();
+    Push(receiver);
+    return_or_call_runtime.Else();
+    Add<HPushArguments>(receiver, key, value);
+    Push(Add<HCallRuntime>(call->name(),
+                           Runtime::FunctionForId(Runtime::kMapSet), 3));
+  }
+
+  return ast_context()->ReturnValue(Pop());
+}
+
+
+void HOptimizedGraphBuilder::GenerateSetAdd(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 2);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* key = Pop();
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+
+  HIfContinuation return_or_call_runtime_continuation(
+      graph()->CreateBasicBlock(), graph()->CreateBasicBlock());
+  HIfContinuation got_string_hash;
+  HValue* hash =
+      BuildStringHashLoadIfIsStringAndHashComputed(key, &got_string_hash);
+  IfBuilder string_checker(this, &got_string_hash);
+  string_checker.Then();
+  {
+    HValue* table = Add<HLoadNamedField>(receiver, nullptr,
+                                         HObjectAccess::ForJSCollectionTable());
+    HValue* key_index =
+        BuildOrderedHashTableFindEntry<OrderedHashSet>(table, key, hash);
+    {
+      IfBuilder if_not_found(this);
+      if_not_found.If<HCompareNumericAndBranch>(
+          key_index, Add<HConstant>(OrderedHashSet::kNotFound), Token::EQ);
+      if_not_found.Then();
+      BuildOrderedHashTableAddEntry<OrderedHashSet>(
+          table, key, hash, &return_or_call_runtime_continuation);
+    }
+  }
+  string_checker.JoinContinuation(&return_or_call_runtime_continuation);
+
+  {
+    IfBuilder return_or_call_runtime(this,
+                                     &return_or_call_runtime_continuation);
+    return_or_call_runtime.Then();
+    Push(receiver);
+    return_or_call_runtime.Else();
+    Add<HPushArguments>(receiver, key);
+    Push(Add<HCallRuntime>(call->name(),
+                           Runtime::FunctionForId(Runtime::kSetAdd), 2));
+  }
+
+  return ast_context()->ReturnValue(Pop());
+}
+
+
+template <typename CollectionType>
+void HOptimizedGraphBuilder::BuildJSCollectionDelete(
+    CallRuntime* call, const Runtime::Function* c_function) {
+  DCHECK(call->arguments()->length() == 2);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(1)));
+  HValue* key = Pop();
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+
+  HIfContinuation return_or_call_runtime_continuation(
+      graph()->CreateBasicBlock(), graph()->CreateBasicBlock());
+  HIfContinuation got_string_hash;
+  HValue* hash =
+      BuildStringHashLoadIfIsStringAndHashComputed(key, &got_string_hash);
+  IfBuilder string_checker(this, &got_string_hash);
+  string_checker.Then();
+  {
+    HValue* table = Add<HLoadNamedField>(receiver, nullptr,
+                                         HObjectAccess::ForJSCollectionTable());
+    HValue* key_index =
+        BuildOrderedHashTableFindEntry<CollectionType>(table, key, hash);
+    {
+      IfBuilder if_found(this);
+      if_found.If<HCompareNumericAndBranch>(
+          key_index, Add<HConstant>(CollectionType::kNotFound), Token::NE);
+      if_found.Then();
+      {
+        // Removing an element might require shrinking the table; if it
+        // does, we bail out to the runtime.
+        HValue* num_elements = Add<HLoadNamedField>(
+            table, nullptr, HObjectAccess::ForOrderedHashTableNumberOfElements<
+                                CollectionType>());
+        num_elements = AddUncasted<HSub>(num_elements, graph()->GetConstant1());
+        num_elements->ClearFlag(HValue::kCanOverflow);
+
+        HValue* num_buckets = Add<HLoadNamedField>(
+            table, nullptr, HObjectAccess::ForOrderedHashTableNumberOfBuckets<
+                                CollectionType>());
+        // The shrink threshold is capacity >> 2; since kLoadFactor is 2,
+        // this simplifies to num_buckets >> 1.
+        STATIC_ASSERT(CollectionType::kLoadFactor == 2);
+        HValue* threshold =
+            AddUncasted<HShr>(num_buckets, graph()->GetConstant1());
+
+        IfBuilder if_need_not_shrink(this);
+        if_need_not_shrink.If<HCompareNumericAndBranch>(num_elements, threshold,
+                                                        Token::GTE);
+        if_need_not_shrink.Then();
+        {
+          Add<HStoreKeyed>(table, key_index, graph()->GetConstantHole(),
+                           FAST_ELEMENTS);
+
+          // For maps, we also need to clear the value.
+          if (CollectionType::kChainOffset > 1) {
+            HValue* value_index =
+                AddUncasted<HAdd>(key_index, graph()->GetConstant1());
+            value_index->ClearFlag(HValue::kCanOverflow);
+            Add<HStoreKeyed>(table, value_index, graph()->GetConstantHole(),
+                             FAST_ELEMENTS);
+          }
+          STATIC_ASSERT(CollectionType::kChainOffset <= 2);
+
+          HValue* num_deleted = Add<HLoadNamedField>(
+              table, nullptr,
+              HObjectAccess::ForOrderedHashTableNumberOfDeletedElements<
+                  CollectionType>());
+          num_deleted = AddUncasted<HAdd>(num_deleted, graph()->GetConstant1());
+          num_deleted->ClearFlag(HValue::kCanOverflow);
+          Add<HStoreNamedField>(
+              table, HObjectAccess::ForOrderedHashTableNumberOfElements<
+                         CollectionType>(),
+              num_elements);
+          Add<HStoreNamedField>(
+              table, HObjectAccess::ForOrderedHashTableNumberOfDeletedElements<
+                         CollectionType>(),
+              num_deleted);
+          Push(graph()->GetConstantTrue());
+        }
+        if_need_not_shrink.JoinContinuation(
+            &return_or_call_runtime_continuation);
+      }
+      if_found.Else();
+      {
+        // Not found, so we're done.
+        Push(graph()->GetConstantFalse());
+      }
+    }
+  }
+  string_checker.JoinContinuation(&return_or_call_runtime_continuation);
+
+  {
+    IfBuilder return_or_call_runtime(this,
+                                     &return_or_call_runtime_continuation);
+    return_or_call_runtime.Then();
+    return_or_call_runtime.Else();
+    Add<HPushArguments>(receiver, key);
+    Push(Add<HCallRuntime>(call->name(), c_function, 2));
+  }
+
+  return ast_context()->ReturnValue(Pop());
+}
+
+
+void HOptimizedGraphBuilder::GenerateMapDelete(CallRuntime* call) {
+  BuildJSCollectionDelete<OrderedHashMap>(
+      call, Runtime::FunctionForId(Runtime::kMapDelete));
+}
+
+
+void HOptimizedGraphBuilder::GenerateSetDelete(CallRuntime* call) {
+  BuildJSCollectionDelete<OrderedHashSet>(
+      call, Runtime::FunctionForId(Runtime::kSetDelete));
+}
+
+
+void HOptimizedGraphBuilder::GenerateSetGetSize(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+  HValue* table = Add<HLoadNamedField>(receiver, nullptr,
+                                       HObjectAccess::ForJSCollectionTable());
+  HInstruction* result = New<HLoadNamedField>(
+      table, nullptr,
+      HObjectAccess::ForOrderedHashTableNumberOfElements<OrderedHashSet>());
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+void HOptimizedGraphBuilder::GenerateMapGetSize(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+  HValue* table = Add<HLoadNamedField>(receiver, nullptr,
+                                       HObjectAccess::ForJSCollectionTable());
+  HInstruction* result = New<HLoadNamedField>(
+      table, nullptr,
+      HObjectAccess::ForOrderedHashTableNumberOfElements<OrderedHashMap>());
+  return ast_context()->ReturnInstruction(result, call->id());
+}
+
+
+template <typename CollectionType>
+HValue* HOptimizedGraphBuilder::BuildAllocateOrderedHashTable() {
+  static const int kCapacity = CollectionType::kMinCapacity;
+  static const int kBucketCount = kCapacity / CollectionType::kLoadFactor;
+  static const int kFixedArrayLength = CollectionType::kHashTableStartIndex +
+                                       kBucketCount +
+                                       (kCapacity * CollectionType::kEntrySize);
+  static const int kSizeInBytes =
+      FixedArray::kHeaderSize + (kFixedArrayLength * kPointerSize);
+
+  // Allocate the table and add the proper map.
+  HValue* table =
+      Add<HAllocate>(Add<HConstant>(kSizeInBytes), HType::HeapObject(),
+                     NOT_TENURED, FIXED_ARRAY_TYPE);
+  AddStoreMapConstant(table, isolate()->factory()->ordered_hash_table_map());
+
+  // Initialize the FixedArray...
+  HValue* length = Add<HConstant>(kFixedArrayLength);
+  Add<HStoreNamedField>(table, HObjectAccess::ForFixedArrayLength(), length);
+
+  // ...and the OrderedHashTable fields.
+  Add<HStoreNamedField>(
+      table,
+      HObjectAccess::ForOrderedHashTableNumberOfBuckets<CollectionType>(),
+      Add<HConstant>(kBucketCount));
+  Add<HStoreNamedField>(
+      table,
+      HObjectAccess::ForOrderedHashTableNumberOfElements<CollectionType>(),
+      graph()->GetConstant0());
+  Add<HStoreNamedField>(
+      table, HObjectAccess::ForOrderedHashTableNumberOfDeletedElements<
+                 CollectionType>(),
+      graph()->GetConstant0());
+
+  // Fill the buckets with kNotFound.
+  HValue* not_found = Add<HConstant>(CollectionType::kNotFound);
+  for (int i = 0; i < kBucketCount; ++i) {
+    Add<HStoreNamedField>(
+        table, HObjectAccess::ForOrderedHashTableBucket<CollectionType>(i),
+        not_found);
+  }
+
+  // Fill the data table with undefined.
+  HValue* undefined = graph()->GetConstantUndefined();
+  for (int i = 0; i < (kCapacity * CollectionType::kEntrySize); ++i) {
+    Add<HStoreNamedField>(table,
+                          HObjectAccess::ForOrderedHashTableDataTableIndex<
+                              CollectionType, kBucketCount>(i),
+                          undefined);
+  }
+
+  return table;
+}
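+
+// For concreteness, assuming kMinCapacity == 4 and kLoadFactor == 2 (so two
+// buckets): an OrderedHashSet with kEntrySize == 2 gets a FixedArray of
+// length kHashTableStartIndex + 2 + 4 * 2, and an OrderedHashMap with
+// kEntrySize == 3 one of length kHashTableStartIndex + 2 + 4 * 3.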
+
+
+void HOptimizedGraphBuilder::GenerateSetInitialize(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+  HValue* table = BuildAllocateOrderedHashTable<OrderedHashSet>();
+  Add<HStoreNamedField>(receiver, HObjectAccess::ForJSCollectionTable(), table);
+  return ast_context()->ReturnValue(receiver);
+}
+
+
+void HOptimizedGraphBuilder::GenerateMapInitialize(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+  HValue* table = BuildAllocateOrderedHashTable<OrderedHashMap>();
+  Add<HStoreNamedField>(receiver, HObjectAccess::ForJSCollectionTable(), table);
+  return ast_context()->ReturnValue(receiver);
+}
+
+
+template <typename CollectionType>
+void HOptimizedGraphBuilder::BuildOrderedHashTableClear(HValue* receiver) {
+  HValue* old_table = Add<HLoadNamedField>(
+      receiver, nullptr, HObjectAccess::ForJSCollectionTable());
+  HValue* new_table = BuildAllocateOrderedHashTable<CollectionType>();
+  Add<HStoreNamedField>(
+      old_table, HObjectAccess::ForOrderedHashTableNextTable<CollectionType>(),
+      new_table);
+  Add<HStoreNamedField>(
+      old_table, HObjectAccess::ForOrderedHashTableNumberOfDeletedElements<
+                     CollectionType>(),
+      Add<HConstant>(CollectionType::kClearedTableSentinel));
+  Add<HStoreNamedField>(receiver, HObjectAccess::ForJSCollectionTable(),
+                        new_table);
+}
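+
+// Rather than emptying the table in place, clearing allocates a fresh
+// minimal table and leaves the old one behind with a next-table pointer and
+// kClearedTableSentinel in its deleted-element count, so that live iterators
+// still holding the old table can detect the clear and move to the new one.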
+
+
+void HOptimizedGraphBuilder::GenerateSetClear(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+  BuildOrderedHashTableClear<OrderedHashSet>(receiver);
+  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
+void HOptimizedGraphBuilder::GenerateMapClear(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* receiver = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+  BuildOrderedHashTableClear<OrderedHashMap>(receiver);
+  return ast_context()->ReturnValue(graph()->GetConstantUndefined());
+}
+
+
 void HOptimizedGraphBuilder::GenerateGetCachedArrayIndex(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 1);
   CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
@@ -12036,12 +12765,59 @@ void HOptimizedGraphBuilder::GenerateDebugIsActive(CallRuntime* call) {
   DCHECK(call->arguments()->length() == 0);
   HValue* ref =
       Add<HConstant>(ExternalReference::debug_is_active_address(isolate()));
-  HValue* value = Add<HLoadNamedField>(
-      ref, static_cast<HValue*>(NULL), HObjectAccess::ForExternalUInteger8());
+  HValue* value =
+      Add<HLoadNamedField>(ref, nullptr, HObjectAccess::ForExternalUInteger8());
   return ast_context()->ReturnValue(value);
 }
 
 
+void HOptimizedGraphBuilder::GenerateGetPrototype(CallRuntime* call) {
+  DCHECK(call->arguments()->length() == 1);
+  CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
+  HValue* object = Pop();
+
+  NoObservableSideEffectsScope no_effects(this);
+
+  HValue* map = Add<HLoadNamedField>(object, nullptr, HObjectAccess::ForMap());
+  HValue* bit_field =
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForMapBitField());
+  HValue* is_access_check_needed_mask =
+      Add<HConstant>(1 << Map::kIsAccessCheckNeeded);
+  HValue* is_access_check_needed_test = AddUncasted<HBitwise>(
+      Token::BIT_AND, bit_field, is_access_check_needed_mask);
+
+  HValue* proto =
+      Add<HLoadNamedField>(map, nullptr, HObjectAccess::ForPrototype());
+  HValue* proto_map =
+      Add<HLoadNamedField>(proto, nullptr, HObjectAccess::ForMap());
+  HValue* proto_bit_field =
+      Add<HLoadNamedField>(proto_map, nullptr, HObjectAccess::ForMapBitField());
+  HValue* is_hidden_prototype_mask =
+      Add<HConstant>(1 << Map::kIsHiddenPrototype);
+  HValue* is_hidden_prototype_test = AddUncasted<HBitwise>(
+      Token::BIT_AND, proto_bit_field, is_hidden_prototype_mask);
+
+  {
+    IfBuilder needs_runtime(this);
+    needs_runtime.If<HCompareNumericAndBranch>(
+        is_access_check_needed_test, graph()->GetConstant0(), Token::NE);
+    needs_runtime.OrIf<HCompareNumericAndBranch>(
+        is_hidden_prototype_test, graph()->GetConstant0(), Token::NE);
+
+    needs_runtime.Then();
+    {
+      Add<HPushArguments>(object);
+      Push(Add<HCallRuntime>(
+          call->name(), Runtime::FunctionForId(Runtime::kGetPrototype), 1));
+    }
+
+    needs_runtime.Else();
+    Push(proto);
+  }
+  return ast_context()->ReturnValue(Pop());
+}
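+
+// Only the safe case is inlined here: when the receiver's map needs no
+// access check and its prototype is not a hidden prototype, the map's
+// prototype slot is the answer; otherwise Runtime::kGetPrototype performs
+// the access check and skips hidden prototypes.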
+
+
 #undef CHECK_BAILOUT
 #undef CHECK_ALIVE
 
index 0ff5a45..c1ed797 100644 (file)
@@ -748,7 +748,8 @@ class HOptimizedGraphBuilder;
 
 enum ArgumentsAllowedFlag {
   ARGUMENTS_NOT_ALLOWED,
-  ARGUMENTS_ALLOWED
+  ARGUMENTS_ALLOWED,
+  ARGUMENTS_FAKED
 };
 
 
@@ -818,7 +819,7 @@ class EffectContext FINAL : public AstContext {
   }
   virtual ~EffectContext();
 
-  virtual void ReturnValue(HValue* value) OVERRIDE;
+  void ReturnValue(HValue* value) OVERRIDE;
   virtual void ReturnInstruction(HInstruction* instr,
                                  BailoutId ast_id) OVERRIDE;
   virtual void ReturnControl(HControlInstruction* instr,
@@ -835,7 +836,7 @@ class ValueContext FINAL : public AstContext {
   }
   virtual ~ValueContext();
 
-  virtual void ReturnValue(HValue* value) OVERRIDE;
+  void ReturnValue(HValue* value) OVERRIDE;
   virtual void ReturnInstruction(HInstruction* instr,
                                  BailoutId ast_id) OVERRIDE;
   virtual void ReturnControl(HControlInstruction* instr,
@@ -862,7 +863,7 @@ class TestContext FINAL : public AstContext {
         if_false_(if_false) {
   }
 
-  virtual void ReturnValue(HValue* value) OVERRIDE;
+  void ReturnValue(HValue* value) OVERRIDE;
   virtual void ReturnInstruction(HInstruction* instr,
                                  BailoutId ast_id) OVERRIDE;
   virtual void ReturnControl(HControlInstruction* instr,
@@ -1863,10 +1864,10 @@ class HGraphBuilder {
 
   HValue* BuildElementIndexHash(HValue* index);
 
-  void BuildCompareNil(
-      HValue* value,
-      Type* type,
-      HIfContinuation* continuation);
+  enum MapEmbedding { kEmbedMapsDirectly, kEmbedMapsViaWeakCells };
+
+  void BuildCompareNil(HValue* value, Type* type, HIfContinuation* continuation,
+                       MapEmbedding map_embedding = kEmbedMapsDirectly);
 
   void BuildCreateAllocationMemento(HValue* previous_object,
                                     HValue* previous_object_size,
@@ -1878,6 +1879,7 @@ class HGraphBuilder {
 
   HInstruction* BuildGetNativeContext(HValue* closure);
   HInstruction* BuildGetNativeContext();
+  HInstruction* BuildGetScriptContext(int context_index);
   HInstruction* BuildGetArrayFunction();
 
  protected:
@@ -2106,13 +2108,13 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
 
   explicit HOptimizedGraphBuilder(CompilationInfo* info);
 
-  virtual bool BuildGraph() OVERRIDE;
+  bool BuildGraph() OVERRIDE;
 
   // Simple accessors.
   BreakAndContinueScope* break_scope() const { return break_scope_; }
   void set_break_scope(BreakAndContinueScope* head) { break_scope_ = head; }
 
-  HValue* context() { return environment()->context(); }
+  HValue* context() OVERRIDE { return environment()->context(); }
 
   HOsrBuilder* osr() const { return osr_; }
 
@@ -2124,7 +2126,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
 
   FunctionState* function_state() const { return function_state_; }
 
-  void VisitDeclarations(ZoneList<Declaration*>* declarations);
+  void VisitDeclarations(ZoneList<Declaration*>* declarations) OVERRIDE;
 
   void* operator new(size_t size, Zone* zone) {
     return zone->New(static_cast<int>(size));
@@ -2256,7 +2258,6 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
 #endif
     }
   }
-
   HValue* LookupAndMakeLive(Variable* var) {
     HEnvironment* env = environment();
     int index = env->IndexFor(var);
@@ -2284,7 +2285,9 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
                        HBasicBlock* false_block);
 
   // Visit a list of expressions from left to right, each in a value context.
-  void VisitExpressions(ZoneList<Expression*>* exprs);
+  void VisitExpressions(ZoneList<Expression*>* exprs) OVERRIDE;
+  void VisitExpressions(ZoneList<Expression*>* exprs,
+                        ArgumentsAllowedFlag flag);
 
   // Remove the arguments from the bailout environment and emit instructions
   // to push them as outgoing parameters.
@@ -2292,7 +2295,7 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
   void PushArgumentsFromEnvironment(int count);
 
   void SetUpScope(Scope* scope);
-  virtual void VisitStatements(ZoneList<Statement*>* statements) OVERRIDE;
+  void VisitStatements(ZoneList<Statement*>* statements) OVERRIDE;
 
 #define DECLARE_VISIT(type) virtual void Visit##type(type* node) OVERRIDE;
   AST_NODE_LIST(DECLARE_VISIT)
@@ -2414,6 +2417,33 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
       ElementsKind fixed_elements_kind,
       HValue* byte_length, HValue* length);
 
+  // TODO(adamk): Move all OrderedHashTable functions to their own class.
+  HValue* BuildOrderedHashTableHashToBucket(HValue* hash, HValue* num_buckets);
+  template <typename CollectionType>
+  HValue* BuildOrderedHashTableHashToEntry(HValue* table, HValue* hash,
+                                           HValue* num_buckets);
+  template <typename CollectionType>
+  HValue* BuildOrderedHashTableEntryToIndex(HValue* entry, HValue* num_buckets);
+  template <typename CollectionType>
+  HValue* BuildOrderedHashTableFindEntry(HValue* table, HValue* key,
+                                         HValue* hash);
+  template <typename CollectionType>
+  HValue* BuildOrderedHashTableAddEntry(HValue* table, HValue* key,
+                                        HValue* hash,
+                                        HIfContinuation* join_continuation);
+  template <typename CollectionType>
+  HValue* BuildAllocateOrderedHashTable();
+  template <typename CollectionType>
+  void BuildOrderedHashTableClear(HValue* receiver);
+  template <typename CollectionType>
+  void BuildJSCollectionDelete(CallRuntime* call,
+                               const Runtime::Function* c_function);
+  template <typename CollectionType>
+  void BuildJSCollectionHas(CallRuntime* call,
+                            const Runtime::Function* c_function);
+  HValue* BuildStringHashLoadIfIsStringAndHashComputed(
+      HValue* object, HIfContinuation* continuation);
+
   Handle<JSFunction> array_function() {
     return handle(isolate()->native_context()->array_function());
   }
@@ -2718,6 +2748,8 @@ class HOptimizedGraphBuilder : public HGraphBuilder, public AstVisitor {
   HInstruction* BuildCallConstantFunction(Handle<JSFunction> target,
                                           int argument_count);
 
+  bool CanBeFunctionApplyArguments(Call* expr);
+
   // The translation state of the currently-being-translated function.
   FunctionState* function_state_;
 
index ba44687..69fa9ca 100644 (file)
@@ -704,6 +704,10 @@ icu::SimpleDateFormat* DateFormat::InitializeDateTimeFormat(
     icu::Locale no_extension_locale(icu_locale.getBaseName());
     date_format = CreateICUDateFormat(isolate, no_extension_locale, options);
 
+    if (!date_format) {
+      FATAL("Failed to create ICU date format, are ICU data files missing?");
+    }
+
     // Set resolved settings (pattern, numbering system, calendar).
     SetResolvedDateSettings(
         isolate, no_extension_locale, date_format, resolved);
@@ -780,6 +784,10 @@ icu::DecimalFormat* NumberFormat::InitializeNumberFormat(
     number_format = CreateICUNumberFormat(
         isolate, no_extension_locale, options);
 
+    if (!number_format) {
+      FATAL("Failed to create ICU number format, are ICU data files missing?");
+    }
+
     // Set resolved settings (pattern, numbering system).
     SetResolvedNumberSettings(
         isolate, no_extension_locale, number_format, resolved);
@@ -839,6 +847,10 @@ icu::Collator* Collator::InitializeCollator(
     icu::Locale no_extension_locale(icu_locale.getBaseName());
     collator = CreateICUCollator(isolate, no_extension_locale, options);
 
+    if (!collator) {
+      FATAL("Failed to create ICU collator, are ICU data files missing?");
+    }
+
     // Set resolved settings (pattern, numbering system).
     SetResolvedCollatorSettings(
         isolate, no_extension_locale, collator, resolved);
@@ -898,6 +910,10 @@ icu::BreakIterator* BreakIterator::InitializeBreakIterator(
     break_iterator = CreateICUBreakIterator(
         isolate, no_extension_locale, options);
 
+    if (!break_iterator) {
+      FATAL("Failed to create ICU break iterator, are ICU data files missing?");
+    }
+
     // Set resolved settings (locale).
     SetResolvedBreakIteratorSettings(
         isolate, no_extension_locale, break_iterator, resolved);
index 50c834f..2805fa0 100644 (file)
@@ -60,11 +60,17 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
 
   if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
   if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
+  if (cpu.has_avx() && FLAG_enable_avx) supported_ |= 1u << AVX;
+  if (cpu.has_fma3() && FLAG_enable_fma3) supported_ |= 1u << FMA3;
 }
 
 
 void CpuFeatures::PrintTarget() { }
-void CpuFeatures::PrintFeatures() { }
+void CpuFeatures::PrintFeatures() {
+  printf("SSE3=%d SSE4_1=%d AVX=%d FMA3=%d\n", CpuFeatures::IsSupported(SSE3),
+         CpuFeatures::IsSupported(SSE4_1), CpuFeatures::IsSupported(AVX),
+         CpuFeatures::IsSupported(FMA3));
+}
 
 
 // -----------------------------------------------------------------------------
@@ -457,11 +463,11 @@ void Assembler::mov_b(Register dst, const Operand& src) {
 }
 
 
-void Assembler::mov_b(const Operand& dst, int8_t imm8) {
+void Assembler::mov_b(const Operand& dst, const Immediate& src) {
   EnsureSpace ensure_space(this);
   EMIT(0xC6);
   emit_operand(eax, dst);
-  EMIT(imm8);
+  EMIT(static_cast<int8_t>(src.x_));
 }
 
 
@@ -489,13 +495,13 @@ void Assembler::mov_w(const Operand& dst, Register src) {
 }
 
 
-void Assembler::mov_w(const Operand& dst, int16_t imm16) {
+void Assembler::mov_w(const Operand& dst, const Immediate& src) {
   EnsureSpace ensure_space(this);
   EMIT(0x66);
   EMIT(0xC7);
   emit_operand(eax, dst);
-  EMIT(static_cast<int8_t>(imm16 & 0xff));
-  EMIT(static_cast<int8_t>(imm16 >> 8));
+  EMIT(static_cast<int8_t>(src.x_ & 0xff));
+  EMIT(static_cast<int8_t>(src.x_ >> 8));
 }
 
 
@@ -2437,6 +2443,81 @@ void Assembler::pinsrd(XMMRegister dst, const Operand& src, int8_t offset) {
 }
 
 
+void Assembler::addss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0xF3);
+  EMIT(0x0F);
+  EMIT(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  EMIT(0x0f);
+  EMIT(0x2e);
+  emit_sse_operand(dst, src);
+}
+
+
+// AVX instructions
+void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
+                       const Operand& src2) {
+  DCHECK(IsEnabled(FMA3));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(src1, kLIG, k66, k0F38, kW1);
+  EMIT(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
+                       const Operand& src2) {
+  DCHECK(IsEnabled(FMA3));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(src1, kLIG, k66, k0F38, kW0);
+  EMIT(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
+                    const Operand& src2) {
+  DCHECK(IsEnabled(AVX));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(src1, kLIG, kF2, k0F, kWIG);
+  EMIT(op);
+  emit_sse_operand(dst, src2);
+}
+
+
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
   emit_operand(ireg, adr);
@@ -2458,6 +2539,19 @@ void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
 }
 
 
+void Assembler::emit_vex_prefix(XMMRegister vreg, VectorLength l, SIMDPrefix pp,
+                                LeadingOpcode mm, VexW w) {
+  if (mm != k0F || w != kW0) {
+    EMIT(0xc4);
+    EMIT(0xc0 | mm);
+    EMIT(w | ((~vreg.code() & 0xf) << 3) | l | pp);
+  } else {
+    EMIT(0xc5);
+    EMIT(((~vreg.code()) << 3) | l | pp);
+  }
+}
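+
+// An illustrative encoding: vaddsd(xmm1, xmm2, xmm3) reaches this through
+// vsd(0x58, ...) with mm == k0F and w == kW0, so the two-byte form applies:
+//
+//   c5      two-byte VEX escape
+//   eb      (~xmm2.code()) << 3 | kLIG | kF2  (the inverted high bits set R)
+//   58      opcode
+//   cb      ModR/M for xmm1, xmm3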
+
+
 void Assembler::RecordJSReturn() {
   positions_recorder()->WriteRecordedPositions();
   EnsureSpace ensure_space(this);
index 00ee959..b913f7a 100644 (file)
@@ -619,12 +619,14 @@ class Assembler : public AssemblerBase {
   void mov_b(Register dst, Register src) { mov_b(dst, Operand(src)); }
   void mov_b(Register dst, const Operand& src);
   void mov_b(Register dst, int8_t imm8) { mov_b(Operand(dst), imm8); }
-  void mov_b(const Operand& dst, int8_t imm8);
+  void mov_b(const Operand& dst, int8_t src) { mov_b(dst, Immediate(src)); }
+  void mov_b(const Operand& dst, const Immediate& src);
   void mov_b(const Operand& dst, Register src);
 
   void mov_w(Register dst, const Operand& src);
+  void mov_w(const Operand& dst, int16_t src) { mov_w(dst, Immediate(src)); }
+  void mov_w(const Operand& dst, const Immediate& src);
   void mov_w(const Operand& dst, Register src);
-  void mov_w(const Operand& dst, int16_t imm16);
 
   void mov(Register dst, int32_t imm32);
   void mov(Register dst, const Immediate& x);
@@ -926,6 +928,17 @@ class Assembler : public AssemblerBase {
   void cpuid();
 
   // SSE instructions
+  void addss(XMMRegister dst, XMMRegister src) { addss(dst, Operand(src)); }
+  void addss(XMMRegister dst, const Operand& src);
+  void subss(XMMRegister dst, XMMRegister src) { subss(dst, Operand(src)); }
+  void subss(XMMRegister dst, const Operand& src);
+  void mulss(XMMRegister dst, XMMRegister src) { mulss(dst, Operand(src)); }
+  void mulss(XMMRegister dst, const Operand& src);
+  void divss(XMMRegister dst, XMMRegister src) { divss(dst, Operand(src)); }
+  void divss(XMMRegister dst, const Operand& src);
+
+  void ucomiss(XMMRegister dst, XMMRegister src) { ucomiss(dst, Operand(src)); }
+  void ucomiss(XMMRegister dst, const Operand& src);
   void movaps(XMMRegister dst, XMMRegister src);
   void shufps(XMMRegister dst, XMMRegister src, byte imm8);
 
@@ -1049,6 +1062,182 @@ class Assembler : public AssemblerBase {
   // Parallel XMM operations.
   void movntdqa(XMMRegister dst, const Operand& src);
   void movntdq(const Operand& dst, XMMRegister src);
+
+  // AVX instructions
+  void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmadd132sd(dst, src1, Operand(src2));
+  }
+  void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmadd213sd(dst, src1, Operand(src2));
+  }
+  void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmadd231sd(dst, src1, Operand(src2));
+  }
+  void vfmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0x99, dst, src1, src2);
+  }
+  void vfmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xa9, dst, src1, src2);
+  }
+  void vfmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xb9, dst, src1, src2);
+  }
+  void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmsub132sd(dst, src1, Operand(src2));
+  }
+  void vfmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmsub213sd(dst, src1, Operand(src2));
+  }
+  void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmsub231sd(dst, src1, Operand(src2));
+  }
+  void vfmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0x9b, dst, src1, src2);
+  }
+  void vfmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xab, dst, src1, src2);
+  }
+  void vfmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xbb, dst, src1, src2);
+  }
+  void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmadd132sd(dst, src1, Operand(src2));
+  }
+  void vfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmadd213sd(dst, src1, Operand(src2));
+  }
+  void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmadd231sd(dst, src1, Operand(src2));
+  }
+  void vfnmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0x9d, dst, src1, src2);
+  }
+  void vfnmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xad, dst, src1, src2);
+  }
+  void vfnmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xbd, dst, src1, src2);
+  }
+  void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmsub132sd(dst, src1, Operand(src2));
+  }
+  void vfnmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmsub213sd(dst, src1, Operand(src2));
+  }
+  void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmsub231sd(dst, src1, Operand(src2));
+  }
+  void vfnmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0x9f, dst, src1, src2);
+  }
+  void vfnmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xaf, dst, src1, src2);
+  }
+  void vfnmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xbf, dst, src1, src2);
+  }
+  void vfmasd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
+  void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmadd132ss(dst, src1, Operand(src2));
+  }
+  void vfmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmadd213ss(dst, src1, Operand(src2));
+  }
+  void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmadd231ss(dst, src1, Operand(src2));
+  }
+  void vfmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0x99, dst, src1, src2);
+  }
+  void vfmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xa9, dst, src1, src2);
+  }
+  void vfmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xb9, dst, src1, src2);
+  }
+  void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmsub132ss(dst, src1, Operand(src2));
+  }
+  void vfmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmsub213ss(dst, src1, Operand(src2));
+  }
+  void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmsub231ss(dst, src1, Operand(src2));
+  }
+  void vfmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0x9b, dst, src1, src2);
+  }
+  void vfmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xab, dst, src1, src2);
+  }
+  void vfmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xbb, dst, src1, src2);
+  }
+  void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmadd132ss(dst, src1, Operand(src2));
+  }
+  void vfnmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmadd213ss(dst, src1, Operand(src2));
+  }
+  void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmadd231ss(dst, src1, Operand(src2));
+  }
+  void vfnmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0x9d, dst, src1, src2);
+  }
+  void vfnmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xad, dst, src1, src2);
+  }
+  void vfnmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xbd, dst, src1, src2);
+  }
+  void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmsub132ss(dst, src1, Operand(src2));
+  }
+  void vfnmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmsub213ss(dst, src1, Operand(src2));
+  }
+  void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfnmsub231ss(dst, src1, Operand(src2));
+  }
+  void vfnmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0x9f, dst, src1, src2);
+  }
+  void vfnmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xaf, dst, src1, src2);
+  }
+  void vfnmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xbf, dst, src1, src2);
+  }
+  void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
+  void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vaddsd(dst, src1, Operand(src2));
+  }
+  void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x58, dst, src1, src2);
+  }
+  void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsubsd(dst, src1, Operand(src2));
+  }
+  void vsubsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x5c, dst, src1, src2);
+  }
+  void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vmulsd(dst, src1, Operand(src2));
+  }
+  void vmulsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x59, dst, src1, src2);
+  }
+  void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vdivsd(dst, src1, Operand(src2));
+  }
+  void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x5e, dst, src1, src2);
+  }
+  void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
   // Prefetch src position into cache level.
   // Level 1, 2 or 3 specifies CPU cache level. Level 0 specifies a
   // non-temporal
@@ -1152,6 +1341,14 @@ class Assembler : public AssemblerBase {
 
   void emit_farith(int b1, int b2, int i);
 
+  // Emit vex prefix
+  enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+  enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128 };
+  enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
+  enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
+  inline void emit_vex_prefix(XMMRegister v, VectorLength l, SIMDPrefix pp,
+                              LeadingOpcode m, VexW w);
+
   // labels
   void print(Label* L);
   void bind_to(Label* L, int pos);
index eeddcd2..5767489 100644 (file)
@@ -160,18 +160,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       if (!is_api_function) {
         Label allocate;
         // The code below relies on these assumptions.
-        STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
-        STATIC_ASSERT(Map::ConstructionCount::kShift +
-                      Map::ConstructionCount::kSize == 32);
+        STATIC_ASSERT(Map::Counter::kShift + Map::Counter::kSize == 32);
         // Check if slack tracking is enabled.
         __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
-        __ shr(esi, Map::ConstructionCount::kShift);
-        __ j(zero, &allocate);  // JSFunction::kNoSlackTracking
+        __ shr(esi, Map::Counter::kShift);
+        __ cmp(esi, Map::kSlackTrackingCounterEnd);
+        __ j(less, &allocate);
         // Decrease generous allocation count.
         __ sub(FieldOperand(eax, Map::kBitField3Offset),
-               Immediate(1 << Map::ConstructionCount::kShift));
+               Immediate(1 << Map::Counter::kShift));
 
-        __ cmp(esi, JSFunction::kFinishSlackTracking);
+        __ cmp(esi, Map::kSlackTrackingCounterEnd);
         __ j(not_equal, &allocate);
 
         __ push(eax);
@@ -182,7 +181,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
 
         __ pop(edi);
         __ pop(eax);
-        __ xor_(esi, esi);  // JSFunction::kNoSlackTracking
+        __ mov(esi, Map::kSlackTrackingCounterEnd - 1);
 
         __ bind(&allocate);
       }
@@ -219,8 +218,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         Label no_inobject_slack_tracking;
 
         // Check if slack tracking is enabled.
-        __ cmp(esi, JSFunction::kNoSlackTracking);
-        __ j(equal, &no_inobject_slack_tracking);
+        __ cmp(esi, Map::kSlackTrackingCounterEnd);
+        __ j(less, &no_inobject_slack_tracking);
 
         // Allocate object with a slack.
         __ movzx_b(esi,
index caef04c..b75ae3a 100644 (file)
@@ -652,9 +652,20 @@ void MathPowStub::Generate(MacroAssembler* masm) {
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
+  if (FLAG_vector_ics) {
+    // With careful management, we won't have to save slot and vector on
+    // the stack. Simply handle the possibly-missing prototype case first.
+    // TODO(mvstanton): this code can be more efficient.
+    __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
+           Immediate(isolate()->factory()->the_hole_value()));
+    __ j(equal, &miss);
+    __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
+    __ ret(0);
+  } else {
+    NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
+                                                            ebx, &miss);
+  }
 
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
-                                                          ebx, &miss);
   __ bind(&miss);
   PropertyAccessCompiler::TailCallBuiltin(
       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@@ -697,11 +708,17 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
 
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register index = LoadDescriptor::NameRegister();
-  Register scratch = ebx;
+  Register scratch = edi;
   DCHECK(!scratch.is(receiver) && !scratch.is(index));
   Register result = eax;
   DCHECK(!result.is(scratch));
+  DCHECK(!FLAG_vector_ics ||
+         (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+          result.is(VectorLoadICDescriptor::SlotRegister())));
 
+  // StringCharAtGenerator doesn't use the result register until it is past
+  // all of the miss cases; if it did, the result register would conflict
+  // with the vector IC slot register when FLAG_vector_ics is true.
   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -2214,6 +2231,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
   // edi - function
   // edx - slot id
   Isolate* isolate = masm->isolate();
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -2253,35 +2274,66 @@ void CallICStub::Generate(MacroAssembler* masm) {
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
   __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
   __ j(equal, &slow_start);
+
+  // The following code attempts to handle common MISS cases without going
+  // to the runtime.
+  if (FLAG_trace_ic) {
+    __ jmp(&miss);
+  }
+
   __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
+  __ j(equal, &uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(ecx);
+  __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &miss);
+  __ mov(
+      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
+  // We have to update statistics for runtime profiling.
+  __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+  __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
+  __ jmp(&slow_start);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(edi, &miss);
+
+  // Goto miss case if we do not have a function.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+  __ cmp(edi, ecx);
   __ j(equal, &miss);
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(ecx);
-    __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
-    __ j(not_equal, &miss);
-    __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                        FixedArray::kHeaderSize),
-           Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
-    __ jmp(&slow_start);
-  }
+  // Update stats.
+  __ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+
+  // Store the function.
+  __ mov(
+      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+      edi);
+
+  // Update the write barrier.
+  __ mov(eax, edi);
+  __ RecordWriteArray(ebx, eax, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&have_js_function);
 
-  // We are here because tracing is on or we are going monomorphic.
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
 
@@ -3175,18 +3227,45 @@ void SubStringStub::Generate(MacroAssembler* masm) {
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in eax.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(eax, &check_heap_number, Label::kNear);
+  Label not_smi;
+  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
   __ Ret();
+  __ bind(&not_smi);
 
-  __ bind(&check_heap_number);
+  Label not_heap_number;
   __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
-  __ j(not_equal, &call_builtin, Label::kNear);
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  Label not_string, slow_string;
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
+  // eax: object
+  // edi: object map
+  __ j(above_equal, &not_string, Label::kNear);
+  // Check if string has a cached array index.
+  __ test(FieldOperand(eax, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  __ j(not_zero, &slow_string, Label::kNear);
+  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+  __ IndexFromHash(eax, eax);
   __ Ret();
+  __ bind(&slow_string);
+  __ pop(ecx);   // Pop return address.
+  __ push(eax);  // Push argument.
+  __ push(ecx);  // Push return address.
+  __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+  __ bind(&not_string);
 
-  __ bind(&call_builtin);
-  __ pop(ecx);  // Pop return address.
-  __ push(eax);
+  Label not_oddball;
+  __ CmpInstanceType(edi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ pop(ecx);   // Pop return address.
+  __ push(eax);  // Push argument.
   __ push(ecx);  // Push return address.
   __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
index eabb5a5..0b12fd0 100644 (file)
@@ -76,7 +76,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
                                      Register r0,
                                      Register r1);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
  private:
   static const int kInlinedProbes = 4;
@@ -142,7 +142,7 @@ class RecordWriteStub: public PlatformCodeStub {
     INCREMENTAL_COMPACTION
   };
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
   static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
   static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
@@ -339,9 +339,9 @@ class RecordWriteStub: public PlatformCodeStub {
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+  inline Major MajorKey() const FINAL { return RecordWrite; }
 
-  virtual void Generate(MacroAssembler* masm) OVERRIDE;
+  void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -349,7 +349,7 @@ class RecordWriteStub: public PlatformCodeStub {
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  void Activate(Code* code) {
+  void Activate(Code* code) OVERRIDE {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
index f40e23c..e451fcc 100644 (file)
@@ -27,7 +27,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
   HandleScope scope(isolate);
 
   // Compute the size of relocation information needed for the code
-  // patching in Deoptimizer::DeoptimizeFunction.
+  // patching in Deoptimizer::PatchCodeForDeoptimization below.
   int min_reloc_size = 0;
   int prev_pc_offset = 0;
   DeoptimizationInputData* deopt_data =
index 2523761..bf88f69 100644
@@ -246,6 +246,9 @@ class DisassemblerIA32 {
   DisassemblerIA32(const NameConverter& converter,
                    bool abort_on_unimplemented = true)
       : converter_(converter),
+        vex_byte0_(0),
+        vex_byte1_(0),
+        vex_byte2_(0),
         instruction_table_(InstructionTable::get_instance()),
         tmp_buffer_pos_(0),
         abort_on_unimplemented_(abort_on_unimplemented) {
@@ -260,6 +263,9 @@ class DisassemblerIA32 {
 
  private:
   const NameConverter& converter_;
+  byte vex_byte0_;  // 0xc4 or 0xc5; zero when no VEX prefix has been seen
+  byte vex_byte1_;
+  byte vex_byte2_;  // only set for the 3-byte (0xc4) VEX prefix
   InstructionTable* instruction_table_;
   v8::internal::EmbeddedVector<char, 128> tmp_buffer_;
   unsigned int tmp_buffer_pos_;
@@ -287,6 +293,57 @@ class DisassemblerIA32 {
     kSAR = 7
   };
 
+  bool vex_128() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return (checked & 4) == 0;  // L bit clear selects 128-bit operation
+  }
+
+  bool vex_66() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return (checked & 3) == 1;
+  }
+
+  bool vex_f3() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return (checked & 3) == 2;
+  }
+
+  bool vex_f2() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return (checked & 3) == 3;
+  }
+
+  bool vex_w() {
+    if (vex_byte0_ == 0xc5) return false;
+    return (vex_byte2_ & 0x80) != 0;
+  }
+
+  bool vex_0f() {
+    if (vex_byte0_ == 0xc5) return true;
+    return (vex_byte1_ & 3) == 1;
+  }
+
+  bool vex_0f38() {
+    if (vex_byte0_ == 0xc5) return false;
+    return (vex_byte1_ & 3) == 2;
+  }
+
+  bool vex_0f3a() {
+    if (vex_byte0_ == 0xc5) return false;
+    return (vex_byte1_ & 3) == 3;
+  }
+
+  int vex_vreg() {
+    DCHECK(vex_byte0_ == 0xc4 || vex_byte0_ == 0xc5);
+    byte checked = vex_byte0_ == 0xc4 ? vex_byte2_ : vex_byte1_;
+    return ~(checked >> 3) & 0xf;
+  }
+
+  char float_size_code() { return "sd"[vex_w()]; }
 
   const char* NameOfCPURegister(int reg) const {
     return converter_.NameOfCPURegister(reg);
@@ -340,6 +397,7 @@ class DisassemblerIA32 {
   int FPUInstruction(byte* data);
   int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
   int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
+  int AVXInstruction(byte* data);
   void AppendToBuffer(const char* format, ...);
 
 
@@ -679,6 +737,111 @@ int DisassemblerIA32::CMov(byte* data) {
 }
 
 
+int DisassemblerIA32::AVXInstruction(byte* data) {
+  byte opcode = *data;
+  byte* current = data + 1;
+  if (vex_66() && vex_0f38()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x99:
+        AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xa9:
+        AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xb9:
+        AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x9b:
+        AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xab:
+        AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xbb:
+        AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x9d:
+        AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xad:
+        AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xbd:
+        AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x9f:
+        AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xaf:
+        AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xbf:
+        AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else if (vex_f2() && vex_0f()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x58:
+        AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x59:
+        AppendToBuffer("vmulsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5c:
+        AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5e:
+        AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else {
+    UnimplementedInstruction();
+  }
+
+  return static_cast<int>(current - data);
+}
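
The FMA opcodes decoded above come in triples (e.g. 0x99/0xA9/0xB9 for vfmadd) whose names encode operand order: in the 132/213/231 suffix, counted over operand slots 1 (destination), 2 (vvvv) and 3 (r/m), the first two digits name the factors and the last the addend, with the result always landing in slot 1. In plain arithmetic:

    // a, b, c stand for operand slots 1, 2, 3 of the forms named above;
    // each function returns what ends up in slot 1.
    double fmadd132(double a, double b, double c) { return a * c + b; }
    double fmadd213(double a, double b, double c) { return b * a + c; }
    double fmadd231(double a, double b, double c) { return b * c + a; }
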
+
+
 // Returns number of bytes used, including *data.
 int DisassemblerIA32::FPUInstruction(byte* data) {
   byte escape_opcode = *data;
@@ -903,65 +1066,81 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
   } else if (*data == 0x2E /*cs*/) {
     branch_hint = "predicted not taken";
     data++;
+  } else if (*data == 0xC4 && *(data + 1) >= 0xc0) {
+    vex_byte0_ = *data;
+    vex_byte1_ = *(data + 1);
+    vex_byte2_ = *(data + 2);
+    data += 3;
+  } else if (*data == 0xC5 && *(data + 1) >= 0xc0) {
+    vex_byte0_ = *data;
+    vex_byte1_ = *(data + 1);
+    data += 2;
   }
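
The guard on the byte after 0xC4/0xC5 resolves a 32-bit-mode ambiguity: those opcodes are the legacy LES/LDS instructions, which require a memory operand and therefore can never be followed by a ModRM byte with mod == 11. VEX reuses exactly that impossible encoding, so a second byte >= 0xC0 marks a prefix rather than a load. As a standalone predicate:

    #include <cstdint>

    bool StartsVexPrefix(const uint8_t* insn) {
      // A mod field (top two bits) of 11 would be an illegal LES/LDS, so
      // it must be a VEX prefix; insn[1] >= 0xC0 tests the same condition.
      return (insn[0] == 0xC4 || insn[0] == 0xC5) &&
             (insn[1] & 0xC0) == 0xC0;
    }
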
+
   bool processed = true;  // Will be set to false if the current instruction
                           // is not in 'instructions' table.
-  const InstructionDesc& idesc = instruction_table_->Get(*data);
-  switch (idesc.type) {
-    case ZERO_OPERANDS_INSTR:
-      AppendToBuffer(idesc.mnem);
-      data++;
-      break;
+  // Decode AVX instructions.
+  if (vex_byte0_ != 0) {
+    data += AVXInstruction(data);
+  } else {
+    const InstructionDesc& idesc = instruction_table_->Get(*data);
+    switch (idesc.type) {
+      case ZERO_OPERANDS_INSTR:
+        AppendToBuffer(idesc.mnem);
+        data++;
+        break;
 
-    case TWO_OPERANDS_INSTR:
-      data++;
-      data += PrintOperands(idesc.mnem, idesc.op_order_, data);
-      break;
+      case TWO_OPERANDS_INSTR:
+        data++;
+        data += PrintOperands(idesc.mnem, idesc.op_order_, data);
+        break;
 
-    case JUMP_CONDITIONAL_SHORT_INSTR:
-      data += JumpConditionalShort(data, branch_hint);
-      break;
+      case JUMP_CONDITIONAL_SHORT_INSTR:
+        data += JumpConditionalShort(data, branch_hint);
+        break;
 
-    case REGISTER_INSTR:
-      AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
-      data++;
-      break;
+      case REGISTER_INSTR:
+        AppendToBuffer("%s %s", idesc.mnem, NameOfCPURegister(*data & 0x07));
+        data++;
+        break;
 
-    case MOVE_REG_INSTR: {
-      byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
-      AppendToBuffer("mov %s,%s",
-                     NameOfCPURegister(*data & 0x07),
-                     NameOfAddress(addr));
-      data += 5;
-      break;
-    }
+      case MOVE_REG_INSTR: {
+        byte* addr =
+            reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+        AppendToBuffer("mov %s,%s", NameOfCPURegister(*data & 0x07),
+                       NameOfAddress(addr));
+        data += 5;
+        break;
+      }
 
-    case CALL_JUMP_INSTR: {
-      byte* addr = data + *reinterpret_cast<int32_t*>(data+1) + 5;
-      AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
-      data += 5;
-      break;
-    }
+      case CALL_JUMP_INSTR: {
+        byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
+        AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
+        data += 5;
+        break;
+      }
 
-    case SHORT_IMMEDIATE_INSTR: {
-      byte* addr = reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data+1));
-      AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
-      data += 5;
-      break;
-    }
+      case SHORT_IMMEDIATE_INSTR: {
+        byte* addr =
+            reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+        AppendToBuffer("%s eax,%s", idesc.mnem, NameOfAddress(addr));
+        data += 5;
+        break;
+      }
 
-    case BYTE_IMMEDIATE_INSTR: {
-      AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
-      data += 2;
-      break;
-    }
+      case BYTE_IMMEDIATE_INSTR: {
+        AppendToBuffer("%s al,0x%x", idesc.mnem, data[1]);
+        data += 2;
+        break;
+      }
 
-    case NO_INSTR:
-      processed = false;
-      break;
+      case NO_INSTR:
+        processed = false;
+        break;
 
-    default:
-      UNIMPLEMENTED();  // This type is not implemented.
+      default:
+        UNIMPLEMENTED();  // This type is not implemented.
+    }
   }
   //----------------------------
   if (!processed) {
@@ -1047,6 +1226,12 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
                            NameOfXMMRegister(regop),
                            NameOfXMMRegister(rm));
             data++;
+          } else if (f0byte == 0x2e) {
+            data += 2;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop));
+            data += PrintRightXMMOperand(data);
           } else if (f0byte >= 0x53 && f0byte <= 0x5F) {
             const char* const pseudo_op[] = {
               "rcpps",
@@ -1617,12 +1802,36 @@ int DisassemblerIA32::InstructionDecode(v8::internal::Vector<char> out_buffer,
             get_modrm(*data, &mod, &regop, &rm);
             AppendToBuffer("cvttss2si %s,", NameOfCPURegister(regop));
             data += PrintRightXMMOperand(data);
+          } else if (b2 == 0x58) {
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("addss %s,", NameOfXMMRegister(regop));
+            data += PrintRightXMMOperand(data);
+          } else if (b2 == 0x59) {
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("mulss %s,", NameOfXMMRegister(regop));
+            data += PrintRightXMMOperand(data);
           } else if (b2 == 0x5A) {
             data += 3;
             int mod, regop, rm;
             get_modrm(*data, &mod, &regop, &rm);
             AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
             data += PrintRightXMMOperand(data);
+          } else if (b2 == 0x5c) {
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("subss %s,", NameOfXMMRegister(regop));
+            data += PrintRightXMMOperand(data);
+          } else if (b2 == 0x5e) {
+            data += 3;
+            int mod, regop, rm;
+            get_modrm(*data, &mod, &regop, &rm);
+            AppendToBuffer("divss %s,", NameOfXMMRegister(regop));
+            data += PrintRightXMMOperand(data);
           } else if (b2 == 0x6F) {
             data += 3;
             int mod, regop, rm;
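
These hunks fill out the F3 0F (scalar single-precision) opcode group to match the F2 0F double-precision handling that already existed, plus ucomiss at 0F 2E. The low opcode byte is shared across precisions; only the mandatory prefix differs:

    #include <cstdint>
    #include <cstdio>

    struct OpName { uint8_t op; const char* ss; const char* sd; };
    // Same opcode byte: F3-prefixed -> single, F2-prefixed -> double.
    constexpr OpName kScalarOps[] = {
        {0x58, "addss", "addsd"},
        {0x59, "mulss", "mulsd"},
        {0x5C, "subss", "subsd"},
        {0x5E, "divss", "divsd"},
    };

    int main() {
      for (const auto& e : kScalarOps)
        std::printf("0F %02X: F3->%s  F2->%s\n", e.op, e.ss, e.sd);
    }
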
index acf59b4..1ba4095 100644
@@ -188,10 +188,10 @@ void FullCodeGenerator::Generate() {
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in edi.
-    if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+    if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
       __ push(edi);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewScriptContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -872,7 +872,7 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   EmitDebugCheckDeclarationContext(variable);
 
   // Load instance object.
-  __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope()));
+  __ LoadContext(eax, scope_->ContextChainLength(scope_->ScriptScope()));
   __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
   __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
 
@@ -1045,6 +1045,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 
   // Get the object to enumerate over. If the object is null or undefined, skip
   // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  SetExpressionPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
   __ cmp(eax, isolate()->factory()->undefined_value());
   __ j(equal, &exit);
@@ -1139,6 +1140,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // Generate code for doing the condition check.
   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
+  SetExpressionPosition(stmt->each());
+
   __ mov(eax, Operand(esp, 0 * kPointerSize));  // Get the current index.
   __ cmp(eax, Operand(esp, 1 * kPointerSize));  // Compare to the array length.
   __ j(above_equal, loop_statement.break_label());
@@ -1205,48 +1208,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 }
 
 
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
-  Comment cmnt(masm_, "[ ForOfStatement");
-  SetStatementPosition(stmt);
-
-  Iteration loop_statement(this, stmt);
-  increment_loop_depth();
-
-  // var iterator = iterable[Symbol.iterator]();
-  VisitForEffect(stmt->assign_iterator());
-
-  // Loop entry.
-  __ bind(loop_statement.continue_label());
-
-  // result = iterator.next()
-  VisitForEffect(stmt->next_result());
-
-  // if (result.done) break;
-  Label result_not_done;
-  VisitForControl(stmt->result_done(),
-                  loop_statement.break_label(),
-                  &result_not_done,
-                  &result_not_done);
-  __ bind(&result_not_done);
-
-  // each = result.value
-  VisitForEffect(stmt->assign_each());
-
-  // Generate code for the body of the loop.
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
-  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
-  __ jmp(loop_statement.continue_label());
-
-  // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ bind(loop_statement.break_label());
-  decrement_loop_depth();
-}
-
-
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
   // Use the fast case closure allocation code that allocates in new
@@ -1306,6 +1267,19 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
 }
 
 
+void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
+                                                  int offset) {
+  if (NeedsHomeObject(initializer)) {
+    __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
+    __ mov(StoreDescriptor::NameRegister(),
+           Immediate(isolate()->factory()->home_object_symbol()));
+    __ mov(StoreDescriptor::ValueRegister(),
+           Operand(esp, offset * kPointerSize));
+    CallStoreIC();
+  }
+}
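
EmitSetHomeObjectIfNeeded wires up `super`: when a method, getter or setter references super properties, the enclosing literal (sitting `offset` slots down the stack, with the just-materialized function on top) is stored into the function's home-object property, so later super lookups know which prototype chain to start from. The differing offsets at the call sites below (2 for a plain value or getter, 3 for a setter) simply track how many values have been pushed above the duplicated receiver. A toy model of the lookup this enables, not V8's representation:

    #include <map>
    #include <string>

    struct JSObj {
      JSObj* proto = nullptr;
      std::map<std::string, int> props;
    };

    // super.name inside a method starts at the *home object's* prototype,
    // independent of the receiver the method was invoked on.
    int SuperLoad(const JSObj* home, const std::string& name) {
      for (const JSObj* o = home->proto; o != nullptr; o = o->proto) {
        auto it = o->props.find(name);
        if (it != o->props.end()) return it->second;
      }
      return -1;  // absent (a real engine would produce undefined)
    }
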
+
+
 void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
@@ -1670,6 +1644,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
             __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+            if (NeedsHomeObject(value)) {
+              __ mov(StoreDescriptor::ReceiverRegister(), eax);
+              __ mov(StoreDescriptor::NameRegister(),
+                     Immediate(isolate()->factory()->home_object_symbol()));
+              __ mov(StoreDescriptor::ValueRegister(), Operand(esp, 0));
+              CallStoreIC();
+            }
           } else {
             VisitForEffect(value);
           }
@@ -1679,6 +1661,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
+          EmitSetHomeObjectIfNeeded(value, 2);
          __ push(Immediate(Smi::FromInt(SLOPPY)));  // Language mode
           __ CallRuntime(Runtime::kSetProperty, 4);
         } else {
@@ -1711,7 +1694,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
     __ push(Operand(esp, 0));  // Duplicate receiver.
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
+    EmitSetHomeObjectIfNeeded(it->second->getter, 2);
     EmitAccessor(it->second->setter);
+    EmitSetHomeObjectIfNeeded(it->second->setter, 3);
     __ push(Immediate(Smi::FromInt(NONE)));
     __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
   }
@@ -2140,15 +2125,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   VisitForAccumulatorValue(value);
   __ pop(ebx);
 
-  // Check generator state.
-  Label wrong_state, closed_state, done;
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
-  __ cmp(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
-         Immediate(Smi::FromInt(0)));
-  __ j(equal, &closed_state);
-  __ j(less, &wrong_state);
-
   // Load suspended function and context.
   __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
   __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
@@ -2170,7 +2146,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
 
   // Enter a new JavaScript frame, and initialize its slots as they were when
   // the generator was suspended.
-  Label resume_frame;
+  Label resume_frame, done;
   __ bind(&push_frame);
   __ call(&resume_frame);
   __ jmp(&done);
@@ -2217,25 +2193,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   // Not reached: the runtime call returns elsewhere.
   __ Abort(kGeneratorFailedToResume);
 
-  // Reach here when generator is closed.
-  __ bind(&closed_state);
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    // Return completed iterator result when generator is closed.
-    __ push(Immediate(isolate()->factory()->undefined_value()));
-    // Pop value from top-of-stack slot; box result into result register.
-    EmitCreateIteratorResult(true);
-  } else {
-    // Throw the provided value.
-    __ push(eax);
-    __ CallRuntime(Runtime::kThrow, 1);
-  }
-  __ jmp(&done);
-
-  // Throw error if we attempt to operate on a running generator.
-  __ bind(&wrong_state);
-  __ push(ebx);
-  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
   __ bind(&done);
   context()->Plug(result_register());
 }
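
EmitGeneratorResume loses its closed- and running-generator checks; only the happy path (a genuinely suspended generator) is generated here, the error cases evidently being rejected before this code runs. The removed branches keyed off the continuation field's encoding, pinned by the STATIC_ASSERTs deleted above:

    // Continuation encoding per the deleted STATIC_ASSERTs: negative means
    // currently executing, 0 means closed, and a positive value is the
    // offset at which the suspended generator resumes.
    enum class GeneratorState { kExecuting, kClosed, kSuspended };

    GeneratorState Classify(int continuation) {
      if (continuation < 0) return GeneratorState::kExecuting;
      if (continuation == 0) return GeneratorState::kClosed;
      return GeneratorState::kSuspended;
    }
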
@@ -2448,6 +2405,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
     }
     VisitForStackValue(key);
     VisitForStackValue(value);
+    EmitSetHomeObjectIfNeeded(value, 2);
 
     switch (property->kind()) {
       case ObjectLiteral::Property::CONSTANT:
@@ -2617,7 +2575,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
     __ CallRuntime(Runtime::kThrowReferenceError, 1);
     __ bind(&assign);
     EmitStoreToStackLocalOrContextSlot(var, location);
-
   } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
@@ -2639,8 +2596,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
+  } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+    __ CallRuntime(Runtime::kThrowConstAssignError, 0);
   }
-  // Non-initializing assignments to consts are ignored.
 }
 
 
@@ -5031,7 +4989,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope() ||
+  if (declaration_scope->is_script_scope() ||
       declaration_scope->is_module_scope()) {
     // Contexts nested in the native context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
index c64a4b0..03a0d8a 100644
@@ -31,9 +31,9 @@ class SafepointGenerator FINAL : public CallWrapper {
         deopt_mode_(mode) {}
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const OVERRIDE {}
+  void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const OVERRIDE {
+  void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
@@ -2627,10 +2627,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
    private:
     LInstanceOfKnownGlobal* instr_;
@@ -2753,6 +2753,7 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
     }
     __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
   } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
     Register reg = ToRegister(instr->parameter_count());
     // The argument count parameter is a smi
     __ SmiUntag(reg);
@@ -2770,6 +2771,7 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
     if (dynamic_frame_alignment) {
       __ inc(reg);  // 1 more for alignment
     }
+
     __ shl(reg, kPointerSizeLog2);
     __ add(esp, reg);
     __ jmp(return_addr_reg);
@@ -2829,14 +2831,17 @@ template <class T>
 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   DCHECK(FLAG_vector_ics);
   Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = VectorLoadICDescriptor::SlotRegister();
   DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(eax));
+
+  AllowDeferredHandleDereference vector_structure_check;
   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   __ mov(vector_register, vector);
   // No need to allocate this register.
-  DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
-  int index = vector->GetIndex(instr->hydrogen()->slot());
-  __ mov(VectorLoadICDescriptor::SlotRegister(),
-         Immediate(Smi::FromInt(index)));
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Immediate(Smi::FromInt(index)));
 }
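
EmitVectorLoadICRegisters now loads both halves of the IC's feedback coordinates explicitly: the per-function type-feedback vector as a constant handle, and the slot as a smi index into it. The contract is plain array indexing; a toy version with ordinary types:

    #include <vector>

    struct FeedbackEntry { int state = 0; };  // stand-in for real IC state

    // The IC reads and updates vector[slot]; the vector identifies the
    // function, the slot identifies the particular load site within it.
    FeedbackEntry& IcEntry(std::vector<FeedbackEntry>& vector, int slot) {
      return vector[slot];
    }
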
 
 
@@ -3450,45 +3455,81 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
   Register name = ToRegister(instr->name());
   DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(name.is(LoadDescriptor::NameRegister()));
+  Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+  Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
 
   Register scratch = ebx;
-  Register extra = eax;
+  Register extra = edi;
+  DCHECK(!extra.is(slot) && !extra.is(vector));
   DCHECK(!scratch.is(receiver) && !scratch.is(name));
   DCHECK(!extra.is(receiver) && !extra.is(name));
 
   // Important for the tail-call.
   bool must_teardown_frame = NeedsEagerFrame();
 
-  // The probe will tail call to a handler if found.
-  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
-                                         must_teardown_frame, receiver, name,
-                                         scratch, extra);
+  if (!instr->hydrogen()->is_just_miss()) {
+    if (FLAG_vector_ics) {
+      __ push(slot);
+      __ push(vector);
+    }
+
+    // The probe will tail call to a handler if found.
+    // If --vector-ics is on, then it knows to pop the two args first.
+    DCHECK(!instr->hydrogen()->is_keyed_load());
+    isolate()->stub_cache()->GenerateProbe(
+        masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+        receiver, name, scratch, extra);
+
+    if (FLAG_vector_ics) {
+      __ pop(vector);
+      __ pop(slot);
+    }
+  }
 
   // Tail call to miss if we ended up here.
   if (must_teardown_frame) __ leave();
-  LoadIC::GenerateMiss(masm());
+  if (instr->hydrogen()->is_keyed_load()) {
+    KeyedLoadIC::GenerateMiss(masm());
+  } else {
+    LoadIC::GenerateMiss(masm());
+  }
 }
 
 
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   DCHECK(ToRegister(instr->result()).is(eax));
 
-  LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ leave();
 
-  if (instr->target()->IsConstantOperand()) {
-    LConstantOperand* target = LConstantOperand::cast(instr->target());
-    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-    __ call(code, RelocInfo::CODE_TARGET);
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ jmp(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ jmp(target);
+    }
   } else {
-    DCHECK(instr->target()->IsRegister());
-    Register target = ToRegister(instr->target());
-    generator.BeforeCall(__ CallSize(Operand(target)));
-    __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ call(target);
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      __ call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(Operand(target)));
+      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ call(target);
+    }
+    generator.AfterCall();
   }
-  generator.AfterCall();
 }
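
DoCallWithDescriptor now splits on IsTailCall(): a tail call tears down the frame (leave) and jumps, so control never returns here and no safepoint bookkeeping is recorded; the regular path keeps the SafepointGenerator wrapping as before. The shape of the difference in ordinary C++:

    int callee(int x) { return x + 1; }

    // Normal call: control returns here, so the runtime needs a safepoint
    // describing this frame while callee runs.
    int caller_normal(int x) {
      int r = callee(x);
      return r;
    }

    // Tail position: a compiler may emit a plain jmp after popping the
    // frame; there is no "here" to come back to.
    int caller_tail(int x) { return callee(x); }
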
 
 
@@ -3584,10 +3625,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
                                     LMathAbs* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LMathAbs* instr_;
   };
@@ -4338,10 +4380,9 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
     DeferredStringCharCodeAt(LCodeGen* codegen,
                              LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStringCharCodeAt(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4396,10 +4437,11 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
     DeferredStringCharFromCode(LCodeGen* codegen,
                                LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharFromCode* instr_;
   };
@@ -4474,11 +4516,12 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
     DeferredNumberTagI(LCodeGen* codegen,
                        LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(
           instr_, instr_->value(), instr_->temp(), SIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagI* instr_;
   };
@@ -4500,11 +4543,12 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
    public:
     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(
           instr_, instr_->value(), instr_->temp(), UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagU* instr_;
   };
@@ -4582,10 +4626,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
    public:
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredNumberTagD(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagD* instr_;
   };
@@ -4782,10 +4825,9 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
    public:
     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredTaggedToI(instr_, done());
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_, done()); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LTaggedToI* instr_;
   };
@@ -4986,11 +5028,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
         : LDeferredCode(codegen), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -5127,10 +5170,9 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
    public:
     DeferredAllocate(LCodeGen* codegen,  LAllocate* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredAllocate(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LAllocate* instr_;
   };
@@ -5493,10 +5535,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
    public:
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStackCheck(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStackCheck* instr_;
   };
@@ -5643,10 +5684,11 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
           object_(object),
           index_(index) {
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LLoadFieldByIndex* instr_;
     Register object_;
index 8b20e86..3be2fc4 100644
@@ -1138,9 +1138,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
       UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
   LOperand* name_register =
       UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+    vector =
+        UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+  }
+
   // Not marked as call. It can't deoptimize, and it never returns.
   return new (zone()) LTailCallThroughMegamorphicCache(
-      context, receiver_register, name_register);
+      context, receiver_register, name_register, slot, vector);
 }
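
With --vector-ics, the megamorphic tail call must also carry the feedback slot and vector in their fixed IC registers, so LTailCallThroughMegamorphicCache grows from three fixed inputs to five (see the class change further below), taking NULL operands when the flag is off. The storage pattern is the fixed-size input template used throughout this file; a reduced sketch:

    #include <array>
    #include <cstddef>

    // Reduced shape of LTemplateInstruction<0, I, 0>: operands live in a
    // fixed-size array and are exposed by position.
    template <std::size_t I>
    struct FixedInputInstr {
      std::array<const void*, I> inputs_{};
      const void* input_at(std::size_t i) const { return inputs_[i]; }
    };

    // context, receiver, name, slot, vector
    using TailCallThroughCache = FixedInputInstr<5>;
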
 
 
@@ -2115,7 +2123,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object =
       UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
@@ -2176,7 +2184,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
   LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
@@ -2241,7 +2249,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
   LLoadKeyedGeneric* result =
index 75fed82..49eba66 100644 (file)
@@ -167,17 +167,13 @@ class LCodeGen;
   V(WrapReceiver)
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const FINAL OVERRIDE {                      \
-    return LInstruction::k##type;                                           \
-  }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;   \
-  virtual const char* Mnemonic() const FINAL OVERRIDE {               \
-    return mnemonic;                                                        \
-  }                                                                         \
-  static L##type* cast(LInstruction* instr) {                               \
-    DCHECK(instr->Is##type());                                              \
-    return reinterpret_cast<L##type*>(instr);                               \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const FINAL { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) FINAL;              \
+  const char* Mnemonic() const FINAL { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
   }
 
 
@@ -292,11 +288,9 @@ class LTemplateResultInstruction : public LInstruction {
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const FINAL OVERRIDE {
-    return R != 0 && result() != NULL;
-  }
+  bool HasResult() const FINAL { return R != 0 && result() != NULL; }
   void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() const { return results_[0]; }
+  LOperand* result() const OVERRIDE { return results_[0]; }
 
  protected:
   EmbeddedContainer<LOperand*, R> results_;
@@ -314,11 +308,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
 
  private:
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return I; }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
+  int TempCount() FINAL { return T; }
+  LOperand* TempAt(int i) FINAL { return temps_[i]; }
 };
 
 
@@ -332,8 +326,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const FINAL OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsGap() const FINAL { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
     DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
@@ -373,7 +367,7 @@ class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -385,13 +379,13 @@ class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
-  virtual bool IsControl() const OVERRIDE { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return false;
   }
 
@@ -426,7 +420,7 @@ class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
 
 class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const OVERRIDE { return true; }
+  bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
@@ -437,12 +431,10 @@ class LLabel FINAL : public LGap {
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -460,9 +452,7 @@ class LLabel FINAL : public LGap {
 
 class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
@@ -481,19 +471,23 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
 
 
 class LTailCallThroughMegamorphicCache FINAL
-    : public LTemplateInstruction<0, 3, 0> {
+    : public LTemplateInstruction<0, 5, 0> {
  public:
-  explicit LTailCallThroughMegamorphicCache(LOperand* context,
-                                            LOperand* receiver,
-                                            LOperand* name) {
+  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+                                   LOperand* name, LOperand* slot,
+                                   LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = receiver;
     inputs_[2] = name;
+    inputs_[3] = slot;
+    inputs_[4] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
   LOperand* name() { return inputs_[2]; }
+  LOperand* slot() { return inputs_[3]; }
+  LOperand* vector() { return inputs_[4]; }
 
   DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
                                "tail-call-through-megamorphic-cache")
@@ -503,9 +497,7 @@ class LTailCallThroughMegamorphicCache FINAL
 
 class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };
 
@@ -515,7 +507,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const FINAL OVERRIDE { return true; }
+  bool IsControl() const FINAL { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -608,7 +600,7 @@ class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
 
   DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -857,7 +849,7 @@ class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1038,7 +1030,7 @@ class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1055,7 +1047,7 @@ class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1070,7 +1062,7 @@ class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1088,7 +1080,7 @@ class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1108,7 +1100,7 @@ class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
                                "string-compare-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Token::Value op() const { return hydrogen()->token(); }
 };
@@ -1128,7 +1120,7 @@ class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1157,7 +1149,7 @@ class LHasCachedArrayIndexAndBranch FINAL
   DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
                                "has-cached-array-index-and-branch")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1190,7 +1182,7 @@ class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1391,7 +1383,7 @@ class LBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1542,11 +1534,9 @@ class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
 
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticD;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -1569,11 +1559,9 @@ class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* left() { return inputs_[1]; }
   LOperand* right() { return inputs_[2]; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticT;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
   Token::Value op() const { return op_; }
 
@@ -1686,7 +1674,7 @@ class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
   bool key_is_smi() {
     return hydrogen()->key()->representation().IsTagged();
@@ -1784,7 +1772,7 @@ class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1805,7 +1793,7 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1844,7 +1832,7 @@ class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
   LOperand* function() { return inputs_[0]; }
   LOperand* code_object() { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
   DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@@ -1861,7 +1849,7 @@ class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
@@ -1905,7 +1893,7 @@ class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1927,18 +1915,18 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return inputs_.length(); }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+  int TempCount() FINAL { return 0; }
+  LOperand* TempAt(int i) FINAL { return NULL; }
 };
 
 
@@ -1955,7 +1943,7 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1991,7 +1979,7 @@ class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -2010,7 +1998,7 @@ class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -2027,7 +2015,7 @@ class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -2219,7 +2207,7 @@ class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -2238,7 +2226,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
@@ -2269,7 +2257,7 @@ class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
@@ -2295,7 +2283,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
@@ -2322,7 +2310,7 @@ class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2612,15 +2600,13 @@ class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
 class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
@@ -2829,7 +2815,7 @@ class LChunkBuilder FINAL : public LChunkBuilderBase {
 
  // An input operand in a register, a stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
index 098415e..38259d7 100644 (file)
@@ -740,16 +740,16 @@ void MacroAssembler::CheckMap(Register obj,
 }
 
 
-void MacroAssembler::DispatchMap(Register obj,
-                                 Register unused,
-                                 Handle<Map> map,
-                                 Handle<Code> success,
-                                 SmiCheckType smi_check_type) {
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+                                     Register scratch2, Handle<WeakCell> cell,
+                                     Handle<Code> success,
+                                     SmiCheckType smi_check_type) {
   Label fail;
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, &fail);
   }
-  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+  mov(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
+  CmpWeakValue(scratch1, cell, scratch2);
   j(equal, success);
 
   bind(&fail);
@@ -1356,10 +1356,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   }
 
   bind(&done);
-  // Check that the value is a normal propety.
+  // Check that the value is a field property.
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  DCHECK_EQ(NORMAL, 0);
+  DCHECK_EQ(FIELD, 0);
   test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
        Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
   j(not_zero, miss);
@@ -2580,6 +2580,21 @@ void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
 }
 
 
+void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
+                                  Register scratch) {
+  mov(scratch, cell);
+  cmp(value, FieldOperand(scratch, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+                                   Label* miss) {
+  mov(value, cell);
+  mov(value, FieldOperand(value, WeakCell::kValueOffset));
+  JumpIfSmi(value, miss);
+}
+
+
 void MacroAssembler::Ret() {
   ret(0);
 }
@@ -3157,18 +3172,6 @@ void MacroAssembler::CheckPageFlagForMap(
 }
 
 
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
-                                        Register scratch,
-                                        Label* if_deprecated) {
-  if (map->CanBeDeprecated()) {
-    mov(scratch, map);
-    mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
-    and_(scratch, Immediate(Map::Deprecated::kMask));
-    j(not_zero, if_deprecated);
-  }
-}
-
-
 void MacroAssembler::JumpIfBlack(Register object,
                                  Register scratch0,
                                  Register scratch1,
index 383233b..83f6216 100644 (file)
@@ -94,10 +94,6 @@ class MacroAssembler: public Assembler {
       Label* condition_met,
       Label::Distance condition_met_distance = Label::kFar);
 
-  void CheckMapDeprecated(Handle<Map> map,
-                          Register scratch,
-                          Label* if_deprecated);
-
   // Check if object is in new space.  Jumps if the object is not in new space.
   // The register scratch can be object itself, but scratch will be clobbered.
   void JumpIfNotInNewSpace(Register object,
@@ -299,6 +295,13 @@ class MacroAssembler: public Assembler {
     }
   }
 
+  // Compare the given value and the value of the weak cell.
+  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+
+  // Load the value of the weak cell into the value register. Branch to the
+  // given miss label if the weak cell was cleared.
+  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
@@ -408,14 +411,12 @@ class MacroAssembler: public Assembler {
                 Label* fail,
                 SmiCheckType smi_check_type);
 
-  // Check if the map of an object is equal to a specified map and branch to a
-  // specified target if equal. Skip the smi check if not required (object is
-  // known to be a heap object)
-  void DispatchMap(Register obj,
-                   Register unused,
-                   Handle<Map> map,
-                   Handle<Code> success,
-                   SmiCheckType smi_check_type);
+  // Check if the map of an object is equal to a specified weak map and branch
+  // to a specified target if equal. Skip the smi check if not required
+  // (object is known to be a heap object)
+  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+                       Handle<WeakCell> cell, Handle<Code> success,
+                       SmiCheckType smi_check_type);
 
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
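As a rough illustration of what the new DispatchWeakMap/CmpWeakValue pair
encodes, here is a plain-C++ analogue; the Map, WeakCell and HeapObject
structs below are simplified stand-ins for the V8 heap layouts, not the
real types.

#include <cstdio>

struct Map {};                    // stand-in for the real map object
struct WeakCell { Map* value; };  // value is nulled by the GC analogue
struct HeapObject { Map* map; };  // every heap object points at its map

// Analogue of CmpWeakValue plus the branch in DispatchWeakMap: succeed
// only if the cell still holds this object's map; a cleared cell can
// never compare equal, so stale code paths degrade into misses.
bool DispatchWeakMap(const HeapObject& obj, const WeakCell& cell) {
  return cell.value != nullptr && obj.map == cell.value;
}

int main() {
  Map m;
  HeapObject obj{&m};
  WeakCell cell{&m};
  std::printf("live cell:    %d\n", DispatchWeakMap(obj, cell));  // 1
  cell.value = nullptr;  // the GC analogue clears the weak cell
  std::printf("cleared cell: %d\n", DispatchWeakMap(obj, cell));  // 0
}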
index 928b70b..fe4369a 100644 (file)
@@ -40,7 +40,10 @@ class PropertyAccessCompiler BASE_EMBEDDED {
         kind_(kind),
         cache_holder_(cache_holder),
         isolate_(isolate),
-        masm_(isolate, NULL, 256) {}
+        masm_(isolate, NULL, 256) {
+    // TODO(yangguo): remove this once we can serialize IC stubs.
+    masm_.enable_serializer();
+  }
 
   Code::Kind kind() const { return kind_; }
   CacheHolderFlag cache_holder() const { return cache_holder_; }
@@ -51,6 +54,14 @@ class PropertyAccessCompiler BASE_EMBEDDED {
 
   Register receiver() const { return registers_[0]; }
   Register name() const { return registers_[1]; }
+  Register slot() const {
+    DCHECK(FLAG_vector_ics);
+    return VectorLoadICDescriptor::SlotRegister();
+  }
+  Register vector() const {
+    DCHECK(FLAG_vector_ics);
+    return VectorLoadICDescriptor::VectorRegister();
+  }
   Register scratch1() const { return registers_[2]; }
   Register scratch2() const { return registers_[3]; }
   Register scratch3() const { return registers_[4]; }
index b29e786..9905e4e 100644 (file)
@@ -92,6 +92,28 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
 }
 
 
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ push(vector);
+  __ push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ pop(slot);
+  __ pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ add(sp, sp, Operand(2 * kPointerSize));
+}
+
+
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -140,26 +162,16 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
 
 
 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
-  Isolate* isolate = masm->isolate();
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->native_context()->get(index)));
-
-  // Check we're still in the same context.
-  Register scratch = prototype;
+    MacroAssembler* masm, int index, Register result, Label* miss) {
   const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ ldr(scratch, MemOperand(cp, offset));
-  __ ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ ldr(scratch, MemOperand(scratch, Context::SlotOffset(index)));
-  __ Move(ip, function);
-  __ cmp(ip, scratch);
-  __ b(ne, miss);
-
+  __ ldr(result, MemOperand(cp, offset));
+  __ ldr(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  __ ldr(result, MemOperand(result, Context::SlotOffset(index)));
   // Load its initial map. The global functions all have initial maps.
-  __ Move(prototype, Handle<Map>(function->initial_map()));
+  __ ldr(result,
+         FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
   // Load the prototype from the initial map.
-  __ ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+  __ ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
 }
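The rewritten prototype load above is a straight pointer chase with no
context check; a simplified C++ model of the chain (all struct names are
stand-ins for the corresponding V8 heap layouts) might look like:

#include <cassert>

struct Map { Map* prototype; };
struct JSFunction { Map* initial_map; };
struct NativeContext { JSFunction* slots[16]; };
struct GlobalObject { NativeContext* native_context; };
struct Context { GlobalObject* global_object; };

// context -> global object -> native context -> function slot
// -> initial map -> prototype. Reading the function out of the current
// context each time is what makes the old "same context" check redundant.
Map* LoadGlobalFunctionPrototype(Context* cp, int index) {
  JSFunction* fn = cp->global_object->native_context->slots[index];
  return fn->initial_map->prototype;
}

int main() {
  Map proto{nullptr};
  Map initial{&proto};
  JSFunction fn{&initial};
  NativeContext native{};
  native.slots[0] = &fn;
  GlobalObject global{&native};
  Context cp{&global};
  assert(LoadGlobalFunctionPrototype(&cp, 0) == &proto);
}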
 
 
@@ -326,18 +338,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
 }
 
 
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
-    Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
   __ mov(this->name(), Operand(name));
-  __ mov(StoreTransitionDescriptor::MapRegister(), Operand(transition));
 }
 
 
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+                                                   Register scratch,
+                                                   Label* miss) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+  Register map_reg = StoreTransitionDescriptor::MapRegister();
+  DCHECK(!map_reg.is(scratch));
+  __ LoadWeakValue(map_reg, cell, miss);
+  if (transition->CanBeDeprecated()) {
+    __ ldr(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+    __ tst(scratch, Operand(Map::Deprecated::kMask));
+    __ b(ne, miss);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+                                                      int descriptor,
                                                       Register value_reg,
+                                                      Register scratch,
                                                       Label* miss_label) {
-  __ Move(scratch1(), handle(constant, isolate()));
-  __ cmp(value_reg, scratch1());
+  DCHECK(!map_reg.is(scratch));
+  DCHECK(!map_reg.is(value_reg));
+  DCHECK(!value_reg.is(scratch));
+  __ LoadInstanceDescriptors(map_reg, scratch);
+  __ ldr(scratch,
+         FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+  __ cmp(value_reg, scratch);
   __ b(ne, miss_label);
 }
 
@@ -416,11 +448,11 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       __ ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
       Register map_reg = scratch1;
+      __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
       if (depth != 1 || check == CHECK_ALL_MAPS) {
-        // CheckMap implicitly loads the map of |reg| into |map_reg|.
-        __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
-      } else {
-        __ ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+        __ CmpWeakValue(map_reg, cell, scratch2);
+        __ b(ne, miss);
       }
 
       // Check access rights to the global object.  This has to happen after
@@ -438,17 +470,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
 
       reg = holder_reg;  // From now on the object will be in holder_reg.
 
-      // Two possible reasons for loading the prototype from the map:
-      // (1) Can't store references to new space in code.
-      // (2) Handler is shared for all receivers with the same prototype
-      //     map (but not necessarily the same prototype instance).
-      bool load_prototype_from_map =
-          heap()->InNewSpace(*prototype) || depth == 1;
-      if (load_prototype_from_map) {
-        __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      } else {
-        __ mov(reg, Operand(prototype));
-      }
+      __ ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
     }
 
     // Go to the next object in the prototype chain.
@@ -461,7 +483,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
 
   if (depth != 0 || check == CHECK_ALL_MAPS) {
     // Check the holder map.
-    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+    __ ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ b(ne, miss);
   }
 
   // Perform security check for access to the global object.
@@ -481,6 +506,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
     Label success;
     __ b(&success);
     __ bind(miss);
+    if (IC::ICUseVector(kind())) {
+      DCHECK(kind() == Code::LOAD_IC);
+      PopVectorAndSlot();
+    }
     TailCallBuiltin(masm(), MissBuiltin(kind()));
     __ bind(&success);
   }
@@ -579,6 +608,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     } else {
       __ Push(holder_reg, this->name());
     }
+    InterceptorVectorSlotPush(holder_reg);
    // Invoke an interceptor.  Note: map checks from the receiver to the
    // interceptor's holder have been compiled before (see a caller of
    // this method).
@@ -596,6 +626,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     __ Ret();
 
     __ bind(&interceptor_failed);
+    InterceptorVectorSlotPop(holder_reg);
     __ pop(this->name());
     __ pop(holder_reg);
     if (must_preserve_receiver_reg) {
@@ -625,7 +656,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name,
     Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = Frontend(receiver(), name);
+  Register holder_reg = Frontend(name);
 
   __ push(receiver());  // receiver
   __ push(holder_reg);
@@ -666,11 +697,15 @@ Register NamedStoreHandlerCompiler::value() {
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
     Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
   Label miss;
+  if (IC::ICUseVector(kind())) {
+    PushVectorAndSlot();
+  }
   FrontendHeader(receiver(), name, &miss);
 
   // Get the value from the cell.
   Register result = StoreDescriptor::ValueRegister();
-  __ mov(result, Operand(cell));
+  Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+  __ LoadWeakValue(result, weak_cell, &miss);
   __ ldr(result, FieldMemOperand(result, Cell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -682,6 +717,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
 
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_load_global_stub(), 1, r1, r3);
+  if (IC::ICUseVector(kind())) {
+    DiscardVectorAndSlot();
+  }
   __ Ret();
 
   FrontendFooter(name, &miss);
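The global load now reaches the PropertyCell through a weak cell, so a
cell cleared by the GC and a deleted property both take the miss path.
A minimal sketch of that logic, assuming simplified stand-in types:

#include <optional>

struct Object {};
static Object kTheHole;  // stand-in for the hole sentinel

struct PropertyCell { Object* value; };
struct WeakCell { PropertyCell* value; };  // nullptr once cleared by GC

// Returns the value, or nothing to signal a miss into the runtime.
std::optional<Object*> LoadGlobal(const WeakCell& weak_cell,
                                  bool is_configurable) {
  PropertyCell* cell = weak_cell.value;
  if (cell == nullptr) return std::nullopt;  // weak cell was cleared
  Object* result = cell->value;
  if (is_configurable && result == &kTheHole) {
    return std::nullopt;  // property was deleted
  }
  return result;
}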
index 52aafca..70a5d84 100644 (file)
@@ -265,18 +265,35 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
 static const Register LoadIC_TempRegister() { return r3; }
 
 
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  if (FLAG_vector_ics) {
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+
+    __ Push(receiver, name, slot, vector);
+  } else {
+    __ Push(receiver, name);
+  }
+}
+
+
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // The return address is in lr.
   Isolate* isolate = masm->isolate();
 
-  __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4);
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(r4, r5, VectorLoadICDescriptor::SlotRegister(),
+                     VectorLoadICDescriptor::VectorRegister()));
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, r4, r5);
 
-  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+  LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
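The effect of LoadIC_PushArgs on the miss call is just an argument-count
change: with --vector-ics the runtime miss handler receives four values
instead of two. A minimal sketch of that selection, with Arg standing in
for a tagged value:

#include <cstdint>
#include <vector>

using Arg = std::intptr_t;

std::vector<Arg> BuildLoadMissArgs(bool vector_ics, Arg receiver, Arg name,
                                   Arg slot, Arg vector) {
  if (vector_ics) return {receiver, name, slot, vector};  // 4 arguments
  return {receiver, name};                                // 2 arguments
}

This mirrors the FLAG_vector_ics ? 4 : 2 arg_count passed to
TailCallExternalReference above.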
 
 
@@ -403,15 +420,18 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // The return address is in lr.
   Isolate* isolate = masm->isolate();
 
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4);
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(r4, r5, VectorLoadICDescriptor::SlotRegister(),
+                     VectorLoadICDescriptor::VectorRegister()));
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r4, r5);
 
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+  LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   ExternalReference ref =
       ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
@@ -804,8 +824,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
   __ JumpIfNotUniqueNameInstanceType(r4, &slow);
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               key, r3, r4, r5, r6);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, key, r3, r4, r5, r6);
   // Cache miss.
   __ b(&miss);
 
@@ -866,8 +886,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
 
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               name, r3, r4, r5, r6);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, name, r3, r4, r5, r6);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
index 7bef56e..b44702b 100644 (file)
@@ -41,9 +41,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
 
   if (check == PROPERTY &&
       (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    // In case we are compiling an IC for dictionary loads and stores, just
+    // In case we are compiling an IC for dictionary loads or stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      // Keyed loads with dictionaries shouldn't be here; they go generic.
+      // The DCHECK is to protect assumptions when --vector-ics is on.
+      DCHECK(kind() != Code::KEYED_LOAD_IC);
       Register tmp = scratch1();
       __ JumpIfSmi(this->name(), &miss);
       __ ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@@ -72,8 +75,8 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
     Handle<Map> map = IC::TypeToMap(*type, isolate());
     if (!map->is_deprecated()) {
       number_of_handled_maps++;
-      __ mov(ip, Operand(map));
-      __ cmp(map_reg, ip);
+      Handle<WeakCell> cell = Map::WeakCellForMap(map);
+      __ CmpWeakValue(map_reg, cell, scratch2());
       if (type->Is(HeapType::Number())) {
         DCHECK(!number_case.is_unused());
         __ bind(&number_case);
@@ -100,16 +103,18 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
   __ JumpIfSmi(receiver(), &miss);
 
   int receiver_count = receiver_maps->length();
-  __ ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  Register map_reg = scratch1();
+  __ ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
   for (int i = 0; i < receiver_count; ++i) {
-    __ mov(ip, Operand(receiver_maps->at(i)));
-    __ cmp(scratch1(), ip);
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+    __ CmpWeakValue(map_reg, cell, scratch2());
     if (transitioned_maps->at(i).is_null()) {
       __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
     } else {
       Label next_map;
       __ b(ne, &next_map);
-      __ mov(transition_map(), Operand(transitioned_maps->at(i)));
+      Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+      __ LoadWeakValue(transition_map(), cell, &miss);
       __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
       __ bind(&next_map);
     }
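The polymorphic dispatch loop above can be modeled in plain C++ roughly as
follows; every handled map is referenced through a weak cell, so a cleared
cell simply never matches and the stale entry degrades into a miss instead
of keeping the map alive. All types here are stand-ins.

struct Map {};
struct WeakCell { Map* value; };  // nulled by the GC when the map dies
using Handler = int;              // stand-in for a handler Code object

// Returns the matching handler, or -1 for a miss.
Handler Dispatch(const Map* receiver_map, const WeakCell* cells,
                 const Handler* handlers, int count) {
  for (int i = 0; i < count; ++i) {
    if (cells[i].value != nullptr && cells[i].value == receiver_map) {
      return handlers[i];  // __ Jump(handler_stubs->at(i), ..., eq)
    }
  }
  return -1;  // fall through to the miss label
}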
index bc8b0fb..1d6bd30 100644 (file)
@@ -7,7 +7,9 @@
 #if V8_TARGET_ARCH_ARM
 
 #include "src/codegen.h"
+#include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
 
 namespace v8 {
 namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
 
 
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Flags flags, bool leave_frame,
+                       Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
                        StubCache::Table table, Register receiver, Register name,
                        // Number of the cache entry, not scaled.
                        Register offset, Register scratch, Register scratch2,
@@ -94,10 +96,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
 }
 
 
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
-                              bool leave_frame, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+                              Code::Flags flags, bool leave_frame,
+                              Register receiver, Register name,
+                              Register scratch, Register extra, Register extra2,
+                              Register extra3) {
   Isolate* isolate = masm->isolate();
   Label miss;
 
@@ -109,15 +112,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
 
   // Make sure that there are no register conflicts.
-  DCHECK(!scratch.is(receiver));
-  DCHECK(!scratch.is(name));
-  DCHECK(!extra.is(receiver));
-  DCHECK(!extra.is(name));
-  DCHECK(!extra.is(scratch));
-  DCHECK(!extra2.is(receiver));
-  DCHECK(!extra2.is(name));
-  DCHECK(!extra2.is(scratch));
-  DCHECK(!extra2.is(extra));
+  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
 
   // Check scratch, extra and extra2 registers are valid.
   DCHECK(!scratch.is(no_reg));
@@ -125,6 +120,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   DCHECK(!extra2.is(no_reg));
   DCHECK(!extra3.is(no_reg));
 
+#ifdef DEBUG
+  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+  // extra3 don't conflict with the vector and slot registers, which need
+  // to be preserved for a handler call or miss.
+  if (IC::ICUseVector(ic_kind)) {
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+  }
+#endif
+
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                       extra3);
@@ -147,8 +153,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ and_(scratch, scratch, Operand(mask));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
-             scratch, extra, extra2, extra3);
+  ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+             name, scratch, extra, extra2, extra3);
 
   // Primary miss: Compute hash for secondary probe.
   __ sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
@@ -157,8 +163,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ and_(scratch, scratch, Operand(mask2));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
-             scratch, extra, extra2, extra3);
+  ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+             name, scratch, extra, extra2, extra3);
 
  // Cache miss: fall through and let the caller handle the miss by
  // entering the runtime system.
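For reference, the probe sequence GenerateProbe emits amounts to a
two-table cache lookup. The sketch below is deliberately simplified: the
hash mixing and table sizes are placeholders, not V8's real constants.

#include <cstdint>

struct Entry { uint32_t key; int handler; };  // handler: stand-in for Code*

constexpr uint32_t kPrimarySize = 2048;
constexpr uint32_t kSecondarySize = 512;

int Probe(const Entry* primary, const Entry* secondary,
          uint32_t name_hash, uint32_t map_bits, uint32_t flags) {
  // Primary probe: hash of name, map and flags, masked to the table size.
  uint32_t p = (name_hash + map_bits + flags) & (kPrimarySize - 1);
  if (primary[p].key == name_hash) return primary[p].handler;
  // Secondary probe: derived from the primary offset and the name.
  uint32_t s = (p - name_hash + flags) & (kSecondarySize - 1);
  if (secondary[s].key == name_hash) return secondary[s].handler;
  return -1;  // cache miss: enter the runtime system
}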
index 8cef505..1c28bf5 100644 (file)
@@ -15,6 +15,27 @@ namespace internal {
 
 #define __ ACCESS_MASM(masm)
 
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Push(vector);
+  __ Push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Pop(slot);
+  __ Pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ Drop(2);
+}
+
 
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
@@ -57,24 +78,15 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
 
 
 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
-  Isolate* isolate = masm->isolate();
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->native_context()->get(index)));
-
-  // Check we're still in the same context.
-  Register scratch = prototype;
-  __ Ldr(scratch, GlobalObjectMemOperand());
-  __ Ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ Ldr(scratch, ContextMemOperand(scratch, index));
-  __ Cmp(scratch, Operand(function));
-  __ B(ne, miss);
-
+    MacroAssembler* masm, int index, Register result, Label* miss) {
+  __ Ldr(result, GlobalObjectMemOperand());
+  __ Ldr(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  __ Ldr(result, ContextMemOperand(result, index));
   // Load its initial map. The global functions all have initial maps.
-  __ Mov(prototype, Operand(Handle<Map>(function->initial_map())));
+  __ Ldr(result,
+         FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
   // Load the prototype from the initial map.
-  __ Ldr(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+  __ Ldr(result, FieldMemOperand(result, Map::kPrototypeOffset));
 }
 
 
@@ -315,11 +327,15 @@ void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
     Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
   Label miss;
+  if (IC::ICUseVector(kind())) {
+    PushVectorAndSlot();
+  }
   FrontendHeader(receiver(), name, &miss);
 
   // Get the value from the cell.
   Register result = StoreDescriptor::ValueRegister();
-  __ Mov(result, Operand(cell));
+  Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+  __ LoadWeakValue(result, weak_cell, &miss);
   __ Ldr(result, FieldMemOperand(result, Cell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -329,6 +345,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
 
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_load_global_stub(), 1, x1, x3);
+  if (IC::ICUseVector(kind())) {
+    DiscardVectorAndSlot();
+  }
   __ Ret();
 
   FrontendFooter(name, &miss);
@@ -370,18 +389,37 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
 }
 
 
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
-    Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
   __ Mov(this->name(), Operand(name));
-  __ Mov(StoreTransitionDescriptor::MapRegister(), Operand(transition));
 }
 
 
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+                                                   Register scratch,
+                                                   Label* miss) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+  Register map_reg = StoreTransitionDescriptor::MapRegister();
+  DCHECK(!map_reg.is(scratch));
+  __ LoadWeakValue(map_reg, cell, miss);
+  if (transition->CanBeDeprecated()) {
+    __ Ldrsw(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+    __ TestAndBranchIfAnySet(scratch, Map::Deprecated::kMask, miss);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+                                                      int descriptor,
                                                       Register value_reg,
+                                                      Register scratch,
                                                       Label* miss_label) {
-  __ LoadObject(scratch1(), handle(constant, isolate()));
-  __ Cmp(value_reg, scratch1());
+  DCHECK(!map_reg.is(scratch));
+  DCHECK(!map_reg.is(value_reg));
+  DCHECK(!value_reg.is(scratch));
+  __ LoadInstanceDescriptors(map_reg, scratch);
+  __ Ldr(scratch,
+         FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+  __ Cmp(value_reg, scratch);
   __ B(ne, miss_label);
 }
 
@@ -457,17 +495,13 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       reg = holder_reg;  // From now on the object will be in holder_reg.
       __ Ldr(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      // Two possible reasons for loading the prototype from the map:
-      // (1) Can't store references to new space in code.
-      // (2) Handler is shared for all receivers with the same prototype
-      //     map (but not necessarily the same prototype instance).
-      bool load_prototype_from_map =
-          heap()->InNewSpace(*prototype) || depth == 1;
       Register map_reg = scratch1;
       __ Ldr(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
 
       if (depth != 1 || check == CHECK_ALL_MAPS) {
-        __ CheckMap(map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+        __ CmpWeakValue(map_reg, cell, scratch2);
+        __ B(ne, miss);
       }
 
       // Check access rights to the global object.  This has to happen after
@@ -486,11 +520,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
 
       reg = holder_reg;  // From now on the object will be in holder_reg.
 
-      if (load_prototype_from_map) {
-        __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      } else {
-        __ Mov(reg, Operand(prototype));
-      }
+      __ Ldr(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
     }
 
     // Go to the next object in the prototype chain.
@@ -504,7 +534,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
   // Check the holder map.
   if (depth != 0 || check == CHECK_ALL_MAPS) {
     // Check the holder map.
-    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+    __ Ldr(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ B(ne, miss);
   }
 
   // Perform security check for access to the global object.
@@ -525,6 +558,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
     __ B(&success);
 
     __ Bind(miss);
+    if (IC::ICUseVector(kind())) {
+      DCHECK(kind() == Code::LOAD_IC);
+      PopVectorAndSlot();
+    }
     TailCallBuiltin(masm(), MissBuiltin(kind()));
 
     __ Bind(&success);
@@ -637,6 +674,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     } else {
       __ Push(holder_reg, this->name());
     }
+    InterceptorVectorSlotPush(holder_reg);
    // Invoke an interceptor.  Note: map checks from the receiver to the
    // interceptor's holder have been compiled before (see a caller of
    // this method).
@@ -653,6 +691,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     __ Ret();
 
     __ Bind(&interceptor_failed);
+    InterceptorVectorSlotPop(holder_reg);
     if (must_preserve_receiver_reg) {
       __ Pop(this->name(), holder_reg, receiver());
     } else {
@@ -683,7 +722,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name,
     Handle<ExecutableAccessorInfo> callback) {
   ASM_LOCATION("NamedStoreHandlerCompiler::CompileStoreCallback");
-  Register holder_reg = Frontend(receiver(), name);
+  Register holder_reg = Frontend(name);
 
   // Stub never generated for non-global objects that require access checks.
   DCHECK(holder()->IsJSGlobalProxy() || !holder()->IsAccessCheckNeeded());
index 4804a23..a01015c 100644 (file)
@@ -354,12 +354,23 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
   Isolate* isolate = masm->isolate();
   ASM_LOCATION("LoadIC::GenerateMiss");
 
-  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(x4, x5, VectorLoadICDescriptor::SlotRegister(),
+                     VectorLoadICDescriptor::VectorRegister()));
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, x4, x5);
 
   // Perform tail call to the entry.
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+  if (FLAG_vector_ics) {
+    __ Push(VectorLoadICDescriptor::ReceiverRegister(),
+            VectorLoadICDescriptor::NameRegister(),
+            VectorLoadICDescriptor::SlotRegister(),
+            VectorLoadICDescriptor::VectorRegister());
+  } else {
+    __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+  }
   ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
@@ -420,15 +431,25 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // The return address is in lr.
   Isolate* isolate = masm->isolate();
 
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(x10, x11, VectorLoadICDescriptor::SlotRegister(),
+                     VectorLoadICDescriptor::VectorRegister()));
   __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);
 
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+  if (FLAG_vector_ics) {
+    __ Push(VectorLoadICDescriptor::ReceiverRegister(),
+            VectorLoadICDescriptor::NameRegister(),
+            VectorLoadICDescriptor::SlotRegister(),
+            VectorLoadICDescriptor::VectorRegister());
+  } else {
+    __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+  }
 
   // Perform tail call to the entry.
   ExternalReference ref =
       ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
-
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
@@ -837,8 +858,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
   __ JumpIfNotUniqueNameInstanceType(x10, &slow);
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               key, x3, x4, x5, x6);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, key, x3, x4, x5, x6);
   // Cache miss.
   __ B(&miss);
 
@@ -897,8 +918,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   // Probe the stub cache.
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               name, x3, x4, x5, x6);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, name, x3, x4, x5, x6);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
index ffc1069..a3d0d48 100644 (file)
@@ -42,9 +42,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
 
   if (check == PROPERTY &&
       (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    // In case we are compiling an IC for dictionary loads and stores, just
+    // In case we are compiling an IC for dictionary loads or stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      // Keyed loads with dictionaries shouldn't be here; they go generic.
+      // The DCHECK is to protect assumptions when --vector-ics is on.
+      DCHECK(kind() != Code::KEYED_LOAD_IC);
       Register tmp = scratch1();
       __ JumpIfSmi(this->name(), &miss);
       __ Ldr(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@@ -71,8 +74,9 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
     Handle<Map> map = IC::TypeToMap(*type, isolate());
     if (!map->is_deprecated()) {
       number_of_handled_maps++;
+      Handle<WeakCell> cell = Map::WeakCellForMap(map);
+      __ CmpWeakValue(map_reg, cell, scratch2());
       Label try_next;
-      __ Cmp(map_reg, Operand(map));
       __ B(ne, &try_next);
       if (type->Is(HeapType::Number())) {
         DCHECK(!number_case.is_unused());
@@ -104,16 +108,18 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
   __ JumpIfSmi(receiver(), &miss);
 
   int receiver_count = receiver_maps->length();
-  __ Ldr(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  Register map_reg = scratch1();
+  __ Ldr(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
   for (int i = 0; i < receiver_count; i++) {
-    __ Cmp(scratch1(), Operand(receiver_maps->at(i)));
-
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+    __ CmpWeakValue(map_reg, cell, scratch2());
     Label skip;
     __ B(&skip, ne);
     if (!transitioned_maps->at(i).is_null()) {
       // This argument is used by the handler stub. For example, see
       // ElementsTransitionGenerator::GenerateMapChangeElementsTransition.
-      __ Mov(transition_map(), Operand(transitioned_maps->at(i)));
+      Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+      __ LoadWeakValue(transition_map(), cell, &miss);
     }
     __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
     __ Bind(&skip);
index 4d31d49..a9c56a3 100644 (file)
@@ -7,7 +7,9 @@
 #if V8_TARGET_ARCH_ARM64
 
 #include "src/codegen.h"
+#include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
 
 namespace v8 {
 namespace internal {
@@ -23,7 +25,7 @@ namespace internal {
 //
 // 'receiver', 'name' and 'offset' registers are preserved on miss.
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Flags flags, bool leave_frame,
+                       Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
                        StubCache::Table table, Register receiver, Register name,
                        Register offset, Register scratch, Register scratch2,
                        Register scratch3) {
@@ -90,10 +92,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
 }
 
 
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
-                              bool leave_frame, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+                              Code::Flags flags, bool leave_frame,
+                              Register receiver, Register name,
+                              Register scratch, Register extra, Register extra2,
+                              Register extra3) {
   Isolate* isolate = masm->isolate();
   Label miss;
 
@@ -108,6 +111,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   DCHECK(!extra2.is(no_reg));
   DCHECK(!extra3.is(no_reg));
 
+#ifdef DEBUG
+  // If vector-based ics are in use, ensure that scratch, extra, extra2 and
+  // extra3 don't conflict with the vector and slot registers, which need
+  // to be preserved for a handler call or miss.
+  if (IC::ICUseVector(ic_kind)) {
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+  }
+#endif
+
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                       extra3);
@@ -125,8 +139,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
           CountTrailingZeros(kPrimaryTableSize, 64));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
-             scratch, extra, extra2, extra3);
+  ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+             name, scratch, extra, extra2, extra3);
 
   // Primary miss: Compute hash for secondary table.
   __ Sub(scratch, scratch, Operand(name, LSR, kCacheIndexShift));
@@ -134,8 +148,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ And(scratch, scratch, kSecondaryTableSize - 1);
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
-             scratch, extra, extra2, extra3);
+  ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+             name, scratch, extra, extra2, extra3);
 
  // Cache miss: fall through and let the caller handle the miss by
  // entering the runtime system.
index 7f440c0..ae977c3 100644 (file)
@@ -129,11 +129,17 @@ Register NamedStoreHandlerCompiler::FrontendHeader(Register object_reg,
 }
 
 
-Register PropertyHandlerCompiler::Frontend(Register object_reg,
-                                           Handle<Name> name) {
+Register PropertyHandlerCompiler::Frontend(Handle<Name> name) {
   Label miss;
-  Register reg = FrontendHeader(object_reg, name, &miss);
+  if (IC::ICUseVector(kind())) {
+    PushVectorAndSlot();
+  }
+  Register reg = FrontendHeader(receiver(), name, &miss);
   FrontendFooter(name, &miss);
+  // The footer consumes the vector and slot from the stack if a miss occurs.
+  if (IC::ICUseVector(kind())) {
+    DiscardVectorAndSlot();
+  }
   return reg;
 }
 
@@ -179,7 +185,7 @@ void PropertyHandlerCompiler::NonexistentFrontendHeader(Handle<Name> name,
 
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
                                                         FieldIndex field) {
-  Register reg = Frontend(receiver(), name);
+  Register reg = Frontend(name);
   __ Move(receiver(), reg);
   LoadFieldStub stub(isolate(), field);
   GenerateTailCall(masm(), stub.GetCode());
@@ -189,7 +195,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadField(Handle<Name> name,
 
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
                                                            int constant_index) {
-  Register reg = Frontend(receiver(), name);
+  Register reg = Frontend(name);
   __ Move(receiver(), reg);
   LoadConstantStub stub(isolate(), constant_index);
   GenerateTailCall(masm(), stub.GetCode());
@@ -200,7 +206,14 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadConstant(Handle<Name> name,
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
     Handle<Name> name) {
   Label miss;
+  if (IC::ICUseVector(kind())) {
+    DCHECK(kind() == Code::LOAD_IC);
+    PushVectorAndSlot();
+  }
   NonexistentFrontendHeader(name, &miss, scratch2(), scratch3());
+  if (IC::ICUseVector(kind())) {
+    DiscardVectorAndSlot();
+  }
   GenerateLoadConstant(isolate()->factory()->undefined_value());
   FrontendFooter(name, &miss);
   return GetCode(kind(), Code::FAST, name);
@@ -209,7 +222,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadNonexistent(
 
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
     Handle<Name> name, Handle<ExecutableAccessorInfo> callback) {
-  Register reg = Frontend(receiver(), name);
+  Register reg = Frontend(name);
   GenerateLoadCallback(reg, callback);
   return GetCode(kind(), Code::FAST, name);
 }
@@ -218,7 +231,7 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
     Handle<Name> name, const CallOptimization& call_optimization) {
   DCHECK(call_optimization.is_simple_api_call());
-  Frontend(receiver(), name);
+  Frontend(name);
   Handle<Map> receiver_map = IC::TypeToMap(*type(), isolate());
   GenerateFastApiCall(masm(), call_optimization, receiver_map, receiver(),
                       scratch1(), false, 0, NULL);
@@ -226,6 +239,35 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadCallback(
 }
 
 
+void NamedLoadHandlerCompiler::InterceptorVectorSlotPush(Register holder_reg) {
+  if (IC::ICUseVector(kind())) {
+    if (holder_reg.is(receiver())) {
+      PushVectorAndSlot();
+    } else {
+      DCHECK(holder_reg.is(scratch1()));
+      PushVectorAndSlot(scratch2(), scratch3());
+    }
+  }
+}
+
+
+void NamedLoadHandlerCompiler::InterceptorVectorSlotPop(Register holder_reg,
+                                                        PopMode mode) {
+  if (IC::ICUseVector(kind())) {
+    if (mode == DISCARD) {
+      DiscardVectorAndSlot();
+    } else {
+      if (holder_reg.is(receiver())) {
+        PopVectorAndSlot();
+      } else {
+        DCHECK(holder_reg.is(scratch1()));
+        PopVectorAndSlot(scratch2(), scratch3());
+      }
+    }
+  }
+}
+
+
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
     LookupIterator* it) {
   // So far the most popular follow ups for interceptor loads are FIELD and
@@ -241,7 +283,8 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
     case LookupIterator::NOT_FOUND:
       break;
     case LookupIterator::DATA:
-      inline_followup = it->property_details().type() == FIELD;
+      inline_followup =
+          it->property_details().type() == FIELD && !it->is_dictionary_holder();
       break;
     case LookupIterator::ACCESSOR: {
       Handle<Object> accessors = it->GetAccessors();
@@ -255,7 +298,12 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadInterceptor(
     }
   }
 
-  Register reg = Frontend(receiver(), it->name());
+  Label miss;
+  InterceptorVectorSlotPush(receiver());
+  Register reg = FrontendHeader(receiver(), it->name(), &miss);
+  FrontendFooter(it->name(), &miss);
+  InterceptorVectorSlotPop(reg);
+
   if (inline_followup) {
     // TODO(368): Compile in the whole chain: all the interceptors in
     // prototypes and ultimate answer.
@@ -273,7 +321,13 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
 
   set_type_for_object(holder());
   set_holder(real_named_property_holder);
-  Register reg = Frontend(interceptor_reg, it->name());
+
+  Label miss;
+  InterceptorVectorSlotPush(interceptor_reg);
+  Register reg = FrontendHeader(interceptor_reg, it->name(), &miss);
+  FrontendFooter(it->name(), &miss);
+  // We discard the vector and slot now because we cannot miss below this point.
+  InterceptorVectorSlotPop(reg, DISCARD);
 
   switch (it->state()) {
     case LookupIterator::ACCESS_CHECK:
@@ -300,7 +354,7 @@ void NamedLoadHandlerCompiler::GenerateLoadPostInterceptor(
 
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadViaGetter(
     Handle<Name> name, Handle<JSFunction> getter) {
-  Frontend(receiver(), name);
+  Frontend(name);
   GenerateLoadViaGetter(masm(), type(), receiver(), getter);
   return GetCode(kind(), Code::FAST, name);
 }
@@ -311,9 +365,6 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
     Handle<Map> transition, Handle<Name> name) {
   Label miss;
 
-  // Ensure no transitions to deprecated maps are followed.
-  __ CheckMapDeprecated(transition, scratch1(), &miss);
-
   // Check that we are allowed to write this.
   bool is_nonexistent = holder()->map() == transition->GetBackPointer();
   if (is_nonexistent) {
@@ -332,7 +383,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
   }
 
   int descriptor = transition->LastAdded();
-  DescriptorArray* descriptors = transition->instance_descriptors();
+  Handle<DescriptorArray> descriptors(transition->instance_descriptors());
   PropertyDetails details = descriptors->GetDetails(descriptor);
   Representation representation = details.representation();
   DCHECK(!representation.IsNone());
@@ -342,9 +393,11 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
 
   // Call to respective StoreTransitionStub.
   if (details.type() == CONSTANT) {
-    GenerateConstantCheck(descriptors->GetValue(descriptor), value(), &miss);
-
-    GenerateRestoreNameAndMap(name, transition);
+    GenerateRestoreMap(transition, scratch2(), &miss);
+    DCHECK(descriptors->GetValue(descriptor)->IsJSFunction());
+    Register map_reg = StoreTransitionDescriptor::MapRegister();
+    GenerateConstantCheck(map_reg, descriptor, value(), scratch2(), &miss);
+    GenerateRestoreName(name);
     StoreTransitionStub stub(isolate());
     GenerateTailCall(masm(), stub.GetCode());
 
@@ -358,7 +411,8 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreTransition(
             ? StoreTransitionStub::ExtendStorageAndStoreMapAndValue
             : StoreTransitionStub::StoreMapAndValue;
 
-    GenerateRestoreNameAndMap(name, transition);
+    GenerateRestoreMap(transition, scratch2(), &miss);
+    GenerateRestoreName(name);
     StoreTransitionStub stub(isolate(),
                              FieldIndex::ForDescriptor(*transition, descriptor),
                              representation, store_mode);
@@ -388,7 +442,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreField(LookupIterator* it) {
 
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
     Handle<JSObject> object, Handle<Name> name, Handle<JSFunction> setter) {
-  Frontend(receiver(), name);
+  Frontend(name);
   GenerateStoreViaSetter(masm(), type(), receiver(), setter);
 
   return GetCode(kind(), Code::FAST, name);
@@ -398,7 +452,7 @@ Handle<Code> NamedStoreHandlerCompiler::CompileStoreViaSetter(
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name,
     const CallOptimization& call_optimization) {
-  Frontend(receiver(), name);
+  Frontend(name);
   Register values[] = {value()};
   GenerateFastApiCall(masm(), call_optimization, handle(object->map()),
                       receiver(), scratch1(), true, 1, values);
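The reordered CompileStoreTransition logic above, in rough C++ form: the
transition map is restored through a weak cell first (missing if the GC
cleared it or the map is deprecated), and for CONSTANT properties the
expected value is read out of the map's descriptor array instead of being
embedded in the code object. Stand-in types throughout.

struct Object {};
struct DescriptorArray { Object* values[8]; };
struct Map {
  bool deprecated;
  DescriptorArray* instance_descriptors;
};
struct WeakCell { Map* value; };

// Returns the transition map, or nullptr to signal a miss.
Map* RestoreMapAndCheckConstant(const WeakCell& cell, int descriptor,
                                const Object* value_reg) {
  Map* map = cell.value;
  if (map == nullptr || map->deprecated) return nullptr;  // miss
  Object* expected = map->instance_descriptors->values[descriptor];
  if (expected != value_reg) return nullptr;              // miss
  return map;  // proceed to the StoreTransitionStub tail call
}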
index 4fedd4e..bed6577 100644 (file)
@@ -38,10 +38,21 @@ class PropertyHandlerCompiler : public PropertyAccessCompiler {
 
   virtual void FrontendFooter(Handle<Name> name, Label* miss) { UNREACHABLE(); }
 
-  Register Frontend(Register object_reg, Handle<Name> name);
+  // Frontend loads from receiver() and returns the holder register, which
+  // may be different.
+  Register Frontend(Handle<Name> name);
   void NonexistentFrontendHeader(Handle<Name> name, Label* miss,
                                  Register scratch1, Register scratch2);
 
+  // When FLAG_vector_ics is true, handlers that can miss must save the
+  // vector and slot and pass them on to the miss handler.
+  void PushVectorAndSlot() { PushVectorAndSlot(vector(), slot()); }
+  void PushVectorAndSlot(Register vector, Register slot);
+  void PopVectorAndSlot() { PopVectorAndSlot(vector(), slot()); }
+  void PopVectorAndSlot(Register vector, Register slot);
+
+  void DiscardVectorAndSlot();
+
   // TODO(verwaest): Make non-static.
   static void GenerateFastApiCall(MacroAssembler* masm,
                                   const CallOptimization& optimization,
@@ -170,6 +181,12 @@ class NamedLoadHandlerCompiler : public PropertyHandlerCompiler {
                             Handle<ExecutableAccessorInfo> callback);
   void GenerateLoadCallback(const CallOptimization& call_optimization,
                             Handle<Map> receiver_map);
+
+  // These helpers emit no code if vector-ics are disabled.
+  void InterceptorVectorSlotPush(Register holder_reg);
+  enum PopMode { POP, DISCARD };
+  void InterceptorVectorSlotPop(Register holder_reg, PopMode mode = POP);
+
   void GenerateLoadInterceptor(Register holder_reg);
   void GenerateLoadInterceptorWithFollowup(LookupIterator* it,
                                            Register holder_reg);
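The save/restore discipline behind the new vector-and-slot helpers can be
pictured with an explicit stack; this is only a model of the stack
traffic, with Reg standing in for a machine register value:

#include <vector>

using Reg = long;

struct HandlerFrontendModel {
  std::vector<Reg> stack;

  void PushVectorAndSlot(Reg vector, Reg slot) {
    stack.push_back(vector);  // saved before code that may miss
    stack.push_back(slot);
  }
  // Miss path (POP): the miss handler needs vector and slot back.
  void PopVectorAndSlot(Reg* vector, Reg* slot) {
    *slot = stack.back(); stack.pop_back();
    *vector = stack.back(); stack.pop_back();
  }
  // Success path (DISCARD): the saved values are dead, just drop them.
  void DiscardVectorAndSlot() { stack.resize(stack.size() - 2); }
};

The POP/DISCARD modes of InterceptorVectorSlotPop above correspond to the
two exit paths: pop on the interceptor-failed path, discard once no
further miss is possible.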
@@ -230,9 +247,12 @@ class NamedStoreHandlerCompiler : public PropertyHandlerCompiler {
   void GenerateRestoreName(Label* label, Handle<Name> name);
 
  private:
-  void GenerateRestoreNameAndMap(Handle<Name> name, Handle<Map> transition);
+  void GenerateRestoreName(Handle<Name> name);
+  void GenerateRestoreMap(Handle<Map> transition, Register scratch,
+                          Label* miss);
 
-  void GenerateConstantCheck(Object* constant, Register value_reg,
+  void GenerateConstantCheck(Register map_reg, int descriptor,
+                             Register value_reg, Register scratch,
                              Label* miss_label);
 
   void GenerateFieldTypeChecks(HeapType* field_type, Register value_reg,
index 3df2140..90512e9 100644 (file)
@@ -47,6 +47,28 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
 }
 
 
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ push(vector);
+  __ push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ pop(slot);
+  __ pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ add(esp, Immediate(2 * kPointerSize));
+}
+
+
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -88,28 +110,23 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
 
 
 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(masm->isolate()->native_context()->get(index)));
-  // Check we're still in the same context.
-  Register scratch = prototype;
+    MacroAssembler* masm, int index, Register result, Label* miss) {
   const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ mov(scratch, Operand(esi, offset));
-  __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
-  __ j(not_equal, miss);
-
+  __ mov(result, Operand(esi, offset));
+  __ mov(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
+  __ mov(result, Operand(result, Context::SlotOffset(index)));
   // Load its initial map. The global functions all have initial maps.
-  __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
+  __ mov(result,
+         FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
   // Load the prototype from the initial map.
-  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
 }
 
 
 void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
     MacroAssembler* masm, Register receiver, Register scratch1,
     Register scratch2, Label* miss_label) {
+  DCHECK(!FLAG_vector_ics);
   __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
   __ mov(eax, scratch1);
   __ ret(0);
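
The rewritten GenerateDirectLoadGlobalFunctionPrototype above no longer embeds the global function and re-checks the context; it reloads everything through the native context: the context slot, then the function's initial map, then that map's prototype. A toy C++ model of the chained loads (hypothetical struct layouts, for illustration only):

#include <cassert>

// Toy stand-ins for the chained loads: native context -> global function
// at `index` -> initial map -> prototype. Nothing is embedded in code.
struct Map { const void* prototype; };
struct JSFunction { const Map* initial_map; };
struct NativeContext { const JSFunction* slots[8]; };

const void* DirectLoadGlobalFunctionPrototype(const NativeContext& cx,
                                              int index) {
  const JSFunction* fn = cx.slots[index];  // load from the context slot
  const Map* map = fn->initial_map;        // kPrototypeOrInitialMapOffset
  return map->prototype;                   // Map::kPrototypeOffset
}

int main() {
  int proto_marker = 0;
  Map map{&proto_marker};
  JSFunction fn{&map};
  NativeContext cx{};
  cx.slots[3] = &fn;
  assert(DirectLoadGlobalFunctionPrototype(cx, 3) == &proto_marker);
  return 0;
}
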
@@ -329,17 +346,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
 }
 
 
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
-    Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
   __ mov(this->name(), Immediate(name));
-  __ mov(StoreTransitionDescriptor::MapRegister(), Immediate(transition));
 }
 
 
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+                                                   Register scratch,
+                                                   Label* miss) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+  Register map_reg = StoreTransitionDescriptor::MapRegister();
+  DCHECK(!map_reg.is(scratch));
+  __ LoadWeakValue(map_reg, cell, miss);
+  if (transition->CanBeDeprecated()) {
+    __ mov(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
+    __ and_(scratch, Immediate(Map::Deprecated::kMask));
+    __ j(not_zero, miss);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+                                                      int descriptor,
                                                       Register value_reg,
+                                                      Register scratch,
                                                       Label* miss_label) {
-  __ CmpObject(value_reg, handle(constant, isolate()));
+  DCHECK(!map_reg.is(scratch));
+  DCHECK(!map_reg.is(value_reg));
+  DCHECK(!value_reg.is(scratch));
+  __ LoadInstanceDescriptors(map_reg, scratch);
+  __ mov(scratch,
+         FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+  __ cmp(value_reg, scratch);
   __ j(not_equal, miss_label);
 }
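
GenerateConstantCheck likewise stops baking the expected constant into the code object: it reloads the constant from the map's descriptor array at the given descriptor index and compares registers. Roughly, in a stand-alone model (hypothetical Value/Map types, not V8's DescriptorArray layout):

#include <cassert>
#include <vector>

// Hypothetical stand-ins for V8's Map/DescriptorArray; illustration only.
struct Value { int id; };
struct Map {
  // One expected constant value per descriptor, as in a descriptor array.
  std::vector<const Value*> descriptor_values;
};

// Mirrors GenerateConstantCheck: fetch the expected constant for
// `descriptor` from the map and report a miss unless it matches.
bool ConstantCheck(const Map& map, int descriptor, const Value* value_reg) {
  const Value* expected = map.descriptor_values[descriptor];
  return expected == value_reg;  // __ cmp(value_reg, scratch)
}

int main() {
  Value a{1}, b{2};
  Map map{{&a, &b}};
  assert(ConstantCheck(map, 0, &a));   // match: take the fast path
  assert(!ConstantCheck(map, 1, &a));  // mismatch: jump to miss_label
  return 0;
}
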
 
@@ -415,14 +453,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       reg = holder_reg;  // From now on the object will be in holder_reg.
       __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      bool in_new_space = heap()->InNewSpace(*prototype);
-      // Two possible reasons for loading the prototype from the map:
-      // (1) Can't store references to new space in code.
-      // (2) Handler is shared for all receivers with the same prototype
-      //     map (but not necessarily the same prototype instance).
-      bool load_prototype_from_map = in_new_space || depth == 1;
+      Register map_reg = scratch1;
+      __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
       if (depth != 1 || check == CHECK_ALL_MAPS) {
-        __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+        __ CmpWeakValue(map_reg, cell, scratch2);
+        __ j(not_equal, miss);
       }
 
       // Check access rights to the global object.  This has to happen after
@@ -432,24 +468,15 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       // global proxy (as opposed to using slow ICs). See corresponding code
       // in LookupForRead().
       if (current_map->IsJSGlobalProxyMap()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+        __ CheckAccessGlobalProxy(reg, map_reg, scratch2, miss);
+        // Restore map_reg.
+        __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
       } else if (current_map->IsJSGlobalObjectMap()) {
         GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                   name, scratch2, miss);
       }
-
-      if (load_prototype_from_map) {
-        // Save the map in scratch1 for later.
-        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-
       reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (load_prototype_from_map) {
-        __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        __ mov(reg, prototype);
-      }
+      __ mov(reg, FieldOperand(map_reg, Map::kPrototypeOffset));
     }
 
     // Go to the next object in the prototype chain.
@@ -462,7 +489,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
 
   if (depth != 0 || check == CHECK_ALL_MAPS) {
     // Check the holder map.
-    __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+    __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ j(not_equal, miss);
   }
 
   // Perform security check for access to the global object.
@@ -482,6 +512,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
     Label success;
     __ jmp(&success);
     __ bind(miss);
+    if (IC::ICUseVector(kind())) {
+      DCHECK(kind() == Code::LOAD_IC);
+      PopVectorAndSlot();
+    }
     TailCallBuiltin(masm(), MissBuiltin(kind()));
     __ bind(&success);
   }
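
The recurring pattern in CheckPrototypes and FrontendFooter is the weak-cell map check: CmpWeakValue and LoadWeakValue go through a WeakCell that the GC may clear, so the IC no longer keeps maps alive and a cleared cell is treated as a miss. A simplified model of that contract (plain pointers instead of V8's heap types):

#include <cassert>

struct Map { int id; };

// Simplified model of a WeakCell: the GC may clear `value` to null when
// the map dies; code that loads it must treat "cleared" as a miss.
struct WeakCell {
  const Map* value;
  void ClearedByGC() { value = nullptr; }
};

// CmpWeakValue: equal only if the cell is still live and holds `map`.
bool CmpWeakValue(const Map* map, const WeakCell& cell) {
  return cell.value != nullptr && cell.value == map;
}

// LoadWeakValue: yields the map, or signals the miss path when cleared.
const Map* LoadWeakValue(const WeakCell& cell, bool* miss) {
  if (cell.value == nullptr) { *miss = true; return nullptr; }
  return cell.value;
}

int main() {
  Map m{42};
  WeakCell cell{&m};
  assert(CmpWeakValue(&m, cell));
  cell.ClearedByGC();              // map died; GC cleared the cell
  bool miss = false;
  LoadWeakValue(cell, &miss);
  assert(miss);                    // handler falls through to the miss label
  return 0;
}

The payoff is that a handler shared across receivers no longer pins dead maps; the worst case is an extra miss after a GC clears the cell.
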
@@ -581,7 +615,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     }
     __ push(holder_reg);
     __ push(this->name());
-
+    InterceptorVectorSlotPush(holder_reg);
    // Invoke an interceptor.  Note: map checks from the receiver to the
    // interceptor's holder have been compiled before (see the caller of
    // this method).
@@ -605,6 +639,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
       __ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
     }
 
+    InterceptorVectorSlotPop(holder_reg);
     __ pop(this->name());
     __ pop(holder_reg);
     if (must_preserve_receiver_reg) {
@@ -637,7 +672,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name,
     Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = Frontend(receiver(), name);
+  Register holder_reg = Frontend(name);
 
   __ pop(scratch1());  // remove the return address
   __ push(receiver());
@@ -683,16 +718,15 @@ Register NamedStoreHandlerCompiler::value() {
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
     Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
   Label miss;
-
+  if (IC::ICUseVector(kind())) {
+    PushVectorAndSlot();
+  }
   FrontendHeader(receiver(), name, &miss);
   // Get the value from the cell.
   Register result = StoreDescriptor::ValueRegister();
-  if (masm()->serializer_enabled()) {
-    __ mov(result, Immediate(cell));
-    __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
-  } else {
-    __ mov(result, Operand::ForCell(cell));
-  }
+  Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+  __ LoadWeakValue(result, weak_cell, &miss);
+  __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
   if (is_configurable) {
@@ -706,6 +740,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_load_global_stub(), 1);
   // The code above already loads the result into the return register.
+  if (IC::ICUseVector(kind())) {
+    DiscardVectorAndSlot();
+  }
   __ ret(0);
 
   FrontendFooter(name, &miss);
index ac42f30..f43b641 100644 (file)
@@ -44,10 +44,13 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
   Label miss;
 
   if (check == PROPERTY &&
-      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    // In case we are compiling an IC for dictionary loads and stores, just
+      (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
+      // If we are compiling an IC for dictionary loads or stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      // Keyed loads with dictionaries shouldn't be here; they go generic.
+      // The DCHECK is to protect assumptions when --vector-ics is on.
+      DCHECK(kind() != Code::KEYED_LOAD_IC);
       Register tmp = scratch1();
       __ JumpIfSmi(this->name(), &miss);
       __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
@@ -75,7 +78,8 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
     Handle<Map> map = IC::TypeToMap(*type, isolate());
     if (!map->is_deprecated()) {
       number_of_handled_maps++;
-      __ cmp(map_reg, map);
+      Handle<WeakCell> cell = Map::WeakCellForMap(map);
+      __ CmpWeakValue(map_reg, cell, scratch2());
       if (type->Is(HeapType::Number())) {
         DCHECK(!number_case.is_unused());
         __ bind(&number_case);
@@ -99,16 +103,19 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
     MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
     MapHandleList* transitioned_maps) {
   Label miss;
-  __ JumpIfSmi(receiver(), &miss, Label::kNear);
-  __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+  __ JumpIfSmi(receiver(), &miss);
+  Register map_reg = scratch1();
+  __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
   for (int i = 0; i < receiver_maps->length(); ++i) {
-    __ cmp(scratch1(), receiver_maps->at(i));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+    __ CmpWeakValue(map_reg, cell, scratch2());
     if (transitioned_maps->at(i).is_null()) {
       __ j(equal, handler_stubs->at(i));
     } else {
       Label next_map;
       __ j(not_equal, &next_map, Label::kNear);
-      __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+      Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+      __ LoadWeakValue(transition_map(), cell, &miss);
       __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
       __ bind(&next_map);
     }
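
CompileKeyedStorePolymorphic now walks the candidate maps through weak cells, and for transitioning stores it loads the transition map weakly as well, missing if that map was collected. A compact model of the dispatch loop, with hypothetical handler indices standing in for code stubs:

#include <cassert>
#include <vector>

struct Map { int id; };

// One polymorphic case: the receiver map to match (held weakly), the
// handler to jump to, and an optional transition map (also held weakly).
struct Candidate {
  const Map* map;
  int handler;
  bool has_transition;
  const Map* transition;  // nullptr here models a GC-cleared weak cell
};

// Returns the handler index for the receiver's map, or -1 for a miss.
// A transitioning case whose weak transition map was cleared also misses,
// mirroring __ LoadWeakValue(transition_map(), cell, &miss).
int Dispatch(const Map* receiver_map, const std::vector<Candidate>& cases) {
  for (const Candidate& c : cases) {
    if (c.map != receiver_map) continue;      // CmpWeakValue; try next_map
    if (c.has_transition && c.transition == nullptr) return -1;
    return c.handler;                         // jmp handler_stubs->at(i)
  }
  return -1;                                  // fall through to miss
}

int main() {
  Map fast{1}, slower{2}, other{3};
  std::vector<Candidate> cases = {
      {&fast, 0, true, &slower},    // store that transitions fast -> slower
      {&slower, 1, false, nullptr}  // plain store for the slower map
  };
  assert(Dispatch(&fast, cases) == 0);
  assert(Dispatch(&slower, cases) == 1);
  assert(Dispatch(&other, cases) == -1);      // unseen map: miss
  return 0;
}
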
index a622ba4..9822f26 100644 (file)
@@ -692,8 +692,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
   __ JumpIfNotUniqueNameInstanceType(ebx, &slow);
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               key, ebx, no_reg);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
   // Cache miss.
   __ jmp(&miss);
 
@@ -767,31 +767,52 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
 static void LoadIC_PushArgs(MacroAssembler* masm) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  DCHECK(!ebx.is(receiver) && !ebx.is(name));
+  if (FLAG_vector_ics) {
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+    DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
+           !edi.is(vector));
+
+    __ pop(edi);
+    __ push(receiver);
+    __ push(name);
+    __ push(slot);
+    __ push(vector);
+    __ push(edi);
+  } else {
+    DCHECK(!ebx.is(receiver) && !ebx.is(name));
 
-  __ pop(ebx);
-  __ push(receiver);
-  __ push(name);
-  __ push(ebx);
+    __ pop(ebx);
+    __ push(receiver);
+    __ push(name);
+    __ push(ebx);
+  }
 }
 
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // Return address is on the stack.
   __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
-
   LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   ExternalReference ref =
       ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // Return address is on the stack.
-  LoadIC_PushArgs(masm);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+  __ pop(ebx);
+  __ push(receiver);
+  __ push(name);
+  __ push(ebx);
 
   // Perform tail call to the entry.
   __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
@@ -807,13 +828,21 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // Perform tail call to the entry.
   ExternalReference ref =
       ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // Return address is on the stack.
-  LoadIC_PushArgs(masm);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+  __ pop(ebx);
+  __ push(receiver);
+  __ push(name);
+  __ push(ebx);
 
   // Perform tail call to the entry.
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
@@ -825,7 +854,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, false, StoreDescriptor::ReceiverRegister(),
+      masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
       StoreDescriptor::NameRegister(), ebx, no_reg);
 
   // Cache miss: Jump to runtime.
index c1f7c9a..cb560f1 100644 (file)
@@ -7,7 +7,9 @@
 #if V8_TARGET_ARCH_IA32
 
 #include "src/codegen.h"
+#include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
 
 namespace v8 {
 namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
 
 
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Flags flags, bool leave_frame,
+                       Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
                        StubCache::Table table, Register name, Register receiver,
                        // Number of the cache entry pointer-size scaled.
                        Register offset, Register extra) {
@@ -56,6 +58,13 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
     }
 #endif
 
+    if (IC::ICUseVector(ic_kind)) {
+      // The vector and slot were pushed onto the stack before starting the
+      // probe, and need to be dropped before calling the handler.
+      __ pop(VectorLoadICDescriptor::VectorRegister());
+      __ pop(VectorLoadICDescriptor::SlotRegister());
+    }
+
     if (leave_frame) __ leave();
 
     // Jump to the first instruction in the code stub.
@@ -100,6 +109,17 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
     __ pop(offset);
     __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
 
+    if (IC::ICUseVector(ic_kind)) {
+      // The vector and slot were pushed onto the stack before starting the
+      // probe, and need to be dropped before calling the handler.
+      Register vector = VectorLoadICDescriptor::VectorRegister();
+      Register slot = VectorLoadICDescriptor::SlotRegister();
+      DCHECK(!offset.is(vector) && !offset.is(slot));
+
+      __ pop(vector);
+      __ pop(slot);
+    }
+
     if (leave_frame) __ leave();
 
     // Jump to the first instruction in the code stub.
@@ -113,10 +133,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
 }
 
 
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
-                              bool leave_frame, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+                              Code::Flags flags, bool leave_frame,
+                              Register receiver, Register name,
+                              Register scratch, Register extra, Register extra2,
+                              Register extra3) {
   Label miss;
 
   // Assert that code is valid.  The multiplying code relies on the entry size
@@ -159,8 +180,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   DCHECK(kCacheIndexShift == kPointerSizeLog2);
 
   // Probe the primary table.
-  ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
-             offset, extra);
+  ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kPrimary, name,
+             receiver, offset, extra);
 
   // Primary miss: Compute hash for secondary probe.
   __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
@@ -172,8 +193,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
 
   // Probe the secondary table.
-  ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
-             offset, extra);
+  ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kSecondary, name,
+             receiver, offset, extra);
 
  // Cache miss: fall through and let the caller handle the miss by
  // entering the runtime system.
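
GenerateProbe keeps the classic two-level stub cache: a primary probe derived from the name's hash and the code flags, then a secondary probe derived from the primary offset on a miss, and finally the runtime. A hedged, simplified model of the scheme (the table sizes and hash mixing below are made up; only the primary-then-secondary probe structure mirrors the code above):

#include <cassert>
#include <cstdint>

// Illustrative two-level stub cache; sizes and offset arithmetic are
// invented, only the probe order models GenerateProbe.
constexpr uint32_t kPrimarySize = 2048;
constexpr uint32_t kSecondarySize = 512;

struct Entry { uint32_t name_hash; uint32_t flags; int handler; };

struct StubCache {
  Entry primary[kPrimarySize] = {};
  Entry secondary[kSecondarySize] = {};

  static uint32_t PrimaryOffset(uint32_t hash, uint32_t flags) {
    return (hash + flags) % kPrimarySize;
  }
  static uint32_t SecondaryOffset(uint32_t primary, uint32_t flags) {
    return (primary - flags) % kSecondarySize;  // unsigned wrap is fine
  }

  // Probe primary, then secondary; -1 means fall through to the runtime.
  int Probe(uint32_t hash, uint32_t flags) const {
    uint32_t p_off = PrimaryOffset(hash, flags);
    const Entry& p = primary[p_off];
    if (p.name_hash == hash && p.flags == flags) return p.handler;
    const Entry& s = secondary[SecondaryOffset(p_off, flags)];
    if (s.name_hash == hash && s.flags == flags) return s.handler;
    return -1;
  }
};

int main() {
  StubCache cache;
  uint32_t hash = 0xBEEF, flags = 3;
  cache.primary[StubCache::PrimaryOffset(hash, flags)] = {hash, flags, 7};
  assert(cache.Probe(hash, flags) == 7);
  assert(cache.Probe(0x1234, flags) == -1);  // miss: enter runtime system
  return 0;
}
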
index 1f6eb4e..e087acf 100644 (file)
@@ -63,6 +63,9 @@ Handle<Code> PropertyICCompiler::ComputeMonomorphic(
         KeyedStoreIC::IcCheckTypeField::update(extra_ic_state, PROPERTY);
     DCHECK(STANDARD_STORE ==
            KeyedStoreIC::GetKeyedAccessStoreMode(extra_ic_state));
+  } else if (kind == Code::KEYED_LOAD_IC) {
+    extra_ic_state = KeyedLoadIC::IcCheckTypeField::update(extra_ic_state,
+                                                           PROPERTY);
   }
 
   Handle<Code> ic;
@@ -86,6 +89,7 @@ Handle<Code> PropertyICCompiler::ComputeMonomorphic(
 Handle<Code> PropertyICCompiler::ComputeKeyedLoadMonomorphic(
     Handle<Map> receiver_map) {
   Isolate* isolate = receiver_map->GetIsolate();
+  DCHECK(KeyedLoadIC::GetKeyType(kNoExtraICState) == ELEMENT);
   Code::Flags flags = Code::ComputeMonomorphicFlags(Code::KEYED_LOAD_IC);
   Handle<Name> name = isolate->factory()->KeyedLoadMonomorphic_string();
 
@@ -240,7 +244,8 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
   }
 
   Code::FindAndReplacePattern pattern;
-  pattern.Add(isolate->factory()->meta_map(), receiver_map);
+  Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+  pattern.Add(isolate->factory()->meta_map(), cell);
   Handle<Code> ic = stub->GetCodeCopy(pattern);
 
   if (!receiver_map->is_dictionary_map()) {
@@ -255,6 +260,7 @@ Handle<Code> PropertyICCompiler::ComputeCompareNil(Handle<Map> receiver_map,
 Handle<Code> PropertyICCompiler::ComputeKeyedLoadPolymorphic(
     MapHandleList* receiver_maps) {
   Isolate* isolate = receiver_maps->at(0)->GetIsolate();
+  DCHECK(KeyedLoadIC::GetKeyType(kNoExtraICState) == ELEMENT);
   Code::Flags flags = Code::ComputeFlags(Code::KEYED_LOAD_IC, POLYMORPHIC);
   Handle<PolymorphicCodeCache> cache =
       isolate->factory()->polymorphic_code_cache();
@@ -374,7 +380,6 @@ Handle<Code> PropertyICCompiler::GetCode(Code::Kind kind, Code::StubType type,
   Code::Flags flags =
       Code::ComputeFlags(kind, state, extra_ic_state_, type, cache_holder());
   Handle<Code> code = GetCodeWithFlags(flags, name);
-  IC::RegisterWeakMapDependency(code);
   PROFILE(isolate(), CodeCreateEvent(log_kind(code), *code, *name));
   return code;
 }
@@ -445,7 +450,10 @@ Handle<Code> PropertyICCompiler::CompileKeyedStoreMonomorphic(
     stub = StoreElementStub(isolate(), elements_kind).GetCode();
   }
 
-  __ DispatchMap(receiver(), scratch1(), receiver_map, stub, DO_SMI_CHECK);
+  Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+
+  __ DispatchWeakMap(receiver(), scratch1(), scratch2(), cell, stub,
+                     DO_SMI_CHECK);
 
   TailCallBuiltin(masm(), Builtins::kKeyedStoreIC_Miss);
 
index 22f66d0..58d7d46 100644 (file)
@@ -96,6 +96,12 @@ Code* IC::GetTargetAtAddress(Address address,
 void IC::SetTargetAtAddress(Address address, Code* target,
                             ConstantPoolArray* constant_pool) {
   DCHECK(target->is_inline_cache_stub() || target->is_compare_ic_stub());
+
+  // Don't use this for load ICs when --vector-ics is turned on.
+  DCHECK(!(FLAG_vector_ics && target->is_inline_cache_stub()) ||
+         (target->kind() != Code::LOAD_IC &&
+          target->kind() != Code::KEYED_LOAD_IC));
+
   Heap* heap = target->GetHeap();
   Code* old_target = GetTargetAtAddress(address, constant_pool);
 #ifdef DEBUG
@@ -119,9 +125,6 @@ void IC::SetTargetAtAddress(Address address, Code* target,
 
 
 void IC::set_target(Code* code) {
-#ifdef VERIFY_HEAP
-  code->VerifyEmbeddedObjectsDependency();
-#endif
   SetTargetAtAddress(address(), code, constant_pool());
   target_set_ = true;
 }
index 18ea7f3..9c883ad 100644 (file)
@@ -17,19 +17,6 @@ void ICUtility::Clear(Isolate* isolate, Address address,
 }
 
 
-// static
-template <class Nexus>
-void ICUtility::Clear(Isolate* isolate, Code::Kind kind, Code* host,
-                      Nexus* nexus) {
-  IC::Clear<Nexus>(isolate, kind, host, nexus);
-}
-
-
-// Force instantiation of template instances for vector-based IC clearing.
-template void ICUtility::Clear<CallICNexus>(Isolate*, Code::Kind, Code*,
-                                            CallICNexus*);
-
-
 CallICState::CallICState(ExtraICState extra_ic_state)
     : argc_(ArgcBits::decode(extra_ic_state)),
       call_type_(CallTypeBits::decode(extra_ic_state)) {}
index 9bb877a..72fc865 100644 (file)
@@ -19,10 +19,6 @@ class ICUtility : public AllStatic {
   // Clear the inline cache to initial state.
   static void Clear(Isolate* isolate, Address address,
                     ConstantPoolArray* constant_pool);
-  // Clear a vector-based inline cache to initial state.
-  template <class Nexus>
-  static void Clear(Isolate* isolate, Code::Kind kind, Code* host,
-                    Nexus* nexus);
 };
 
 
index a9369ed..48cef68 100644 (file)
@@ -258,7 +258,11 @@ bool IC::TryRemoveInvalidPrototypeDependentStub(Handle<Object> receiver,
                                                 Handle<String> name) {
   if (!IsNameCompatibleWithPrototypeFailure(name)) return false;
   Handle<Map> receiver_map = TypeToMap(*receiver_type(), isolate());
-  maybe_handler_ = target()->FindHandlerForMap(*receiver_map);
+  if (UseVector()) {
+    maybe_handler_ = nexus()->FindHandlerForMap(receiver_map);
+  } else {
+    maybe_handler_ = target()->FindHandlerForMap(*receiver_map);
+  }
 
   // The current map wasn't handled yet. There's no reason to stay monomorphic,
   // *unless* we're moving from a deprecated map to its replacement, or
@@ -310,7 +314,8 @@ bool IC::IsNameCompatibleWithPrototypeFailure(Handle<Object> name) {
   if (target()->is_keyed_stub()) {
    // Determine whether the failure is due to a name mismatch.
     if (!name->IsName()) return false;
-    Name* stub_name = target()->FindFirstName();
+    Name* stub_name =
+        UseVector() ? nexus()->FindFirstName() : target()->FindFirstName();
     if (*name != stub_name) return false;
   }
 
@@ -339,7 +344,7 @@ void IC::UpdateState(Handle<Object> receiver, Handle<Object> name) {
   // an inline cache miss for the builtins object after lazily loading
  // JavaScript builtins, we return premonomorphic as the state to
  // force the inline cache back to a monomorphic state.
-  if (receiver->IsJSBuiltinsObject()) state_ = UNINITIALIZED;
+  if (receiver->IsJSBuiltinsObject()) state_ = PREMONOMORPHIC;
 }
 
 
@@ -452,7 +457,7 @@ void IC::OnTypeFeedbackChanged(Isolate* isolate, Code* host,
 void IC::PostPatching(Address address, Code* target, Code* old_target) {
   // Type vector based ICs update these statistics at a different time because
   // they don't always patch on state change.
-  if (target->kind() == Code::CALL_IC) return;
+  if (ICUseVector(target->kind())) return;
 
   Isolate* isolate = target->GetHeap()->isolate();
   State old_state = UNINITIALIZED;
@@ -469,42 +474,6 @@ void IC::PostPatching(Address address, Code* target, Code* old_target) {
 }
 
 
-void IC::RegisterWeakMapDependency(Handle<Code> stub) {
-  if (FLAG_collect_maps && FLAG_weak_embedded_maps_in_ic &&
-      stub->CanBeWeakStub()) {
-    DCHECK(!stub->is_weak_stub());
-    MapHandleList maps;
-    stub->FindAllMaps(&maps);
-    if (maps.length() == 1 && stub->IsWeakObjectInIC(*maps.at(0))) {
-      Map::AddDependentIC(maps.at(0), stub);
-      stub->mark_as_weak_stub();
-      if (FLAG_enable_ool_constant_pool) {
-        stub->constant_pool()->set_weak_object_state(
-            ConstantPoolArray::WEAK_OBJECTS_IN_IC);
-      }
-    }
-  }
-}
-
-
-void IC::InvalidateMaps(Code* stub) {
-  DCHECK(stub->is_weak_stub());
-  stub->mark_as_invalidated_weak_stub();
-  Isolate* isolate = stub->GetIsolate();
-  Heap* heap = isolate->heap();
-  Object* undefined = heap->undefined_value();
-  int mode_mask = RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
-  for (RelocIterator it(stub, mode_mask); !it.done(); it.next()) {
-    RelocInfo::Mode mode = it.rinfo()->rmode();
-    if (mode == RelocInfo::EMBEDDED_OBJECT &&
-        it.rinfo()->target_object()->IsMap()) {
-      it.rinfo()->set_target_object(undefined, SKIP_WRITE_BARRIER);
-    }
-  }
-  CpuFeatures::FlushICache(stub->instruction_start(), stub->instruction_size());
-}
-
-
 void IC::Clear(Isolate* isolate, Address address,
                ConstantPoolArray* constant_pool) {
   Code* target = GetTargetAtAddress(address, constant_pool);
@@ -514,8 +483,10 @@ void IC::Clear(Isolate* isolate, Address address,
 
   switch (target->kind()) {
     case Code::LOAD_IC:
+      if (FLAG_vector_ics) return;
       return LoadIC::Clear(isolate, address, target, constant_pool);
     case Code::KEYED_LOAD_IC:
+      if (FLAG_vector_ics) return;
       return KeyedLoadIC::Clear(isolate, address, target, constant_pool);
     case Code::STORE_IC:
       return StoreIC::Clear(isolate, address, target, constant_pool);
@@ -537,23 +508,9 @@ void IC::Clear(Isolate* isolate, Address address,
 }
 
 
-template <class Nexus>
-void IC::Clear(Isolate* isolate, Code::Kind kind, Code* host, Nexus* nexus) {
-  switch (kind) {
-    case Code::CALL_IC:
-      return CallIC::Clear(isolate, host, nexus);
-    default:
-      UNREACHABLE();
-  }
-}
-
-
-// Force instantiation of template instances for vector-based IC clearing.
-template void IC::Clear(Isolate*, Code::Kind, Code*, CallICNexus*);
-
-
 void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target,
                         ConstantPoolArray* constant_pool) {
+  DCHECK(!FLAG_vector_ics);
   if (IsCleared(target)) return;
 
   // Make sure to also clear the map used in inline fast cases.  If we
@@ -563,6 +520,17 @@ void KeyedLoadIC::Clear(Isolate* isolate, Address address, Code* target,
 }
 
 
+void KeyedLoadIC::Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus) {
+  if (IsCleared(nexus)) return;
+  // Make sure to also clear the map used in inline fast cases.  If we
+  // do not clear these maps, cached code can keep objects alive
+  // through the embedded maps.
+  State state = nexus->StateFromFeedback();
+  nexus->ConfigurePremonomorphic();
+  OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+}
+
+
 void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
   // Determine our state.
   Object* feedback = nexus->vector()->Get(nexus->slot());
@@ -578,6 +546,7 @@ void CallIC::Clear(Isolate* isolate, Code* host, CallICNexus* nexus) {
 
 void LoadIC::Clear(Isolate* isolate, Address address, Code* target,
                    ConstantPoolArray* constant_pool) {
+  DCHECK(!FLAG_vector_ics);
   if (IsCleared(target)) return;
   Code* code = PropertyICCompiler::FindPreMonomorphic(isolate, Code::LOAD_IC,
                                                       target->extra_ic_state());
@@ -585,6 +554,14 @@ void LoadIC::Clear(Isolate* isolate, Address address, Code* target,
 }
 
 
+void LoadIC::Clear(Isolate* isolate, Code* host, LoadICNexus* nexus) {
+  if (IsCleared(nexus)) return;
+  State state = nexus->StateFromFeedback();
+  nexus->ConfigurePremonomorphic();
+  OnTypeFeedbackChanged(isolate, host, nexus->vector(), state, PREMONOMORPHIC);
+}
+
+
 void StoreIC::Clear(Isolate* isolate, Address address, Code* target,
                     ConstantPoolArray* constant_pool) {
   if (IsCleared(target)) return;
@@ -635,6 +612,69 @@ static bool MigrateDeprecated(Handle<Object> object) {
 }
 
 
+void IC::ConfigureVectorState(IC::State new_state) {
+  DCHECK(UseVector());
+  if (kind() == Code::LOAD_IC) {
+    LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+    if (new_state == PREMONOMORPHIC) {
+      nexus->ConfigurePremonomorphic();
+    } else if (new_state == MEGAMORPHIC) {
+      nexus->ConfigureMegamorphic();
+    } else {
+      UNREACHABLE();
+    }
+  } else if (kind() == Code::KEYED_LOAD_IC) {
+    KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+    if (new_state == GENERIC) {
+      nexus->ConfigureGeneric();
+    } else if (new_state == PREMONOMORPHIC) {
+      nexus->ConfigurePremonomorphic();
+    } else {
+      UNREACHABLE();
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
+                        new_state);
+}
+
+
+void IC::ConfigureVectorState(Handle<Name> name, Handle<HeapType> type,
+                              Handle<Code> handler) {
+  DCHECK(UseVector());
+  if (kind() == Code::LOAD_IC) {
+    LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+    nexus->ConfigureMonomorphic(type, handler);
+  } else {
+    DCHECK(kind() == Code::KEYED_LOAD_IC);
+    KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+    nexus->ConfigureMonomorphic(name, type, handler);
+  }
+
+  OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
+                        MONOMORPHIC);
+}
+
+
+void IC::ConfigureVectorState(Handle<Name> name, TypeHandleList* types,
+                              CodeHandleList* handlers) {
+  DCHECK(UseVector());
+  if (kind() == Code::LOAD_IC) {
+    LoadICNexus* nexus = casted_nexus<LoadICNexus>();
+    nexus->ConfigurePolymorphic(types, handlers);
+  } else {
+    DCHECK(kind() == Code::KEYED_LOAD_IC);
+    KeyedLoadICNexus* nexus = casted_nexus<KeyedLoadICNexus>();
+    nexus->ConfigurePolymorphic(name, types, handlers);
+  }
+
+  OnTypeFeedbackChanged(isolate(), get_host(), *vector(), saved_state(),
+                        POLYMORPHIC);
+}
+
+
 MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
   // If the object is undefined or null it's illegal to try to get any
   // of its properties; throw a TypeError in that case.
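
The three ConfigureVectorState overloads above route every state change for vector-based load ICs through the FeedbackNexus instead of patching code, then report the transition via OnTypeFeedbackChanged. A reduced state-machine model of the nexus side (a hypothetical Nexus type; the real one also records receiver types and handler code objects):

#include <cassert>
#include <vector>

enum class State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC,
                   POLYMORPHIC, MEGAMORPHIC, GENERIC };

// Minimal stand-in for LoadICNexus/KeyedLoadICNexus: the slot's state
// plus the handlers it currently dispatches to.
class Nexus {
 public:
  State state() const { return state_; }
  void ConfigurePremonomorphic() { state_ = State::PREMONOMORPHIC; }
  void ConfigureMegamorphic() { state_ = State::MEGAMORPHIC; }
  void ConfigureGeneric() { state_ = State::GENERIC; }
  void ConfigureMonomorphic(int handler) {
    handlers_.assign(1, handler);
    state_ = State::MONOMORPHIC;
  }
  void ConfigurePolymorphic(const std::vector<int>& handlers) {
    handlers_ = handlers;
    state_ = State::POLYMORPHIC;
  }
 private:
  State state_ = State::UNINITIALIZED;
  std::vector<int> handlers_;
};

int main() {
  Nexus nexus;
  nexus.ConfigurePremonomorphic();            // first miss: delay specializing
  nexus.ConfigureMonomorphic(/*handler=*/1);  // one map seen
  nexus.ConfigurePolymorphic({1, 2});         // a second map arrives
  nexus.ConfigureMegamorphic();               // too many maps: give up
  assert(nexus.state() == State::MEGAMORPHIC);
  return 0;
}

Because the state lives in the feedback vector rather than in patched code, clearing an IC becomes a data write (ConfigurePremonomorphic) instead of a code swap.
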
@@ -648,7 +688,11 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
   if (kind() == Code::KEYED_LOAD_IC && name->AsArrayIndex(&index)) {
     // Rewrite to the generic keyed load stub.
     if (FLAG_use_ic) {
-      set_target(*KeyedLoadIC::generic_stub(isolate()));
+      if (UseVector()) {
+        ConfigureVectorState(GENERIC);
+      } else {
+        set_target(*KeyedLoadIC::generic_stub(isolate()));
+      }
       TRACE_IC("LoadIC", name);
       TRACE_GENERIC_IC(isolate(), "LoadIC", "name as array index");
     }
@@ -661,6 +705,25 @@ MaybeHandle<Object> LoadIC::Load(Handle<Object> object, Handle<Name> name) {
 
   bool use_ic = MigrateDeprecated(object) ? false : FLAG_use_ic;
 
+  if (FLAG_harmony_scoping && object->IsGlobalObject() && name->IsString()) {
+    // Look up in script context table.
+    Handle<String> str_name = Handle<String>::cast(name);
+    Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
+    Handle<ScriptContextTable> script_contexts(
+        global->native_context()->script_context_table());
+
+    ScriptContextTable::LookupResult lookup_result;
+    if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
+      if (use_ic && LoadScriptContextFieldStub::Accepted(&lookup_result)) {
+        LoadScriptContextFieldStub stub(isolate(), &lookup_result);
+        PatchCache(name, stub.GetCode());
+      }
+      return FixedArray::get(ScriptContextTable::GetContext(
+                                 script_contexts, lookup_result.context_index),
+                             lookup_result.slot_index);
+    }
+  }
+
   // Named lookup in the object.
   LookupIterator it(object, name);
   LookupForRead(&it);
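
Under --harmony-scoping, the load path first consults the script context table, because script-level let/const bindings live in script contexts rather than as properties of the global object; on a hit the IC answers from the context slot and may install LoadScriptContextFieldStub to do so inline. A toy model of the table lookup (hypothetical types):

#include <cassert>
#include <map>
#include <string>
#include <vector>

// Toy stand-ins for the ScriptContextTable lookup: each script context
// is a vector of slots, and the table maps names to (context, slot).
struct LookupResult { int context_index; int slot_index; };

struct ScriptContextTable {
  std::vector<std::vector<int>> contexts;
  std::map<std::string, LookupResult> names;

  bool Lookup(const std::string& name, LookupResult* result) const {
    auto it = names.find(name);
    if (it == names.end()) return false;  // fall back to a named lookup
    *result = it->second;
    return true;
  }
  int Get(const LookupResult& r) const {
    return contexts[r.context_index][r.slot_index];
  }
};

int main() {
  // Models `let x = 42;` at script scope under --harmony-scoping.
  ScriptContextTable table;
  table.contexts = {{42}};
  table.names["x"] = {0, 0};

  LookupResult r;
  assert(table.Lookup("x", &r));
  assert(table.Get(r) == 42);     // LoadIC::Load returns the context slot
  assert(!table.Lookup("y", &r)); // not script-scoped: normal lookup path
  return 0;
}
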
@@ -733,15 +796,26 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
       number_of_types - deprecated_types - (handler_to_overwrite != -1);
 
   if (number_of_valid_types >= 4) return false;
-  if (number_of_types == 0) return false;
-  if (!target()->FindHandlers(&handlers, types.length())) return false;
+  if (number_of_types == 0 && state() != MONOMORPHIC &&
+      state() != POLYMORPHIC) {
+    return false;
+  }
+  if (UseVector()) {
+    if (!nexus()->FindHandlers(&handlers, types.length())) return false;
+  } else {
+    if (!target()->FindHandlers(&handlers, types.length())) return false;
+  }
 
   number_of_valid_types++;
   if (number_of_valid_types > 1 && target()->is_keyed_stub()) return false;
   Handle<Code> ic;
   if (number_of_valid_types == 1) {
-    ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, type, code,
-                                                extra_ic_state());
+    if (UseVector()) {
+      ConfigureVectorState(name, receiver_type(), code);
+    } else {
+      ic = PropertyICCompiler::ComputeMonomorphic(kind(), name, type, code,
+                                                  extra_ic_state());
+    }
   } else {
     if (handler_to_overwrite >= 0) {
       handlers.Set(handler_to_overwrite, code);
@@ -752,11 +826,17 @@ bool IC::UpdatePolymorphicIC(Handle<Name> name, Handle<Code> code) {
       types.Add(type);
       handlers.Add(code);
     }
-    ic = PropertyICCompiler::ComputePolymorphic(kind(), &types, &handlers,
-                                                number_of_valid_types, name,
-                                                extra_ic_state());
+
+    if (UseVector()) {
+      ConfigureVectorState(name, &types, &handlers);
+    } else {
+      ic = PropertyICCompiler::ComputePolymorphic(kind(), &types, &handlers,
+                                                  number_of_valid_types, name,
+                                                  extra_ic_state());
+    }
   }
-  set_target(*ic);
+
+  if (!UseVector()) set_target(*ic);
   return true;
 }
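
The reworked UpdatePolymorphicIC bookkeeping is easier to see in isolation: overwrite the handler when the receiver type was already recorded, append otherwise, and bail out once more than four valid types accumulate so the caller can go MEGAMORPHIC (or GENERIC for keyed loads). A sketch of just that logic, with ints standing in for types and handlers:

#include <cassert>
#include <utility>
#include <vector>

// Sketch of the polymorphic-IC update rule; kMaxPolymorphism mirrors the
// "number_of_valid_types >= 4" cutoff above.
constexpr int kMaxPolymorphism = 4;

bool UpdatePolymorphic(std::vector<std::pair<int, int>>* cases,
                       int type, int handler) {
  for (auto& entry : *cases) {
    if (entry.first == type) {  // handler_to_overwrite
      entry.second = handler;
      return true;
    }
  }
  if (static_cast<int>(cases->size()) >= kMaxPolymorphism) return false;
  cases->emplace_back(type, handler);
  return true;
}

int main() {
  std::vector<std::pair<int, int>> cases;
  assert(UpdatePolymorphic(&cases, /*type=*/1, /*handler=*/10));
  assert(UpdatePolymorphic(&cases, 1, 11));   // same type: overwrite
  assert(cases.size() == 1 && cases[0].second == 11);
  for (int t = 2; t <= 4; ++t) UpdatePolymorphic(&cases, t, t * 10);
  assert(!UpdatePolymorphic(&cases, 5, 50));  // fifth type: give up
  return 0;
}
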
 
@@ -804,9 +884,13 @@ template Handle<HeapType> IC::MapToType<HeapType>(Handle<Map> map,
 
 void IC::UpdateMonomorphicIC(Handle<Code> handler, Handle<Name> name) {
   DCHECK(handler->is_handler());
-  Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic(
-      kind(), name, receiver_type(), handler, extra_ic_state());
-  set_target(*ic);
+  if (UseVector()) {
+    ConfigureVectorState(name, receiver_type(), handler);
+  } else {
+    Handle<Code> ic = PropertyICCompiler::ComputeMonomorphic(
+        kind(), name, receiver_type(), handler, extra_ic_state());
+    set_target(*ic);
+  }
 }
 
 
@@ -851,7 +935,12 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
         // same key.
         CopyICToMegamorphicCache(name);
       }
-      set_target(*megamorphic_stub());
+      if (UseVector()) {
+        ConfigureVectorState(kind() == Code::KEYED_LOAD_IC ? GENERIC
+                                                           : MEGAMORPHIC);
+      } else {
+        set_target(*megamorphic_stub());
+      }
     // Fall through.
     case MEGAMORPHIC:
       UpdateMegamorphicCache(*receiver_type(), *name, *code);
@@ -874,6 +963,10 @@ void IC::PatchCache(Handle<Name> name, Handle<Code> code) {
 
 Handle<Code> LoadIC::initialize_stub(Isolate* isolate,
                                      ExtraICState extra_state) {
+  if (FLAG_vector_ics) {
+    return LoadICTrampolineStub(isolate, LoadICState(extra_state)).GetCode();
+  }
+
   return PropertyICCompiler::ComputeLoad(isolate, UNINITIALIZED, extra_state);
 }
 
@@ -917,6 +1010,7 @@ Handle<Code> LoadIC::megamorphic_stub() {
 
 Handle<Code> LoadIC::pre_monomorphic_stub(Isolate* isolate,
                                           ExtraICState extra_state) {
+  DCHECK(!FLAG_vector_ics);
   return PropertyICCompiler::ComputeLoad(isolate, PREMONOMORPHIC, extra_state);
 }
 
@@ -946,7 +1040,11 @@ void LoadIC::UpdateCaches(LookupIterator* lookup) {
   if (state() == UNINITIALIZED) {
     // This is the first time we execute this inline cache. Set the target to
     // the pre monomorphic stub to delay setting the monomorphic state.
-    set_target(*pre_monomorphic_stub());
+    if (UseVector()) {
+      ConfigureVectorState(PREMONOMORPHIC);
+    } else {
+      set_target(*pre_monomorphic_stub());
+    }
     TRACE_IC("LoadIC", lookup->name());
     return;
   }
@@ -1209,11 +1307,19 @@ static Handle<Object> TryConvertKey(Handle<Object> key, Isolate* isolate) {
 
 
 Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
+  Handle<Code> null_handle;
   Handle<Map> receiver_map(receiver->map(), isolate());
   MapHandleList target_receiver_maps;
   TargetMaps(&target_receiver_maps);
 
   if (target_receiver_maps.length() == 0) {
+    if (FLAG_vector_ics) {
+      Handle<Code> handler =
+          PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(receiver_map);
+      ConfigureVectorState(Handle<Name>::null(), receiver_type(), handler);
+      return null_handle;
+    }
     return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
   }
 
@@ -1228,6 +1334,12 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
       IsMoreGeneralElementsKindTransition(
           target_receiver_maps.at(0)->elements_kind(),
           Handle<JSObject>::cast(receiver)->GetElementsKind())) {
+    if (FLAG_vector_ics) {
+      Handle<Code> handler =
+          PropertyICCompiler::ComputeKeyedLoadMonomorphicHandler(receiver_map);
+      ConfigureVectorState(Handle<Name>::null(), receiver_type(), handler);
+      return null_handle;
+    }
     return PropertyICCompiler::ComputeKeyedLoadMonomorphic(receiver_map);
   }
 
@@ -1239,6 +1351,10 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
     // If the miss wasn't due to an unseen map, a polymorphic stub
     // won't help, use the generic stub.
     TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "same map added twice");
+    if (FLAG_vector_ics) {
+      ConfigureVectorState(GENERIC);
+      return null_handle;
+    }
     return generic_stub();
   }
 
@@ -1246,9 +1362,25 @@ Handle<Code> KeyedLoadIC::LoadElementStub(Handle<HeapObject> receiver) {
   // version of the IC.
   if (target_receiver_maps.length() > kMaxKeyedPolymorphism) {
     TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "max polymorph exceeded");
+    if (FLAG_vector_ics) {
+      ConfigureVectorState(GENERIC);
+      return null_handle;
+    }
     return generic_stub();
   }
 
+  if (FLAG_vector_ics) {
+    CodeHandleList handlers(target_receiver_maps.length());
+    ElementHandlerCompiler compiler(isolate());
+    compiler.CompileElementHandlers(&target_receiver_maps, &handlers);
+    TypeHandleList types(target_receiver_maps.length());
+    for (int i = 0; i < target_receiver_maps.length(); i++) {
+      types.Add(HeapType::Class(target_receiver_maps.at(i), isolate()));
+    }
+    ConfigureVectorState(Handle<Name>::null(), &types, &handlers);
+    return null_handle;
+  }
+
   return PropertyICCompiler::ComputeKeyedLoadPolymorphic(&target_receiver_maps);
 }
 
@@ -1284,11 +1416,14 @@ MaybeHandle<Object> KeyedLoadIC::Load(Handle<Object> object,
   }
 
   if (!is_target_set()) {
-    Code* generic = *generic_stub();
-    if (*stub == generic) {
-      TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
+    if (!FLAG_vector_ics) {
+      Code* generic = *generic_stub();
+      if (*stub == generic) {
+        TRACE_GENERIC_IC(isolate(), "KeyedLoadIC", "set generic");
+      }
+
+      set_target(*stub);
     }
-    set_target(*stub);
     TRACE_IC("LoadIC", key);
   }
 
@@ -1363,6 +1498,32 @@ bool StoreIC::LookupForWrite(LookupIterator* it, Handle<Object> value,
 MaybeHandle<Object> StoreIC::Store(Handle<Object> object, Handle<Name> name,
                                    Handle<Object> value,
                                    JSReceiver::StoreFromKeyed store_mode) {
+  if (FLAG_harmony_scoping && object->IsGlobalObject() && name->IsString()) {
+    // Look up in script context table.
+    Handle<String> str_name = Handle<String>::cast(name);
+    Handle<GlobalObject> global = Handle<GlobalObject>::cast(object);
+    Handle<ScriptContextTable> script_contexts(
+        global->native_context()->script_context_table());
+
+    ScriptContextTable::LookupResult lookup_result;
+    if (ScriptContextTable::Lookup(script_contexts, str_name, &lookup_result)) {
+      Handle<Context> script_context = ScriptContextTable::GetContext(
+          script_contexts, lookup_result.context_index);
+      if (lookup_result.mode == CONST) {
+        return TypeError("harmony_const_assign", object, name);
+      }
+
+      if (FLAG_use_ic &&
+          StoreScriptContextFieldStub::Accepted(&lookup_result)) {
+        StoreScriptContextFieldStub stub(isolate(), &lookup_result);
+        PatchCache(name, stub.GetCode());
+      }
+
+      script_context->set(lookup_result.slot_index, *value);
+      return value;
+    }
+  }
+
   // TODO(verwaest): Let SetProperty do the migration, since storing a property
    // might deprecate the current map again, if the value does not fit.
   if (MigrateDeprecated(object) || object->IsJSProxy()) {
@@ -2113,13 +2274,38 @@ RUNTIME_FUNCTION(CallIC_Customization_Miss) {
 RUNTIME_FUNCTION(LoadIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Name> key = args.at<Name>(1);
-  ic.UpdateState(receiver, key);
   Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+
+  if (FLAG_vector_ics) {
+    DCHECK(args.length() == 4);
+    Handle<Smi> slot = args.at<Smi>(2);
+    Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+    FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+    // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
+    // LoadIC miss handler if the handler misses. Since the vector Nexus is
+    // set up outside the IC, handle that here.
+    if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+      LoadICNexus nexus(vector, vector_slot);
+      LoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+      ic.UpdateState(receiver, key);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                         ic.Load(receiver, key));
+    } else {
+      DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+      KeyedLoadICNexus nexus(vector, vector_slot);
+      KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+      ic.UpdateState(receiver, key);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                         ic.Load(receiver, key));
+    }
+  } else {
+    DCHECK(args.length() == 2);
+    LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+    ic.UpdateState(receiver, key);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+  }
   return *result;
 }
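
With --vector-ics the miss handlers receive four arguments (receiver, name, slot, vector) instead of two, rebuild the nexus from the vector and slot, and pick the IC flavor from the kind recorded in the slot, since a keyed load with a string key can land in the LoadIC miss handler. A schematic of that dispatch (a hypothetical Kind enum mirroring Code::LOAD_IC / Code::KEYED_LOAD_IC):

#include <cassert>
#include <string>
#include <vector>

// Hypothetical per-slot kinds; V8's vector records one per IC slot.
enum class Kind { LOAD_IC, KEYED_LOAD_IC };

struct TypeFeedbackVector {
  std::vector<Kind> kinds;
  Kind GetKind(int slot) const { return kinds[slot]; }
};

// Mirrors RUNTIME_FUNCTION(LoadIC_Miss): choose the IC to run from the
// kind recorded in the vector slot, not from the call site alone.
const char* HandleMiss(const TypeFeedbackVector& vector, int slot) {
  if (vector.GetKind(slot) == Kind::LOAD_IC) {
    return "LoadIC";       // LoadICNexus + LoadIC::Load
  }
  return "KeyedLoadIC";    // KeyedLoadICNexus + KeyedLoadIC::Load
}

int main() {
  TypeFeedbackVector vector{{Kind::LOAD_IC, Kind::KEYED_LOAD_IC}};
  assert(std::string("LoadIC") == HandleMiss(vector, 0));
  assert(std::string("KeyedLoadIC") == HandleMiss(vector, 1));
  return 0;
}
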
 
@@ -2128,13 +2314,26 @@ RUNTIME_FUNCTION(LoadIC_Miss) {
 RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
-  ic.UpdateState(receiver, key);
   Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+
+  if (FLAG_vector_ics) {
+    DCHECK(args.length() == 4);
+    Handle<Smi> slot = args.at<Smi>(2);
+    Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+    FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+    KeyedLoadICNexus nexus(vector, vector_slot);
+    KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate, &nexus);
+    ic.UpdateState(receiver, key);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+  } else {
+    DCHECK(args.length() == 2);
+    KeyedLoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+    ic.UpdateState(receiver, key);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+  }
+
   return *result;
 }
 
@@ -2142,13 +2341,26 @@ RUNTIME_FUNCTION(KeyedLoadIC_Miss) {
 RUNTIME_FUNCTION(KeyedLoadIC_MissFromStubFailure) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Object> key = args.at<Object>(1);
-  ic.UpdateState(receiver, key);
   Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+
+  if (FLAG_vector_ics) {
+    DCHECK(args.length() == 4);
+    Handle<Smi> slot = args.at<Smi>(2);
+    Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+    FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+    KeyedLoadICNexus nexus(vector, vector_slot);
+    KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+    ic.UpdateState(receiver, key);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+  } else {
+    DCHECK(args.length() == 2);
+    KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
+    ic.UpdateState(receiver, key);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+  }
+
   return *result;
 }
 
@@ -2598,19 +2810,17 @@ RUNTIME_FUNCTION(StoreCallbackProperty) {
  */
 RUNTIME_FUNCTION(LoadPropertyWithInterceptorOnly) {
   DCHECK(args.length() == NamedLoadHandlerCompiler::kInterceptorArgsLength);
-  Handle<Name> name_handle =
+  Handle<Name> name =
       args.at<Name>(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex);
   Handle<InterceptorInfo> interceptor_info = args.at<InterceptorInfo>(
       NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex);
 
-  // TODO(rossberg): Support symbols in the API.
-  if (name_handle->IsSymbol())
+  if (name->IsSymbol() && !interceptor_info->can_intercept_symbols())
     return isolate->heap()->no_interceptor_result_sentinel();
-  Handle<String> name = Handle<String>::cast(name_handle);
 
   Address getter_address = v8::ToCData<Address>(interceptor_info->getter());
-  v8::NamedPropertyGetterCallback getter =
-      FUNCTION_CAST<v8::NamedPropertyGetterCallback>(getter_address);
+  v8::GenericNamedPropertyGetterCallback getter =
+      FUNCTION_CAST<v8::GenericNamedPropertyGetterCallback>(getter_address);
   DCHECK(getter != NULL);
 
   Handle<JSObject> receiver =
@@ -2640,7 +2850,7 @@ static Object* ThrowReferenceError(Isolate* isolate, Name* name) {
   // If the load is non-contextual, just return the undefined result.
   // Note that both keyed and non-keyed loads may end up here.
   HandleScope scope(isolate);
-  LoadIC ic(IC::NO_EXTRA_FRAME, isolate);
+  LoadIC ic(IC::NO_EXTRA_FRAME, isolate, true);
   if (ic.contextual_mode() != CONTEXTUAL) {
     return isolate->heap()->undefined_value();
   }
@@ -2722,13 +2932,39 @@ RUNTIME_FUNCTION(LoadElementWithInterceptor) {
 RUNTIME_FUNCTION(LoadIC_MissFromStubFailure) {
   TimerEventScope<TimerEventIcMiss> timer(isolate);
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
-  LoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
   Handle<Object> receiver = args.at<Object>(0);
   Handle<Name> key = args.at<Name>(1);
-  ic.UpdateState(receiver, key);
   Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+
+  if (FLAG_vector_ics) {
+    DCHECK(args.length() == 4);
+    Handle<Smi> slot = args.at<Smi>(2);
+    Handle<TypeFeedbackVector> vector = args.at<TypeFeedbackVector>(3);
+    FeedbackVectorICSlot vector_slot = vector->ToICSlot(slot->value());
+    // A monomorphic or polymorphic KeyedLoadIC with a string key can call the
+    // LoadIC miss handler if the handler misses. Since the vector Nexus is
+    // set up outside the IC, handle that here.
+    if (vector->GetKind(vector_slot) == Code::LOAD_IC) {
+      LoadICNexus nexus(vector, vector_slot);
+      LoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+      ic.UpdateState(receiver, key);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                         ic.Load(receiver, key));
+    } else {
+      DCHECK(vector->GetKind(vector_slot) == Code::KEYED_LOAD_IC);
+      KeyedLoadICNexus nexus(vector, vector_slot);
+      KeyedLoadIC ic(IC::EXTRA_CALL_FRAME, isolate, &nexus);
+      ic.UpdateState(receiver, key);
+      ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                         ic.Load(receiver, key));
+    }
+  } else {
+    DCHECK(args.length() == 2);
+    LoadIC ic(IC::EXTRA_CALL_FRAME, isolate);
+    ic.UpdateState(receiver, key);
+    ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, ic.Load(receiver, key));
+  }
+
   return *result;
 }
 
index 5ed8082..541fa0c 100644 (file)
@@ -76,23 +76,10 @@ class IC {
     state_ = PROTOTYPE_FAILURE;
   }
 
-  // If the stub contains weak maps then this function adds the stub to
-  // the dependent code array of each weak map.
-  static void RegisterWeakMapDependency(Handle<Code> stub);
-
-  // This function is called when a weak map in the stub is dying,
-  // invalidates the stub by setting maps in it to undefined.
-  static void InvalidateMaps(Code* stub);
-
   // Clear the inline cache to initial state.
   static void Clear(Isolate* isolate, Address address,
                     ConstantPoolArray* constant_pool);
 
-  // Clear the vector-based inline cache to initial state.
-  template <class Nexus>
-  static void Clear(Isolate* isolate, Code::Kind kind, Code* host,
-                    Nexus* nexus);
-
 #ifdef DEBUG
   bool IsLoadStub() const {
     return target()->is_load_stub() || target()->is_keyed_load_stub();
@@ -138,6 +125,12 @@ class IC {
   static Handle<HeapType> CurrentTypeOf(Handle<Object> object,
                                         Isolate* isolate);
 
+  static bool ICUseVector(Code::Kind kind) {
+    return (FLAG_vector_ics &&
+            (kind == Code::LOAD_IC || kind == Code::KEYED_LOAD_IC)) ||
+           kind == Code::CALL_IC;
+  }
+
  protected:
   // Get the call-site target; used for determining the state.
   Handle<Code> target() const { return target_; }
@@ -158,14 +151,21 @@ class IC {
   bool is_target_set() { return target_set_; }
 
   bool UseVector() const {
-    bool use = (FLAG_vector_ics &&
-                (kind() == Code::LOAD_IC || kind() == Code::KEYED_LOAD_IC)) ||
-               kind() == Code::CALL_IC;
+    bool use = ICUseVector(kind());
     // If we are supposed to use the nexus, verify the nexus is non-null.
     DCHECK(!use || nexus_ != NULL);
     return use;
   }
 
+  // Configure for most states.
+  void ConfigureVectorState(IC::State new_state);
+  // Configure the vector for MONOMORPHIC.
+  void ConfigureVectorState(Handle<Name> name, Handle<HeapType> type,
+                            Handle<Code> handler);
+  // Configure the vector for POLYMORPHIC.
+  void ConfigureVectorState(Handle<Name> name, TypeHandleList* types,
+                            CodeHandleList* handlers);
+
   char TransitionMarkFromState(IC::State state);
   void TraceIC(const char* type, Handle<Object> name);
   void TraceIC(const char* type, Handle<Object> name, State old_state,
@@ -272,11 +272,15 @@ class IC {
   void FindTargetMaps() {
     if (target_maps_set_) return;
     target_maps_set_ = true;
-    if (state_ == MONOMORPHIC) {
-      Map* map = target_->FindFirstMap();
-      if (map != NULL) target_maps_.Add(handle(map));
-    } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
-      target_->FindAllMaps(&target_maps_);
+    if (UseVector()) {
+      nexus()->ExtractMaps(&target_maps_);
+    } else {
+      if (state_ == MONOMORPHIC) {
+        Map* map = target_->FindFirstMap();
+        if (map != NULL) target_maps_.Add(handle(map));
+      } else if (state_ != UNINITIALIZED && state_ != PREMONOMORPHIC) {
+        target_->FindAllMaps(&target_maps_);
+      }
     }
   }
 
@@ -364,7 +368,18 @@ class LoadIC : public IC {
     return LoadICState::GetContextualMode(extra_ic_state());
   }
 
-  explicit LoadIC(FrameDepth depth, Isolate* isolate) : IC(depth, isolate) {
+  LoadIC(FrameDepth depth, Isolate* isolate, FeedbackNexus* nexus = NULL)
+      : IC(depth, isolate, nexus) {
+    DCHECK(!FLAG_vector_ics || nexus != NULL);
+    DCHECK(IsLoadStub());
+  }
+
+  // TODO(mvstanton): for_queries_only exists because in one case we
+  // construct an IC only to gather the contextual mode, without any
+  // vector/slot information. It is a temporary hack that preserves the
+  // strong DCHECK protection around vector/slot.
+  LoadIC(FrameDepth depth, Isolate* isolate, bool for_queries_only)
+      : IC(depth, isolate, NULL, for_queries_only) {
     DCHECK(IsLoadStub());
   }
 
@@ -396,6 +411,8 @@ class LoadIC : public IC {
   MUST_USE_RESULT MaybeHandle<Object> Load(Handle<Object> object,
                                            Handle<Name> name);
 
+  static void Clear(Isolate* isolate, Code* host, LoadICNexus* nexus);
+
  protected:
   inline void set_target(Code* code);
 
@@ -408,7 +425,7 @@ class LoadIC : public IC {
     }
   }
 
-  virtual Handle<Code> megamorphic_stub() OVERRIDE;
+  Handle<Code> megamorphic_stub() OVERRIDE;
 
   // Update the inline cache and the global stub cache based on the
   // lookup result.
@@ -434,8 +451,23 @@ class LoadIC : public IC {
 
 class KeyedLoadIC : public LoadIC {
  public:
-  explicit KeyedLoadIC(FrameDepth depth, Isolate* isolate)
-      : LoadIC(depth, isolate) {
+  // ExtraICState bits (building on IC)
+  class IcCheckTypeField : public BitField<IcCheckType, 1, 1> {};
+
+  static ExtraICState ComputeExtraICState(ContextualMode contextual_mode,
+                                          IcCheckType key_type) {
+    return LoadICState(contextual_mode).GetExtraICState() |
+           IcCheckTypeField::encode(key_type);
+  }
+
+  static IcCheckType GetKeyType(ExtraICState extra_state) {
+    return IcCheckTypeField::decode(extra_state);
+  }
+
+  KeyedLoadIC(FrameDepth depth, Isolate* isolate,
+              KeyedLoadICNexus* nexus = NULL)
+      : LoadIC(depth, isolate, nexus) {
+    DCHECK(!FLAG_vector_ics || nexus != NULL);
     DCHECK(target()->is_keyed_load_stub());
   }
 
@@ -463,6 +495,8 @@ class KeyedLoadIC : public LoadIC {
   static Handle<Code> generic_stub(Isolate* isolate);
   static Handle<Code> pre_monomorphic_stub(Isolate* isolate);
 
+  static void Clear(Isolate* isolate, Code* host, KeyedLoadICNexus* nexus);
+
  protected:
  // The receiver is a HeapObject because it could be a String or a JSObject.
   Handle<Code> LoadElementStub(Handle<HeapObject> receiver);
@@ -525,7 +559,7 @@ class StoreIC : public IC {
                       JSReceiver::StoreFromKeyed store_mode);
 
  protected:
-  virtual Handle<Code> megamorphic_stub() OVERRIDE;
+  Handle<Code> megamorphic_stub() OVERRIDE;
 
   // Stub accessors.
   Handle<Code> generic_stub() const;
index ceff593..75032e1 100644 (file)
@@ -92,6 +92,26 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
 }
 
 
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Push(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Pop(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ Addu(sp, sp, Operand(2 * kPointerSize));
+}
+
+
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -138,25 +158,16 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
 
 
 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
-  Isolate* isolate = masm->isolate();
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->native_context()->get(index)));
-
-  // Check we're still in the same context.
-  Register scratch = prototype;
+    MacroAssembler* masm, int index, Register result, Label* miss) {
   const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ lw(scratch, MemOperand(cp, offset));
-  __ lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ lw(scratch, MemOperand(scratch, Context::SlotOffset(index)));
-  __ li(at, function);
-  __ Branch(miss, ne, at, Operand(scratch));
-
+  __ lw(result, MemOperand(cp, offset));
+  __ lw(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  __ lw(result, MemOperand(result, Context::SlotOffset(index)));
   // Load its initial map. The global functions all have initial maps.
-  __ li(prototype, Handle<Map>(function->initial_map()));
+  __ lw(result,
+        FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
   // Load the prototype from the initial map.
-  __ lw(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+  __ lw(result, FieldMemOperand(result, Map::kPrototypeOffset));
 }
 
 
@@ -321,18 +332,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
 }
 
 
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
-    Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
   __ li(this->name(), Operand(name));
-  __ li(StoreTransitionDescriptor::MapRegister(), Operand(transition));
 }
 
 
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+                                                   Register scratch,
+                                                   Label* miss) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+  Register map_reg = StoreTransitionDescriptor::MapRegister();
+  DCHECK(!map_reg.is(scratch));
+  __ LoadWeakValue(map_reg, cell, miss);
+  if (transition->CanBeDeprecated()) {
+    __ lw(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+    __ And(at, scratch, Operand(Map::Deprecated::kMask));
+    __ Branch(miss, ne, at, Operand(zero_reg));
+  }
+}
+
+
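GenerateRestoreMap above guards against two staleness hazards: the weak cell may have been cleared by the GC (LoadWeakValue goes to miss) and the transition map may have been deprecated in the meantime (the bit_field3 test). In pseudo form:

    // map_reg = weak(transition_cell);             // cleared  -> miss
    // if (map_reg->bit_field3 & Deprecated::kMask) // deprecated -> miss
    //   goto miss;
    // else: map_reg holds a live, non-deprecated transition map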
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+                                                      int descriptor,
                                                       Register value_reg,
+                                                      Register scratch,
                                                       Label* miss_label) {
-  __ li(scratch1(), handle(constant, isolate()));
-  __ Branch(miss_label, ne, value_reg, Operand(scratch1()));
+  DCHECK(!map_reg.is(scratch));
+  DCHECK(!map_reg.is(value_reg));
+  DCHECK(!value_reg.is(scratch));
+  __ LoadInstanceDescriptors(map_reg, scratch);
+  __ lw(scratch,
+        FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+  __ Branch(miss_label, ne, value_reg, Operand(scratch));
 }
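The rewritten GenerateConstantCheck no longer embeds the constant object in the handler; it re-reads the expected value out of the map's descriptor array at a known descriptor index. Conceptually (a sketch, not the exact heap layout):

    // constant = map->instance_descriptors()->GetValue(descriptor);
    // if (value_reg != constant) goto miss_label;
    // Keying the check off (map, descriptor) avoids a strong reference to
    // the constant itself, in line with the weak-cell changes below.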
 
 
@@ -412,11 +443,11 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       __ lw(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
       Register map_reg = scratch1;
+      __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
       if (depth != 1 || check == CHECK_ALL_MAPS) {
-        // CheckMap implicitly loads the map of |reg| into |map_reg|.
-        __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
-      } else {
-        __ lw(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+        __ GetWeakValue(scratch2, cell);
+        __ Branch(miss, ne, scratch2, Operand(map_reg));
       }
 
       // Check access rights to the global object.  This has to happen after
@@ -434,17 +465,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
 
       reg = holder_reg;  // From now on the object will be in holder_reg.
 
-      // Two possible reasons for loading the prototype from the map:
-      // (1) Can't store references to new space in code.
-      // (2) Handler is shared for all receivers with the same prototype
-      //     map (but not necessarily the same prototype instance).
-      bool load_prototype_from_map =
-          heap()->InNewSpace(*prototype) || depth == 1;
-      if (load_prototype_from_map) {
-        __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      } else {
-        __ li(reg, Operand(prototype));
-      }
+      __ lw(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
     }
 
     // Go to the next object in the prototype chain.
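The map checks in CheckPrototypes now go through weak cells rather than embedded map immediates, so a handler no longer strongly retains the maps it checks, and the prototype is always loaded from the (verified) map. The helper semantics, sketched (see the MIPS macro-assembler for the real code; "cleared" is a sentinel no live map can equal):

    //   GetWeakValue(dst, cell):          dst = cell->value();
    //   LoadWeakValue(dst, cell, miss):   dst = cell->value();
    //                                     if (dst == cleared) goto miss;

GetWeakValue is enough for compare-only checks: if the cell was cleared, the comparison against a real map simply fails and takes the miss path. LoadWeakValue is used when the value will be consumed rather than compared, so it must branch to miss explicitly.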
@@ -457,7 +478,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
 
   if (depth != 0 || check == CHECK_ALL_MAPS) {
     // Check the holder map.
-    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+    __ lw(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+    __ GetWeakValue(scratch2, cell);
+    __ Branch(miss, ne, scratch2, Operand(scratch1));
   }
 
   // Perform security check for access to the global object.
@@ -477,6 +501,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
     Label success;
     __ Branch(&success);
     __ bind(miss);
+    if (IC::ICUseVector(kind())) {
+      DCHECK(kind() == Code::LOAD_IC);
+      PopVectorAndSlot();
+    }
     TailCallBuiltin(masm(), MissBuiltin(kind()));
     __ bind(&success);
   }
@@ -578,6 +606,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     } else {
       __ Push(holder_reg, this->name());
     }
+    InterceptorVectorSlotPush(holder_reg);
     // Invoke an interceptor.  Note: map checks from receiver to
     // interceptor's holder have been compiled before (see a caller
     // of this method).
@@ -594,6 +623,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     __ Ret();
 
     __ bind(&interceptor_failed);
+    InterceptorVectorSlotPop(holder_reg);
     if (must_preserve_receiver_reg) {
       __ Pop(receiver(), holder_reg, this->name());
     } else {
@@ -623,7 +653,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name,
     Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = Frontend(receiver(), name);
+  Register holder_reg = Frontend(name);
 
   __ Push(receiver(), holder_reg);  // Receiver.
   __ li(at, Operand(callback));     // Callback info.
@@ -663,12 +693,16 @@ Register NamedStoreHandlerCompiler::value() {
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
     Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
   Label miss;
+  if (IC::ICUseVector(kind())) {
+    PushVectorAndSlot();
+  }
 
   FrontendHeader(receiver(), name, &miss);
 
   // Get the value from the cell.
   Register result = StoreDescriptor::ValueRegister();
-  __ li(result, Operand(cell));
+  Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+  __ LoadWeakValue(result, weak_cell, &miss);
   __ lw(result, FieldMemOperand(result, Cell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -679,6 +713,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
 
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  if (IC::ICUseVector(kind())) {
+    DiscardVectorAndSlot();
+  }
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, result);
 
index c1e67f9..000e326 100644 (file)
@@ -24,9 +24,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
 
   if (check == PROPERTY &&
       (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    // In case we are compiling an IC for dictionary loads and stores, just
+    // In case we are compiling an IC for dictionary loads or stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      // Keyed loads with dictionaries shouldn't be here; they go generic.
+      // The DCHECK is to protect assumptions when --vector-ics is on.
+      DCHECK(kind() != Code::KEYED_LOAD_IC);
       Register tmp = scratch1();
       __ JumpIfSmi(this->name(), &miss);
       __ lw(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@@ -57,13 +60,14 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
       number_of_handled_maps++;
       // Check map and tail call if there's a match.
       // Separate the compare from the branch, to provide a fall-through
       // path for the JumpIfSmi() above.
-      __ Subu(match, map_reg, Operand(map));
+      Handle<WeakCell> cell = Map::WeakCellForMap(map);
+      __ GetWeakValue(match, cell);
       if (type->Is(HeapType::Number())) {
         DCHECK(!number_case.is_unused());
         __ bind(&number_case);
       }
       __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
-              Operand(zero_reg));
+              Operand(map_reg));
     }
   }
   DCHECK(number_of_handled_maps != 0);
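The polymorphic dispatch keeps the compare/branch split (so the JumpIfSmi above has a target) but changes what is compared: previously match held map_reg minus an embedded map immediate, branching on zero; now the expected map is fetched through a weak cell and two registers are compared. In pseudo form:

    // old: match = map_reg - <embedded map>;  jump handler if match == 0
    // new: match = weak(cell);                jump handler if match == map_reg
    // The weak cell keeps the handler from strongly retaining the map.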
@@ -85,15 +89,20 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
   __ JumpIfSmi(receiver(), &miss);
 
   int receiver_count = receiver_maps->length();
-  __ lw(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  Register map_reg = scratch1();
+  Register match = scratch2();
+  __ lw(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
   for (int i = 0; i < receiver_count; ++i) {
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+    __ GetWeakValue(match, cell);
     if (transitioned_maps->at(i).is_null()) {
-      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, scratch1(),
-              Operand(receiver_maps->at(i)));
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, match,
+              Operand(map_reg));
     } else {
       Label next_map;
-      __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
-      __ li(transition_map(), Operand(transitioned_maps->at(i)));
+      __ Branch(&next_map, ne, match, Operand(map_reg));
+      Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+      __ LoadWeakValue(transition_map(), cell, &miss);
       __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
       __ bind(&next_map);
     }
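The keyed-store dispatch follows the same pattern, with one extra wrinkle for transitioning stores: the transition map is itself loaded through a weak cell, and a cleared cell routes to miss. The shape of the generated code, in pseudo form:

    // receiver_map = receiver->map;
    // for each (map_i, handler_i, transition_i):
    //   match = weak(map_i);
    //   if (match == receiver_map):
    //     if (transition_i != null)
    //       transition_map_reg = weak_or_miss(transition_i);  // miss if cleared
    //     tail_call handler_i;
    // fall through -> miss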
index 0984490..7c8a5ea 100644 (file)
@@ -272,18 +272,35 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
 static const Register LoadIC_TempRegister() { return a3; }
 
 
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  if (FLAG_vector_ics) {
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+
+    __ Push(receiver, name, slot, vector);
+  } else {
+    __ Push(receiver, name);
+  }
+}
+
+
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // The return address is in ra.
   Isolate* isolate = masm->isolate();
 
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
+                     VectorLoadICDescriptor::VectorRegister()));
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, t0, t1);
 
-  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+  LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
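With --vector-ics enabled, the miss path forwards the slot and vector so the runtime can update the correct feedback slot, and arg_count must match what the runtime entry pops. The stack at the tail call, per LoadIC_PushArgs above (the MIPS64 port later in this diff is identical modulo register names):

    // FLAG_vector_ics:           !FLAG_vector_ics:
    //   sp -> [ vector ]           sp -> [ name ]
    //         [ slot ]                   [ receiver ]
    //         [ name ]
    //         [ receiver ]         arg_count = 2
    //   arg_count = 4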
 
 
@@ -412,15 +429,19 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // The return address is in ra.
   Isolate* isolate = masm->isolate();
 
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, t0);
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(t0, t1, VectorLoadICDescriptor::SlotRegister(),
+                     VectorLoadICDescriptor::VectorRegister()));
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, t0, t1);
 
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+  LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   ExternalReference ref =
       ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
 
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
@@ -801,8 +822,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
   __ JumpIfNotUniqueNameInstanceType(t0, &slow);
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               key, a3, t0, t1, t2);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, key, a3, t0, t1, t2);
   // Cache miss.
   __ Branch(&miss);
 
@@ -871,8 +892,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   // Get the receiver from the stack and probe the stub cache.
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               name, a3, t0, t1, t2);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, name, a3, t0, t1, t2);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
index e538712..fab66d8 100644 (file)
@@ -7,7 +7,9 @@
 #if V8_TARGET_ARCH_MIPS
 
 #include "src/codegen.h"
+#include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
 
 namespace v8 {
 namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
 
 
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Flags flags, bool leave_frame,
+                       Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
                        StubCache::Table table, Register receiver, Register name,
                        // Number of the cache entry, not scaled.
                        Register offset, Register scratch, Register scratch2,
@@ -90,10 +92,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
 }
 
 
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
-                              bool leave_frame, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+                              Code::Flags flags, bool leave_frame,
+                              Register receiver, Register name,
+                              Register scratch, Register extra, Register extra2,
+                              Register extra3) {
   Isolate* isolate = masm->isolate();
   Label miss;
 
@@ -105,15 +108,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
 
   // Make sure that there are no register conflicts.
-  DCHECK(!scratch.is(receiver));
-  DCHECK(!scratch.is(name));
-  DCHECK(!extra.is(receiver));
-  DCHECK(!extra.is(name));
-  DCHECK(!extra.is(scratch));
-  DCHECK(!extra2.is(receiver));
-  DCHECK(!extra2.is(name));
-  DCHECK(!extra2.is(scratch));
-  DCHECK(!extra2.is(extra));
+  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
 
   // Check register validity.
   DCHECK(!scratch.is(no_reg));
@@ -121,6 +116,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   DCHECK(!extra2.is(no_reg));
   DCHECK(!extra3.is(no_reg));
 
+#ifdef DEBUG
+  // If vector-based ICs are in use, ensure that scratch, extra, extra2 and
+  // extra3 don't conflict with the vector and slot registers, which need
+  // to be preserved for a handler call or miss.
+  if (IC::ICUseVector(ic_kind)) {
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+  }
+#endif
+
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                       extra3);
@@ -140,8 +146,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ And(scratch, scratch, Operand(mask));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
-             scratch, extra, extra2, extra3);
+  ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+             name, scratch, extra, extra2, extra3);
 
   // Primary miss: Compute hash for secondary probe.
   __ srl(at, name, kCacheIndexShift);
@@ -151,8 +157,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ And(scratch, scratch, Operand(mask2));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
-             scratch, extra, extra2, extra3);
+  ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+             name, scratch, extra, extra2, extra3);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
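For readers tracing the probe sequence: the generated code implements a two-level hash lookup, and only after both tables miss does execution fall through to the runtime. A self-contained, simplified model of that lookup; the table sizes and hash mixing here are illustrative (V8's real hashes also fold in the code flags):

    #include <cstdint>

    struct Entry { const void* name; const void* map; const void* code; };

    const int kPrimarySize = 2048;    // illustrative sizes
    const int kSecondarySize = 512;
    Entry primary_table[kPrimarySize];
    Entry secondary_table[kSecondarySize];

    static int PrimaryIndex(const void* name, const void* map) {
      uintptr_t h = reinterpret_cast<uintptr_t>(name) +
                    reinterpret_cast<uintptr_t>(map);
      return static_cast<int>(h & (kPrimarySize - 1));
    }

    static int SecondaryIndex(int primary, const void* name) {
      uintptr_t h = static_cast<uintptr_t>(primary) -
                    reinterpret_cast<uintptr_t>(name);
      return static_cast<int>(h & (kSecondarySize - 1));
    }

    const void* Probe(const void* name, const void* map) {
      int p = PrimaryIndex(name, map);
      if (primary_table[p].name == name && primary_table[p].map == map)
        return primary_table[p].code;
      int s = SecondaryIndex(p, name);
      if (secondary_table[s].name == name && secondary_table[s].map == map)
        return secondary_table[s].code;
      return nullptr;  // both probes missed: enter the runtime (IC miss)
    }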
index 8251b2a..d3b861b 100644 (file)
@@ -92,6 +92,26 @@ void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
 }
 
 
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Push(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Pop(vector, slot);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ Daddu(sp, sp, Operand(2 * kPointerSize));
+}
+
+
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -138,25 +158,17 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
 
 
 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
-  Isolate* isolate = masm->isolate();
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->native_context()->get(index)));
-
+    MacroAssembler* masm, int index, Register result, Label* miss) {
   // Check we're still in the same context.
-  Register scratch = prototype;
   const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ ld(scratch, MemOperand(cp, offset));
-  __ ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ ld(scratch, MemOperand(scratch, Context::SlotOffset(index)));
-  __ li(at, function);
-  __ Branch(miss, ne, at, Operand(scratch));
-
+  __ ld(result, MemOperand(cp, offset));
+  __ ld(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  __ ld(result, MemOperand(result, Context::SlotOffset(index)));
   // Load its initial map. The global functions all have initial maps.
-  __ li(prototype, Handle<Map>(function->initial_map()));
+  __ ld(result,
+        FieldMemOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
   // Load the prototype from the initial map.
-  __ ld(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+  __ ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
 }
 
 
@@ -321,18 +333,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
 }
 
 
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
-    Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
   __ li(this->name(), Operand(name));
-  __ li(StoreTransitionDescriptor::MapRegister(), Operand(transition));
 }
 
 
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+                                                   Register scratch,
+                                                   Label* miss) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+  Register map_reg = StoreTransitionDescriptor::MapRegister();
+  DCHECK(!map_reg.is(scratch));
+  __ LoadWeakValue(map_reg, cell, miss);
+  if (transition->CanBeDeprecated()) {
+    __ ld(scratch, FieldMemOperand(map_reg, Map::kBitField3Offset));
+    __ And(at, scratch, Operand(Map::Deprecated::kMask));
+    __ Branch(miss, ne, at, Operand(zero_reg));
+  }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+                                                      int descriptor,
                                                       Register value_reg,
+                                                      Register scratch,
                                                       Label* miss_label) {
-  __ li(scratch1(), handle(constant, isolate()));
-  __ Branch(miss_label, ne, value_reg, Operand(scratch1()));
+  DCHECK(!map_reg.is(scratch));
+  DCHECK(!map_reg.is(value_reg));
+  DCHECK(!value_reg.is(scratch));
+  __ LoadInstanceDescriptors(map_reg, scratch);
+  __ ld(scratch,
+        FieldMemOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+  __ Branch(miss_label, ne, value_reg, Operand(scratch));
 }
 
 
@@ -411,18 +443,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       reg = holder_reg;  // From now on the object will be in holder_reg.
       __ ld(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      // Two possible reasons for loading the prototype from the map:
-      // (1) Can't store references to new space in code.
-      // (2) Handler is shared for all receivers with the same prototype
-      //     map (but not necessarily the same prototype instance).
-      bool load_prototype_from_map =
-          heap()->InNewSpace(*prototype) || depth == 1;
       Register map_reg = scratch1;
+      __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
       if (depth != 1 || check == CHECK_ALL_MAPS) {
-        // CheckMap implicitly loads the map of |reg| into |map_reg|.
-        __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
-      } else {
-        __ ld(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+        __ GetWeakValue(scratch2, cell);
+        __ Branch(miss, ne, scratch2, Operand(map_reg));
       }
 
       // Check access rights to the global object.  This has to happen after
@@ -440,11 +466,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
 
       reg = holder_reg;  // From now on the object will be in holder_reg.
 
-      if (load_prototype_from_map) {
-        __ ld(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
-      } else {
-        __ li(reg, Operand(prototype));
-      }
+      __ ld(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
     }
 
     // Go to the next object in the prototype chain.
@@ -457,7 +479,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
 
   if (depth != 0 || check == CHECK_ALL_MAPS) {
     // Check the holder map.
-    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+    __ ld(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+    __ GetWeakValue(scratch2, cell);
+    __ Branch(miss, ne, scratch2, Operand(scratch1));
   }
 
   // Perform security check for access to the global object.
@@ -477,6 +502,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
     Label success;
     __ Branch(&success);
     __ bind(miss);
+    if (IC::ICUseVector(kind())) {
+      DCHECK(kind() == Code::LOAD_IC);
+      PopVectorAndSlot();
+    }
     TailCallBuiltin(masm(), MissBuiltin(kind()));
     __ bind(&success);
   }
@@ -578,6 +607,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     } else {
       __ Push(holder_reg, this->name());
     }
+    InterceptorVectorSlotPush(holder_reg);
     // Invoke an interceptor.  Note: map checks from receiver to
     // interceptor's holder have been compiled before (see a caller
     // of this method).
@@ -594,6 +624,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     __ Ret();
 
     __ bind(&interceptor_failed);
+    InterceptorVectorSlotPop(holder_reg);
     if (must_preserve_receiver_reg) {
       __ Pop(receiver(), holder_reg, this->name());
     } else {
@@ -623,7 +654,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name,
     Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = Frontend(receiver(), name);
+  Register holder_reg = Frontend(name);
 
   __ Push(receiver(), holder_reg);  // Receiver.
   __ li(at, Operand(callback));     // Callback info.
@@ -663,12 +694,16 @@ Register NamedStoreHandlerCompiler::value() {
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
     Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
   Label miss;
+  if (IC::ICUseVector(kind())) {
+    PushVectorAndSlot();
+  }
 
   FrontendHeader(receiver(), name, &miss);
 
   // Get the value from the cell.
   Register result = StoreDescriptor::ValueRegister();
-  __ li(result, Operand(cell));
+  Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+  __ LoadWeakValue(result, weak_cell, &miss);
   __ ld(result, FieldMemOperand(result, Cell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -679,6 +714,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
 
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_load_global_stub(), 1, a1, a3);
+  if (IC::ICUseVector(kind())) {
+    DiscardVectorAndSlot();
+  }
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, result);
 
index 796ed87..1e1880f 100644 (file)
@@ -24,9 +24,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
 
   if (check == PROPERTY &&
       (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    // In case we are compiling an IC for dictionary loads and stores, just
+    // In case we are compiling an IC for dictionary loads or stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      // Keyed loads with dictionaries shouldn't be here; they go generic.
+      // The DCHECK is to protect assumptions when --vector-ics is on.
+      DCHECK(kind() != Code::KEYED_LOAD_IC);
       Register tmp = scratch1();
       __ JumpIfSmi(this->name(), &miss);
       __ ld(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
@@ -57,13 +60,14 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
       number_of_handled_maps++;
       // Check map and tail call if there's a match.
       // Separate the compare from the branch, to provide a fall-through
       // path for the JumpIfSmi() above.
-      __ Dsubu(match, map_reg, Operand(map));
+      Handle<WeakCell> cell = Map::WeakCellForMap(map);
+      __ GetWeakValue(match, cell);
       if (type->Is(HeapType::Number())) {
         DCHECK(!number_case.is_unused());
         __ bind(&number_case);
       }
       __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq, match,
-              Operand(zero_reg));
+              Operand(map_reg));
     }
   }
   DCHECK(number_of_handled_maps != 0);
@@ -85,15 +89,20 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
   __ JumpIfSmi(receiver(), &miss);
 
   int receiver_count = receiver_maps->length();
-  __ ld(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  Register map_reg = scratch1();
+  Register match = scratch2();
+  __ ld(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
   for (int i = 0; i < receiver_count; ++i) {
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+    __ GetWeakValue(match, cell);
     if (transitioned_maps->at(i).is_null()) {
-      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, scratch1(),
-              Operand(receiver_maps->at(i)));
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq, match,
+              Operand(map_reg));
     } else {
       Label next_map;
-      __ Branch(&next_map, ne, scratch1(), Operand(receiver_maps->at(i)));
-      __ li(transition_map(), Operand(transitioned_maps->at(i)));
+      __ Branch(&next_map, ne, match, Operand(map_reg));
+      Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+      __ LoadWeakValue(transition_map(), cell, &miss);
       __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET);
       __ bind(&next_map);
     }
index b4055b2..7ac191c 100644 (file)
@@ -270,18 +270,35 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
 static const Register LoadIC_TempRegister() { return a3; }
 
 
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  if (FLAG_vector_ics) {
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+
+    __ Push(receiver, name, slot, vector);
+  } else {
+    __ Push(receiver, name);
+  }
+}
+
+
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // The return address is on the stack.
   Isolate* isolate = masm->isolate();
 
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
+                     VectorLoadICDescriptor::VectorRegister()));
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, a4, a5);
 
-  __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
-  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+  LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
@@ -410,15 +427,19 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // The return address is in ra.
   Isolate* isolate = masm->isolate();
 
-  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(a4, a5, VectorLoadICDescriptor::SlotRegister(),
+                     VectorLoadICDescriptor::VectorRegister()));
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a4, a5);
 
-  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+  LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   ExternalReference ref =
       ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
 
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
@@ -810,8 +831,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
   __ JumpIfNotUniqueNameInstanceType(a4, &slow);
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               key, a3, a4, a5, a6);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, key, a3, a4, a5, a6);
   // Cache miss.
   __ Branch(&miss);
 
@@ -880,8 +901,8 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   // Get the receiver from the stack and probe the stub cache.
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               name, a3, a4, a5, a6);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, name, a3, a4, a5, a6);
 
   // Cache miss: Jump to runtime.
   GenerateMiss(masm);
index 272e5be..04883d7 100644 (file)
@@ -7,7 +7,9 @@
 #if V8_TARGET_ARCH_MIPS64
 
 #include "src/codegen.h"
+#include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
 
 namespace v8 {
 namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
 
 
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Flags flags, bool leave_frame,
+                       Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
                        StubCache::Table table, Register receiver, Register name,
                        // Number of the cache entry, not scaled.
                        Register offset, Register scratch, Register scratch2,
@@ -90,10 +92,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
 }
 
 
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
-                              bool leave_frame, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+                              Code::Flags flags, bool leave_frame,
+                              Register receiver, Register name,
+                              Register scratch, Register extra, Register extra2,
+                              Register extra3) {
   Isolate* isolate = masm->isolate();
   Label miss;
 
@@ -106,15 +109,7 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
 
   // Make sure that there are no register conflicts.
-  DCHECK(!scratch.is(receiver));
-  DCHECK(!scratch.is(name));
-  DCHECK(!extra.is(receiver));
-  DCHECK(!extra.is(name));
-  DCHECK(!extra.is(scratch));
-  DCHECK(!extra2.is(receiver));
-  DCHECK(!extra2.is(name));
-  DCHECK(!extra2.is(scratch));
-  DCHECK(!extra2.is(extra));
+  DCHECK(!AreAliased(receiver, name, scratch, extra, extra2, extra3));
 
   // Check register validity.
   DCHECK(!scratch.is(no_reg));
@@ -122,6 +117,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   DCHECK(!extra2.is(no_reg));
   DCHECK(!extra3.is(no_reg));
 
+#ifdef DEBUG
+  // If vector-based ICs are in use, ensure that scratch, extra, extra2 and
+  // extra3 don't conflict with the vector and slot registers, which need
+  // to be preserved for a handler call or miss.
+  if (IC::ICUseVector(ic_kind)) {
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    DCHECK(!AreAliased(vector, slot, scratch, extra, extra2, extra3));
+  }
+#endif
+
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
                       extra3);
@@ -141,8 +147,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ And(scratch, scratch, Operand(mask));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
-             scratch, extra, extra2, extra3);
+  ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+             name, scratch, extra, extra2, extra3);
 
   // Primary miss: Compute hash for secondary probe.
   __ dsrl(at, name, kCacheIndexShift);
@@ -152,8 +158,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ And(scratch, scratch, Operand(mask2));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
-             scratch, extra, extra2, extra3);
+  ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+             name, scratch, extra, extra2, extra3);
 
   // Cache miss: Fall-through and let caller handle the miss by
   // entering the runtime system.
diff --git a/deps/v8/src/ic/ppc/access-compiler-ppc.cc b/deps/v8/src/ic/ppc/access-compiler-ppc.cc
new file mode 100644 (file)
index 0000000..e98f517
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/ic/access-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyAccessCompiler::GenerateTailCall(MacroAssembler* masm,
+                                              Handle<Code> code) {
+  __ Jump(code, RelocInfo::CODE_TARGET);
+}
+
+
+Register* PropertyAccessCompiler::load_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3, scratch4.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  static Register registers[] = {receiver, name, r6, r3, r7, r8};
+  return registers;
+}
+
+
+Register* PropertyAccessCompiler::store_calling_convention() {
+  // receiver, name, scratch1, scratch2, scratch3.
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(r6.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+  static Register registers[] = {receiver, name, r6, r7, r8};
+  return registers;
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/handler-compiler-ppc.cc b/deps/v8/src/ic/ppc/handler-compiler-ppc.cc
new file mode 100644 (file)
index 0000000..2f29c83
--- /dev/null
@@ -0,0 +1,698 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/ic/call-optimization.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> getter) {
+  // ----------- S t a t e -------------
+  //  -- r3    : receiver
+  //  -- r5    : name
+  //  -- lr    : return address
+  // -----------------------------------
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    if (!getter.is_null()) {
+      // Call the JavaScript getter with the receiver on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ LoadP(receiver,
+                 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ push(receiver);
+      ParameterCount actual(0);
+      ParameterCount expected(getter);
+      __ InvokeFunction(getter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetGetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context register.
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
+
+
+void NamedStoreHandlerCompiler::GenerateStoreViaSetter(
+    MacroAssembler* masm, Handle<HeapType> type, Register receiver,
+    Handle<JSFunction> setter) {
+  // ----------- S t a t e -------------
+  //  -- lr    : return address
+  // -----------------------------------
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    // Save value register, so we can restore it later.
+    __ push(value());
+
+    if (!setter.is_null()) {
+      // Call the JavaScript setter with receiver and value on the stack.
+      if (IC::TypeToMap(*type, masm->isolate())->IsJSGlobalObjectMap()) {
+        // Swap in the global receiver.
+        __ LoadP(receiver,
+                 FieldMemOperand(receiver, JSGlobalObject::kGlobalProxyOffset));
+      }
+      __ Push(receiver, value());
+      ParameterCount actual(1);
+      ParameterCount expected(setter);
+      __ InvokeFunction(setter, expected, actual, CALL_FUNCTION,
+                        NullCallWrapper());
+    } else {
+      // If we generate a global code snippet for deoptimization only, remember
+      // the place to continue after deoptimization.
+      masm->isolate()->heap()->SetSetterStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // We have to return the passed value, not the return value of the setter.
+    __ pop(r3);
+
+    // Restore context register.
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ Ret();
+}
+
+
+void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
+    MacroAssembler* masm, Label* miss_label, Register receiver,
+    Handle<Name> name, Register scratch0, Register scratch1) {
+  DCHECK(name->IsUniqueName());
+  DCHECK(!receiver.is(scratch0));
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->negative_lookups(), 1, scratch0, scratch1);
+  __ IncrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+
+  Label done;
+
+  const int kInterceptorOrAccessCheckNeededMask =
+      (1 << Map::kHasNamedInterceptor) | (1 << Map::kIsAccessCheckNeeded);
+
+  // Bail out if the receiver has a named interceptor or requires access checks.
+  Register map = scratch1;
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbz(scratch0, FieldMemOperand(map, Map::kBitFieldOffset));
+  __ andi(r0, scratch0, Operand(kInterceptorOrAccessCheckNeededMask));
+  __ bne(miss_label, cr0);
+
+  // Check that receiver is a JSObject.
+  __ lbz(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ cmpi(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ blt(miss_label);
+
+  // Load properties array.
+  Register properties = scratch0;
+  __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  // Check that the properties array is a dictionary.
+  __ LoadP(map, FieldMemOperand(properties, HeapObject::kMapOffset));
+  Register tmp = properties;
+  __ LoadRoot(tmp, Heap::kHashTableMapRootIndex);
+  __ cmp(map, tmp);
+  __ bne(miss_label);
+
+  // Restore the temporarily used register.
+  __ LoadP(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  NameDictionaryLookupStub::GenerateNegativeLookup(
+      masm, miss_label, &done, receiver, properties, name, scratch1);
+  __ bind(&done);
+  __ DecrementCounter(counters->negative_lookups_miss(), 1, scratch0, scratch1);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
+    MacroAssembler* masm, int index, Register prototype, Label* miss) {
+  Isolate* isolate = masm->isolate();
+  // Get the global function with the given index.
+  Handle<JSFunction> function(
+      JSFunction::cast(isolate->native_context()->get(index)));
+
+  // Check we're still in the same context.
+  Register scratch = prototype;
+  const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
+  __ LoadP(scratch, MemOperand(cp, offset));
+  __ LoadP(scratch,
+           FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+  __ LoadP(scratch, MemOperand(scratch, Context::SlotOffset(index)));
+  __ Move(ip, function);
+  __ cmp(ip, scratch);
+  __ bne(miss);
+
+  // Load its initial map. The global functions all have initial maps.
+  __ Move(prototype, Handle<Map>(function->initial_map()));
+  // Load the prototype from the initial map.
+  __ LoadP(prototype, FieldMemOperand(prototype, Map::kPrototypeOffset));
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
+    MacroAssembler* masm, Register receiver, Register scratch1,
+    Register scratch2, Label* miss_label) {
+  __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
+  __ mr(r3, scratch1);
+  __ Ret();
+}
+
+
+// Generate code to check that a global property cell is empty. Create
+// the property cell at compilation time if no cell exists for the
+// property.
+void PropertyHandlerCompiler::GenerateCheckPropertyCell(
+    MacroAssembler* masm, Handle<JSGlobalObject> global, Handle<Name> name,
+    Register scratch, Label* miss) {
+  Handle<Cell> cell = JSGlobalObject::EnsurePropertyCell(global, name);
+  DCHECK(cell->value()->IsTheHole());
+  __ mov(scratch, Operand(cell));
+  __ LoadP(scratch, FieldMemOperand(scratch, Cell::kValueOffset));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(scratch, ip);
+  __ bne(miss);
+}
+
+
+static void PushInterceptorArguments(MacroAssembler* masm, Register receiver,
+                                     Register holder, Register name,
+                                     Handle<JSObject> holder_obj) {
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsNameIndex == 0);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsInfoIndex == 1);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsThisIndex == 2);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsHolderIndex == 3);
+  STATIC_ASSERT(NamedLoadHandlerCompiler::kInterceptorArgsLength == 4);
+  __ push(name);
+  Handle<InterceptorInfo> interceptor(holder_obj->GetNamedInterceptor());
+  DCHECK(!masm->isolate()->heap()->InNewSpace(*interceptor));
+  Register scratch = name;
+  __ mov(scratch, Operand(interceptor));
+  __ push(scratch);
+  __ push(receiver);
+  __ push(holder);
+}
+
+
+static void CompileCallLoadPropertyWithInterceptor(
+    MacroAssembler* masm, Register receiver, Register holder, Register name,
+    Handle<JSObject> holder_obj, IC::UtilityId id) {
+  PushInterceptorArguments(masm, receiver, holder, name, holder_obj);
+  __ CallExternalReference(ExternalReference(IC_Utility(id), masm->isolate()),
+                           NamedLoadHandlerCompiler::kInterceptorArgsLength);
+}
+
+
+// Generate call to api function.
+void PropertyHandlerCompiler::GenerateFastApiCall(
+    MacroAssembler* masm, const CallOptimization& optimization,
+    Handle<Map> receiver_map, Register receiver, Register scratch_in,
+    bool is_store, int argc, Register* values) {
+  DCHECK(!receiver.is(scratch_in));
+  __ push(receiver);
+  // Write the arguments to stack frame.
+  for (int i = 0; i < argc; i++) {
+    Register arg = values[argc - 1 - i];
+    DCHECK(!receiver.is(arg));
+    DCHECK(!scratch_in.is(arg));
+    __ push(arg);
+  }
+  DCHECK(optimization.is_simple_api_call());
+
+  // ABI for CallApiFunctionStub.
+  Register callee = r3;
+  Register call_data = r7;
+  Register holder = r5;
+  Register api_function_address = r4;
+
+  // Put holder in place.
+  CallOptimization::HolderLookup holder_lookup;
+  Handle<JSObject> api_holder =
+      optimization.LookupHolderOfExpectedType(receiver_map, &holder_lookup);
+  switch (holder_lookup) {
+    case CallOptimization::kHolderIsReceiver:
+      __ Move(holder, receiver);
+      break;
+    case CallOptimization::kHolderFound:
+      __ Move(holder, api_holder);
+      break;
+    case CallOptimization::kHolderNotFound:
+      UNREACHABLE();
+      break;
+  }
+
+  Isolate* isolate = masm->isolate();
+  Handle<JSFunction> function = optimization.constant_function();
+  Handle<CallHandlerInfo> api_call_info = optimization.api_call_info();
+  Handle<Object> call_data_obj(api_call_info->data(), isolate);
+
+  // Put callee in place.
+  __ Move(callee, function);
+
+  bool call_data_undefined = false;
+  // Put call_data in place.
+  if (isolate->heap()->InNewSpace(*call_data_obj)) {
+    __ Move(call_data, api_call_info);
+    __ LoadP(call_data,
+             FieldMemOperand(call_data, CallHandlerInfo::kDataOffset));
+  } else if (call_data_obj->IsUndefined()) {
+    call_data_undefined = true;
+    __ LoadRoot(call_data, Heap::kUndefinedValueRootIndex);
+  } else {
+    __ Move(call_data, call_data_obj);
+  }
+
+  // Put api_function_address in place.
+  Address function_address = v8::ToCData<Address>(api_call_info->callback());
+  ApiFunction fun(function_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_API_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, masm->isolate());
+  __ mov(api_function_address, Operand(ref));
+
+  // Jump to stub.
+  CallApiFunctionStub stub(isolate, is_store, call_data_undefined, argc);
+  __ TailCallStub(&stub);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateSlow(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void ElementHandlerCompiler::GenerateStoreSlow(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // The slow case calls into the runtime to complete the store without causing
+  // an IC miss that would otherwise cause a transition to the generic stub.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(IC::kKeyedStoreIC_Slow), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
+                                                    Handle<Name> name) {
+  if (!label->is_unused()) {
+    __ bind(label);
+    __ mov(this->name(), Operand(name));
+  }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
+    Handle<Name> name, Handle<Map> transition) {
+  __ mov(this->name(), Operand(name));
+  __ mov(StoreTransitionDescriptor::MapRegister(), Operand(transition));
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+                                                      Register value_reg,
+                                                      Label* miss_label) {
+  __ Move(scratch1(), handle(constant, isolate()));
+  __ cmp(value_reg, scratch1());
+  __ bne(miss_label);
+}
+
+
+void NamedStoreHandlerCompiler::GenerateFieldTypeChecks(HeapType* field_type,
+                                                        Register value_reg,
+                                                        Label* miss_label) {
+  __ JumpIfSmi(value_reg, miss_label);
+  HeapType::Iterator<Map> it = field_type->Classes();
+  if (!it.Done()) {
+    __ LoadP(scratch1(), FieldMemOperand(value_reg, HeapObject::kMapOffset));
+    Label do_store;
+    while (true) {
+      __ CompareMap(scratch1(), it.Current(), &do_store);
+      it.Advance();
+      if (it.Done()) {
+        __ bne(miss_label);
+        break;
+      }
+      __ beq(&do_store);
+    }
+    __ bind(&do_store);
+  }
+}
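The class-list loop above is easy to misread because the branch sense flips on the last iteration; its control flow, in pseudo form:

    // if value is a Smi: goto miss
    // map = value->map;
    // for each class m[i] in field_type:
    //   compare(map, m[i]);
    //   if last iteration:  bne miss      // matched nowhere -> miss
    //   else:               beq do_store  // early match -> store
    // do_store: fall through to the store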
+
+
+Register PropertyHandlerCompiler::CheckPrototypes(
+    Register object_reg, Register holder_reg, Register scratch1,
+    Register scratch2, Handle<Name> name, Label* miss,
+    PrototypeCheckType check) {
+  Handle<Map> receiver_map(IC::TypeToMap(*type(), isolate()));
+
+  // Make sure there's no overlap between holder and object registers.
+  DCHECK(!scratch1.is(object_reg) && !scratch1.is(holder_reg));
+  DCHECK(!scratch2.is(object_reg) && !scratch2.is(holder_reg) &&
+         !scratch2.is(scratch1));
+
+  // Keep track of the current object in register reg.
+  Register reg = object_reg;
+  int depth = 0;
+
+  Handle<JSObject> current = Handle<JSObject>::null();
+  if (type()->IsConstant()) {
+    current = Handle<JSObject>::cast(type()->AsConstant()->Value());
+  }
+  Handle<JSObject> prototype = Handle<JSObject>::null();
+  Handle<Map> current_map = receiver_map;
+  Handle<Map> holder_map(holder()->map());
+  // Traverse the prototype chain and check the maps in the prototype chain for
+  // fast and global objects or do negative lookup for normal objects.
+  while (!current_map.is_identical_to(holder_map)) {
+    ++depth;
+
+    // Only global objects and objects that do not require access
+    // checks are allowed in stubs.
+    DCHECK(current_map->IsJSGlobalProxyMap() ||
+           !current_map->is_access_check_needed());
+
+    prototype = handle(JSObject::cast(current_map->prototype()));
+    if (current_map->is_dictionary_map() &&
+        !current_map->IsJSGlobalObjectMap()) {
+      DCHECK(!current_map->IsJSGlobalProxyMap());  // Proxy maps are fast.
+      if (!name->IsUniqueName()) {
+        DCHECK(name->IsString());
+        name = factory()->InternalizeString(Handle<String>::cast(name));
+      }
+      DCHECK(current.is_null() ||
+             current->property_dictionary()->FindEntry(name) ==
+                 NameDictionary::kNotFound);
+
+      GenerateDictionaryNegativeLookup(masm(), miss, reg, name, scratch1,
+                                       scratch2);
+
+      __ LoadP(scratch1, FieldMemOperand(reg, HeapObject::kMapOffset));
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+      __ LoadP(reg, FieldMemOperand(scratch1, Map::kPrototypeOffset));
+    } else {
+      Register map_reg = scratch1;
+      if (depth != 1 || check == CHECK_ALL_MAPS) {
+        // CheckMap implicitly loads the map of |reg| into |map_reg|.
+        __ CheckMap(reg, map_reg, current_map, miss, DONT_DO_SMI_CHECK);
+      } else {
+        __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+      }
+
+      // Check access rights to the global object.  This has to happen after
+      // the map check so that we know that the object is actually a global
+      // object.
+      // This allows us to install generated handlers for accesses to the
+      // global proxy (as opposed to using slow ICs). See corresponding code
+      // in LookupForRead().
+      if (current_map->IsJSGlobalProxyMap()) {
+        __ CheckAccessGlobalProxy(reg, scratch2, miss);
+      } else if (current_map->IsJSGlobalObjectMap()) {
+        GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
+                                  name, scratch2, miss);
+      }
+
+      reg = holder_reg;  // From now on the object will be in holder_reg.
+
+      // Two possible reasons for loading the prototype from the map:
+      // (1) Can't store references to new space in code.
+      // (2) Handler is shared for all receivers with the same prototype
+      //     map (but not necessarily the same prototype instance).
+      bool load_prototype_from_map =
+          heap()->InNewSpace(*prototype) || depth == 1;
+      if (load_prototype_from_map) {
+        __ LoadP(reg, FieldMemOperand(map_reg, Map::kPrototypeOffset));
+      } else {
+        __ mov(reg, Operand(prototype));
+      }
+    }
+
+    // Go to the next object in the prototype chain.
+    current = prototype;
+    current_map = handle(current->map());
+  }
+
+  // Log the check depth.
+  LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
+
+  if (depth != 0 || check == CHECK_ALL_MAPS) {
+    // Check the holder map.
+    __ CheckMap(reg, scratch1, current_map, miss, DONT_DO_SMI_CHECK);
+  }
+
+  // Perform security check for access to the global object.
+  DCHECK(current_map->IsJSGlobalProxyMap() ||
+         !current_map->is_access_check_needed());
+  if (current_map->IsJSGlobalProxyMap()) {
+    __ CheckAccessGlobalProxy(reg, scratch1, miss);
+  }
+
+  // Return the register containing the holder.
+  return reg;
+}
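
For reference, a minimal C++ sketch of the invariant this prototype walk compiles in, using hypothetical standalone types rather than V8's classes: every fast object between the receiver and the holder must still have the map recorded at compile time, and every dictionary-mode object must still lack the property (otherwise it would shadow the holder's). The generated stub compares map words rather than object identities because a map match already pins down the object's shape and prototype.

#include <cstddef>
#include <string>
#include <unordered_map>
#include <vector>

struct Map;

struct Object {
  Map* map;
  bool is_dictionary_mode;
  std::unordered_map<std::string, int> properties;  // slow properties
};

struct Map {
  Object* prototype;
};

// Simplified stand-in for the checks the generated handler performs.
bool ChainStillValid(Object* receiver, Object* holder,
                     const std::vector<Map*>& expected_maps,
                     const std::string& name) {
  Object* current = receiver;
  std::size_t depth = 0;
  while (current != holder) {
    if (current->is_dictionary_mode) {
      // Negative lookup: a property added here would shadow the holder's.
      if (current->properties.count(name) != 0) return false;
    } else if (current->map != expected_maps[depth]) {
      return false;  // shape changed; fall back to the miss handler
    }
    current = current->map->prototype;
    ++depth;
  }
  return true;
}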
+
+
+void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ b(&success);
+    __ bind(miss);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
+  if (!miss->is_unused()) {
+    Label success;
+    __ b(&success);
+    GenerateRestoreName(miss, name);
+    TailCallBuiltin(masm(), MissBuiltin(kind()));
+    __ bind(&success);
+  }
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadConstant(Handle<Object> value) {
+  // Return the constant value.
+  __ Move(r3, value);
+  __ Ret();
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadCallback(
+    Register reg, Handle<ExecutableAccessorInfo> callback) {
+  // Build the AccessorInfo::args_ list on the stack and push the property
+  // name below the exit frame, so the GC is aware of them and stores
+  // pointers to them.
+  STATIC_ASSERT(PropertyCallbackArguments::kHolderIndex == 0);
+  STATIC_ASSERT(PropertyCallbackArguments::kIsolateIndex == 1);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(PropertyCallbackArguments::kReturnValueOffset == 3);
+  STATIC_ASSERT(PropertyCallbackArguments::kDataIndex == 4);
+  STATIC_ASSERT(PropertyCallbackArguments::kThisIndex == 5);
+  STATIC_ASSERT(PropertyCallbackArguments::kArgsLength == 6);
+  DCHECK(!scratch2().is(reg));
+  DCHECK(!scratch3().is(reg));
+  DCHECK(!scratch4().is(reg));
+  __ push(receiver());
+  if (heap()->InNewSpace(callback->data())) {
+    __ Move(scratch3(), callback);
+    __ LoadP(scratch3(),
+             FieldMemOperand(scratch3(), ExecutableAccessorInfo::kDataOffset));
+  } else {
+    __ Move(scratch3(), Handle<Object>(callback->data(), isolate()));
+  }
+  __ push(scratch3());
+  __ LoadRoot(scratch3(), Heap::kUndefinedValueRootIndex);
+  __ mr(scratch4(), scratch3());
+  __ Push(scratch3(), scratch4());
+  __ mov(scratch4(), Operand(ExternalReference::isolate_address(isolate())));
+  __ Push(scratch4(), reg);
+  __ push(name());
+
+  // Abi for CallApiGetter
+  Register getter_address_reg = ApiGetterDescriptor::function_address();
+
+  Address getter_address = v8::ToCData<Address>(callback->getter());
+  ApiFunction fun(getter_address);
+  ExternalReference::Type type = ExternalReference::DIRECT_GETTER_CALL;
+  ExternalReference ref = ExternalReference(&fun, type, isolate());
+  __ mov(getter_address_reg, Operand(ref));
+
+  CallApiGetterStub stub(isolate());
+  __ TailCallStub(&stub);
+}
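
The STATIC_ASSERTs above pin the layout of the argument block this stub materializes. A sketch of those indices (names shortened; the real constants live in PropertyCallbackArguments): the pushes run from kThis down to kHolder, so the holder ends up at the lowest address, with the property name pushed below the whole block.

#include <cstddef>

// Indices into the callback argument block, matching the asserts above.
enum PropertyCallbackArg : std::size_t {
  kHolder = 0,
  kIsolate = 1,
  kReturnValueDefault = 2,
  kReturnValue = 3,
  kData = 4,
  kThis = 5,
  kLength = 6,
};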
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
+    LookupIterator* it, Register holder_reg) {
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+
+  // Compile the interceptor call, followed by inline code to load the
+  // property from further up the prototype chain if the call fails.
+  // Check that the maps haven't changed.
+  DCHECK(holder_reg.is(receiver()) || holder_reg.is(scratch1()));
+
+  // Preserve the receiver register explicitly whenever it is different from
+  // the holder and it is needed should the interceptor return without any
+  // result. The ACCESSOR case needs the receiver to be passed into C++ code;
+  // the FIELD case might cause a miss during the prototype check.
+  bool must_perform_prototype_check =
+      !holder().is_identical_to(it->GetHolder<JSObject>());
+  bool must_preserve_receiver_reg =
+      !receiver().is(holder_reg) &&
+      (it->state() == LookupIterator::ACCESSOR || must_perform_prototype_check);
+
+  // Save necessary data before invoking an interceptor.
+  // Requires a frame to make GC aware of pushed pointers.
+  {
+    FrameAndConstantPoolScope frame_scope(masm(), StackFrame::INTERNAL);
+    if (must_preserve_receiver_reg) {
+      __ Push(receiver(), holder_reg, this->name());
+    } else {
+      __ Push(holder_reg, this->name());
+    }
+    // Invoke the interceptor.  Note: the map checks from the receiver to
+    // the interceptor's holder have been compiled before (see the caller
+    // of this method).
+    CompileCallLoadPropertyWithInterceptor(
+        masm(), receiver(), holder_reg, this->name(), holder(),
+        IC::kLoadPropertyWithInterceptorOnly);
+
+    // Check if the interceptor provided a value for the property.  If so,
+    // return immediately.
+    Label interceptor_failed;
+    __ LoadRoot(scratch1(), Heap::kNoInterceptorResultSentinelRootIndex);
+    __ cmp(r3, scratch1());
+    __ beq(&interceptor_failed);
+    frame_scope.GenerateLeaveFrame();
+    __ Ret();
+
+    __ bind(&interceptor_failed);
+    __ pop(this->name());
+    __ pop(holder_reg);
+    if (must_preserve_receiver_reg) {
+      __ pop(receiver());
+    }
+    // Leave the internal frame.
+  }
+
+  GenerateLoadPostInterceptor(it, holder_reg);
+}
+
+
+void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
+  // Call the runtime system to load the interceptor.
+  DCHECK(holder()->HasNamedInterceptor());
+  DCHECK(!holder()->GetNamedInterceptor()->getter()->IsUndefined());
+  PushInterceptorArguments(masm(), receiver(), holder_reg, this->name(),
+                           holder());
+
+  ExternalReference ref = ExternalReference(
+      IC_Utility(IC::kLoadPropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(
+      ref, NamedLoadHandlerCompiler::kInterceptorArgsLength, 1);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
+    Handle<JSObject> object, Handle<Name> name,
+    Handle<ExecutableAccessorInfo> callback) {
+  Register holder_reg = Frontend(receiver(), name);
+
+  __ Push(receiver(), holder_reg);  // receiver
+  __ mov(ip, Operand(callback));    // callback info
+  __ push(ip);
+  __ mov(ip, Operand(name));
+  __ Push(ip, value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_callback_property =
+      ExternalReference(IC_Utility(IC::kStoreCallbackProperty), isolate());
+  __ TailCallExternalReference(store_callback_property, 5, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Handle<Code> NamedStoreHandlerCompiler::CompileStoreInterceptor(
+    Handle<Name> name) {
+  __ Push(receiver(), this->name(), value());
+
+  // Do tail-call to the runtime system.
+  ExternalReference store_ic_property = ExternalReference(
+      IC_Utility(IC::kStorePropertyWithInterceptor), isolate());
+  __ TailCallExternalReference(store_ic_property, 3, 1);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::FAST, name);
+}
+
+
+Register NamedStoreHandlerCompiler::value() {
+  return StoreDescriptor::ValueRegister();
+}
+
+
+Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
+    Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
+  Label miss;
+  FrontendHeader(receiver(), name, &miss);
+
+  // Get the value from the cell.
+  Register result = StoreDescriptor::ValueRegister();
+  __ mov(result, Operand(cell));
+  __ LoadP(result, FieldMemOperand(result, Cell::kValueOffset));
+
+  // Check for deleted property if property can actually be deleted.
+  if (is_configurable) {
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, ip);
+    __ beq(&miss);
+  }
+
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->named_load_global_stub(), 1, r4, r6);
+  __ Ret();
+
+  FrontendFooter(name, &miss);
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, name);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/ic-compiler-ppc.cc b/deps/v8/src/ic/ppc/ic-compiler-ppc.cc
new file mode 100644 (file)
index 0000000..c868456
--- /dev/null
@@ -0,0 +1,130 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+void PropertyICCompiler::GenerateRuntimeSetProperty(MacroAssembler* masm,
+                                                    StrictMode strict_mode) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  __ mov(r0, Operand(Smi::FromInt(strict_mode)));
+  __ Push(r0);
+
+  // Do tail-call to runtime routine.
+  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
+}
+
+
+#undef __
+#define __ ACCESS_MASM(masm())
+
+
+Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
+                                                    CodeHandleList* handlers,
+                                                    Handle<Name> name,
+                                                    Code::StubType type,
+                                                    IcCheckType check) {
+  Label miss;
+
+  if (check == PROPERTY &&
+      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
+    // In case we are compiling an IC for dictionary loads and stores, just
+    // check whether the name is unique.
+    if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      Register tmp = scratch1();
+      __ JumpIfSmi(this->name(), &miss);
+      __ LoadP(tmp, FieldMemOperand(this->name(), HeapObject::kMapOffset));
+      __ lbz(tmp, FieldMemOperand(tmp, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(tmp, &miss);
+    } else {
+      __ Cmpi(this->name(), Operand(name), r0);
+      __ bne(&miss);
+    }
+  }
+
+  Label number_case;
+  Label* smi_target = IncludesNumberType(types) ? &number_case : &miss;
+  __ JumpIfSmi(receiver(), smi_target);
+
+  // Polymorphic keyed stores may use the map register.
+  Register map_reg = scratch1();
+  DCHECK(kind() != Code::KEYED_STORE_IC ||
+         map_reg.is(ElementTransitionAndStoreDescriptor::MapRegister()));
+
+  int receiver_count = types->length();
+  int number_of_handled_maps = 0;
+  __ LoadP(map_reg, FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  for (int current = 0; current < receiver_count; ++current) {
+    Handle<HeapType> type = types->at(current);
+    Handle<Map> map = IC::TypeToMap(*type, isolate());
+    if (!map->is_deprecated()) {
+      number_of_handled_maps++;
+      __ mov(ip, Operand(map));
+      __ cmp(map_reg, ip);
+      if (type->Is(HeapType::Number())) {
+        DCHECK(!number_case.is_unused());
+        __ bind(&number_case);
+      }
+      __ Jump(handlers->at(current), RelocInfo::CODE_TARGET, eq);
+    }
+  }
+  DCHECK(number_of_handled_maps != 0);
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  InlineCacheState state =
+      number_of_handled_maps > 1 ? POLYMORPHIC : MONOMORPHIC;
+  return GetCode(kind(), type, name, state);
+}
+
+
+Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
+    MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
+    MapHandleList* transitioned_maps) {
+  Label miss;
+  __ JumpIfSmi(receiver(), &miss);
+
+  int receiver_count = receiver_maps->length();
+  __ LoadP(scratch1(), FieldMemOperand(receiver(), HeapObject::kMapOffset));
+  for (int i = 0; i < receiver_count; ++i) {
+    __ mov(ip, Operand(receiver_maps->at(i)));
+    __ cmp(scratch1(), ip);
+    if (transitioned_maps->at(i).is_null()) {
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq);
+    } else {
+      Label next_map;
+      __ bne(&next_map);
+      __ mov(transition_map(), Operand(transitioned_maps->at(i)));
+      __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al);
+      __ bind(&next_map);
+    }
+  }
+
+  __ bind(&miss);
+  TailCallBuiltin(masm(), MissBuiltin(kind()));
+
+  // Return the generated code.
+  return GetCode(kind(), Code::NORMAL, factory()->empty_string(), POLYMORPHIC);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/ic-ppc.cc b/deps/v8/src/ic/ppc/ic-ppc.cc
new file mode 100644 (file)
index 0000000..97c3e2f
--- /dev/null
@@ -0,0 +1,1047 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/ic/ic.h"
+#include "src/ic/ic-compiler.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+
+// ----------------------------------------------------------------------------
+// Static IC stub generators.
+//
+
+#define __ ACCESS_MASM(masm)
+
+
+static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type,
+                                            Label* global_object) {
+  // Register usage:
+  //   type: holds the receiver instance type on entry.
+  __ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE));
+  __ beq(global_object);
+  __ cmpi(type, Operand(JS_BUILTINS_OBJECT_TYPE));
+  __ beq(global_object);
+  __ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE));
+  __ beq(global_object);
+}
+
+
+// Helper function used from LoadIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// result:   Register for the result. It is only updated if a jump to the miss
+//           label is not done. It can be the same as elements or name, in
+//           which case it clobbers that register when no miss occurs.
+// The two scratch registers need to be different from elements, name and
+// result.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss,
+                                   Register elements, Register name,
+                                   Register result, Register scratch1,
+                                   Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry, check that the value is a normal
+  // property.
+  __ bind(&done);  // scratch2 == elements + 4 * index
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ mr(r0, scratch2);
+  __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask));
+  __ and_(scratch2, scratch1, scratch2, SetRC);
+  __ bne(miss, cr0);
+  __ mr(scratch2, r0);
+
+  // Get the value at the masked, scaled index and return.
+  __ LoadP(result,
+           FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
+}
+
+
+// Helper function used from StoreIC::GenerateNormal.
+//
+// elements: Property dictionary. It is not clobbered if a jump to the miss
+//           label is done.
+// name:     Property name. It is not clobbered if a jump to the miss label is
+//           done.
+// value:    The value to store.
+// The two scratch registers need to be different from elements, name and
+// value.
+// The generated code assumes that the receiver has slow properties,
+// is not a global object and does not have interceptors.
+static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss,
+                                    Register elements, Register name,
+                                    Register value, Register scratch1,
+                                    Register scratch2) {
+  // Main use of the scratch registers.
+  // scratch1: Used as temporary and to hold the capacity of the property
+  //           dictionary.
+  // scratch2: Used as temporary.
+  Label done;
+
+  // Probe the dictionary.
+  NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements,
+                                                   name, scratch1, scratch2);
+
+  // If probing finds an entry in the dictionary, check that the value
+  // is a normal property that is not read-only.
+  __ bind(&done);  // scratch2 == elements + 4 * index
+  const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
+  int kTypeAndReadOnlyMask =
+      PropertyDetails::TypeField::kMask |
+      PropertyDetails::AttributesField::encode(READ_ONLY);
+  __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
+  __ mr(r0, scratch2);
+  __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask));
+  __ and_(scratch2, scratch1, scratch2, SetRC);
+  __ bne(miss, cr0);
+  __ mr(scratch2, r0);
+
+  // Store the value at the masked, scaled index and return.
+  const int kValueOffset = kElementsStartOffset + kPointerSize;
+  __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
+  __ StoreP(value, MemOperand(scratch2));
+
+  // Update the write barrier. Make sure not to clobber the value.
+  __ mr(scratch1, value);
+  __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs);
+}
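
Both dictionary helpers address entries the same way. A sketch of that offset arithmetic, with the header size and elements start index as assumed values for illustration (the real constants come from NameDictionary and PropertyDetails):

const int kPointerSize = sizeof(void*);
const int kHeaderSize = 3 * kPointerSize;  // assumption for the sketch
const int kElementsStartIndex = 3;         // assumption for the sketch
const int kEntrySize = 3;                  // key, value, details

// The probe leaves scratch2 pointing at the key slot of the found entry.
int KeySlotOffset(int entry) {
  return kHeaderSize +
         (kElementsStartIndex + entry * kEntrySize) * kPointerSize;
}
int ValueSlotOffset(int entry) { return KeySlotOffset(entry) + kPointerSize; }
int DetailsSlotOffset(int entry) {
  return KeySlotOffset(entry) + 2 * kPointerSize;
}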
+
+
+// Checks the receiver for special cases (value type, slow case bits).
+// Falls through for regular JS object.
+static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
+                                           Register receiver, Register map,
+                                           Register scratch,
+                                           int interceptor_bit, Label* slow) {
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, slow);
+  // Get the map of the receiver.
+  __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check bit field.
+  __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+  DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000);
+  __ andi(r0, scratch,
+          Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
+  __ bne(slow, cr0);
+  // Check that the object is some kind of JS object EXCEPT JS Value type.
+  // In the case that the object is a value-wrapper object,
+  // we enter the runtime system to make sure that indexing into string
+  // objects works as intended.
+  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
+  __ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ cmpi(scratch, Operand(JS_OBJECT_TYPE));
+  __ blt(slow);
+}
+
+
+// Loads an indexed element from a fast case array.
+// If not_fast_array is NULL, doesn't perform the elements map check.
+static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver,
+                                  Register key, Register elements,
+                                  Register scratch1, Register scratch2,
+                                  Register result, Label* not_fast_array,
+                                  Label* out_of_range) {
+  // Register use:
+  //
+  // receiver - holds the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // elements - holds the elements of the receiver on exit.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'receiver' or 'key'.
+  //            Unchanged on bailout so 'receiver' and 'key' can be safely
+  //            used by further computation.
+  //
+  // Scratch registers:
+  //
+  // scratch1 - used to hold elements map and elements length.
+  //            Holds the elements map if not_fast_array branch is taken.
+  //
+  // scratch2 - used to hold the loaded value.
+
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  if (not_fast_array != NULL) {
+    // Check that the object is in fast mode and writable.
+    __ LoadP(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
+    __ cmp(scratch1, ip);
+    __ bne(not_fast_array);
+  } else {
+    __ AssertFastElements(elements);
+  }
+  // Check that the key (index) is within bounds.
+  __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ cmpl(key, scratch1);
+  __ bge(out_of_range);
+  // Fast case: Do the load.
+  __ addi(scratch1, elements,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // The key is a smi.
+  __ SmiToPtrArrayOffset(scratch2, key);
+  __ LoadPX(scratch2, MemOperand(scratch2, scratch1));
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(scratch2, ip);
+  // In case the loaded value is the_hole we have to consult GetProperty
+  // to ensure the prototype chain is searched.
+  __ beq(out_of_range);
+  __ mr(result, scratch2);
+}
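
A compact sketch of the fast-path load above, with standard C++ types standing in for the tagged values and std::nullopt playing the role of the_hole: both the bounds check and the hole check bail out to the same slow path so GetProperty can search the prototype chain.

#include <cstddef>
#include <optional>
#include <vector>

std::optional<int> FastArrayLoad(
    const std::vector<std::optional<int>>& elements, std::size_t key) {
  if (key >= elements.size()) return std::nullopt;  // out_of_range: go slow
  if (!elements[key].has_value()) return std::nullopt;  // the_hole: go slow
  return elements[key];
}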
+
+
+// Checks whether a key is an array index string or a unique name.
+// Falls through if a key is a unique name.
+static void GenerateKeyNameCheck(MacroAssembler* masm, Register key,
+                                 Register map, Register hash,
+                                 Label* index_string, Label* not_unique) {
+  // The key is not a smi.
+  Label unique;
+  // Is it a name?
+  __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE);
+  __ bgt(not_unique);
+  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
+  __ beq(&unique);
+
+  // Is the string an array index, with cached numeric value?
+  __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+  __ mov(r8, Operand(Name::kContainsCachedArrayIndexMask));
+  __ and_(r0, hash, r8, SetRC);
+  __ beq(index_string, cr0);
+
+  // Is the string internalized? We know it's a string, so a single
+  // bit test is enough.
+  // map: key map
+  __ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kInternalizedTag == 0);
+  __ andi(r0, hash, Operand(kIsNotInternalizedMask));
+  __ bne(not_unique, cr0);
+
+  __ bind(&unique);
+}
+
+
+void LoadIC::GenerateNormal(MacroAssembler* masm) {
+  Register dictionary = r3;
+  DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(!dictionary.is(LoadDescriptor::NameRegister()));
+
+  Label slow;
+
+  __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(),
+                                       JSObject::kPropertiesOffset));
+  GenerateDictionaryLoad(masm, &slow, dictionary,
+                         LoadDescriptor::NameRegister(), r3, r6, r7);
+  __ Ret();
+
+  // Dictionary load failed, go slow (but don't miss).
+  __ bind(&slow);
+  GenerateRuntimeGetProperty(masm);
+}
+
+
+// A register that isn't one of the parameters to the load ic.
+static const Register LoadIC_TempRegister() { return r6; }
+
+
+void LoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in lr.
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->load_miss(), 1, r6, r7);
+
+  __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in lr.
+
+  __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister());
+  __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister());
+
+  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
+}
+
+
+static MemOperand GenerateMappedArgumentsLookup(
+    MacroAssembler* masm, Register object, Register key, Register scratch1,
+    Register scratch2, Register scratch3, Label* unmapped_case,
+    Label* slow_case) {
+  Heap* heap = masm->isolate()->heap();
+
+  // Check that the receiver is a JSObject. Because of the map check
+  // later, we do not need to check for interceptors or whether it
+  // requires access checks.
+  __ JumpIfSmi(object, slow_case);
+  // Check that the object is some kind of JSObject.
+  __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE);
+  __ blt(slow_case);
+
+  // Check that the key is a positive smi.
+  __ mov(scratch1, Operand(0x80000001));
+  __ and_(r0, key, scratch1, SetRC);
+  __ bne(slow_case, cr0);
+
+  // Load the elements into scratch1 and check its map.
+  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
+  __ LoadP(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
+  __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK);
+
+  // Check if the element is in the range of mapped arguments. If not, jump
+  // to the unmapped lookup with the parameter map in scratch1.
+  __ LoadP(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
+  __ SubSmiLiteral(scratch2, scratch2, Smi::FromInt(2), r0);
+  __ cmpl(key, scratch2);
+  __ bge(unmapped_case);
+
+  // Load element index and check whether it is the hole.
+  const int kOffset =
+      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;
+
+  __ SmiToPtrArrayOffset(scratch3, key);
+  __ addi(scratch3, scratch3, Operand(kOffset));
+
+  __ LoadPX(scratch2, MemOperand(scratch1, scratch3));
+  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
+  __ cmp(scratch2, scratch3);
+  __ beq(unmapped_case);
+
+  // Load value from context and return it. We can reuse scratch1 because
+  // we do not jump to the unmapped lookup (which requires the parameter
+  // map in scratch1).
+  __ LoadP(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  __ SmiToPtrArrayOffset(scratch3, scratch2);
+  __ addi(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
+  return MemOperand(scratch1, scratch3);
+}
+
+
+static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
+                                                  Register key,
+                                                  Register parameter_map,
+                                                  Register scratch,
+                                                  Label* slow_case) {
+  // Element is in arguments backing store, which is referenced by the
+  // second element of the parameter_map. The parameter_map register
+  // must be loaded with the parameter map of the arguments object and is
+  // overwritten.
+  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
+  Register backing_store = parameter_map;
+  __ LoadP(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
+  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
+  __ CheckMap(backing_store, scratch, fixed_array_map, slow_case,
+              DONT_DO_SMI_CHECK);
+  __ LoadP(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
+  __ cmpl(key, scratch);
+  __ bge(slow_case);
+  __ SmiToPtrArrayOffset(scratch, key);
+  __ addi(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  return MemOperand(backing_store, scratch);
+}
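
A sketch of the sloppy-arguments lookup the two helpers implement, with a simplified layout and hypothetical types: each mapped slot holds either a context slot index (the argument is aliased to a local) or the hole (the value lives in the plain backing store).

#include <cstddef>
#include <optional>
#include <vector>

struct ArgumentsObject {
  std::vector<std::optional<std::size_t>> mapped;  // context index or hole
  std::vector<int> context;        // aliased storage for mapped arguments
  std::vector<int> backing_store;  // unmapped arguments
};

int LoadSloppyArgument(const ArgumentsObject& a, std::size_t key) {
  if (key < a.mapped.size() && a.mapped[key].has_value()) {
    return a.context[*a.mapped[key]];  // mapped: read through the context
  }
  return a.backing_store[key];  // unmapped case
}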
+
+
+void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  DCHECK(receiver.is(r4));
+  DCHECK(key.is(r5));
+  DCHECK(value.is(r3));
+
+  Label slow, notin;
+  MemOperand mapped_location = GenerateMappedArgumentsLookup(
+      masm, receiver, key, r6, r7, r8, &notin, &slow);
+  Register mapped_base = mapped_location.ra();
+  Register mapped_offset = mapped_location.rb();
+  __ StorePX(value, mapped_location);
+  __ add(r9, mapped_base, mapped_offset);
+  __ mr(r11, value);
+  __ RecordWrite(mapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ Ret();
+  __ bind(&notin);
+  // The unmapped lookup expects that the parameter map is in r6.
+  MemOperand unmapped_location =
+      GenerateUnmappedArgumentsLookup(masm, key, r6, r7, &slow);
+  Register unmapped_base = unmapped_location.ra();
+  Register unmapped_offset = unmapped_location.rb();
+  __ StorePX(value, unmapped_location);
+  __ add(r9, unmapped_base, unmapped_offset);
+  __ mr(r11, value);
+  __ RecordWrite(unmapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ Ret();
+  __ bind(&slow);
+  GenerateMiss(masm);
+}
+
+
+void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
+  // The return address is in lr.
+  Isolate* isolate = masm->isolate();
+
+  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r6, r7);
+
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);
+
+  __ TailCallExternalReference(ref, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
+  // The return address is in lr.
+
+  __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());
+
+  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
+}
+
+
+void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
+  // The return address is in lr.
+  Label slow, check_name, index_smi, index_name, property_array_property;
+  Label probe_dictionary, check_number_dictionary;
+
+  Register key = LoadDescriptor::NameRegister();
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  DCHECK(key.is(r5));
+  DCHECK(receiver.is(r4));
+
+  Isolate* isolate = masm->isolate();
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &check_name);
+  __ bind(&index_smi);
+  // Now the key is known to be a smi. This place is also jumped to from below
+  // where a numeric string is converted to a smi.
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
+                                 Map::kHasIndexedInterceptor, &slow);
+
+  // Check the receiver's map to see if it has fast elements.
+  __ CheckFastElements(r3, r6, &check_number_dictionary);
+
+  GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, NULL, &slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6);
+  __ Ret();
+
+  __ bind(&check_number_dictionary);
+  __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset));
+
+  // Check whether the elements is a number dictionary.
+  // r6: elements map
+  // r7: elements
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r6, ip);
+  __ bne(&slow);
+  __ SmiUntag(r3, key);
+  __ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8);
+  __ Ret();
+
+  // Slow case, key and receiver still in r3 and r4.
+  __ bind(&slow);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r7,
+                      r6);
+  GenerateRuntimeGetProperty(masm);
+
+  __ bind(&check_name);
+  GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow);
+
+  GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6,
+                                 Map::kHasNamedInterceptor, &slow);
+
+  // If the receiver is a fast-case object, check the keyed lookup
+  // cache. Otherwise probe the dictionary.
+  __ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r7, ip);
+  __ beq(&probe_dictionary);
+
+  // Load the map of the receiver, compute the keyed lookup cache hash
+  // based on 32 bits of the map pointer and the name hash.
+  __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ srawi(r6, r3, KeyedLookupCache::kMapHashShift);
+  __ lwz(r7, FieldMemOperand(key, Name::kHashFieldOffset));
+  __ srawi(r7, r7, Name::kHashShift);
+  __ xor_(r6, r6, r7);
+  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
+  __ mov(r7, Operand(mask));
+  __ and_(r6, r6, r7, LeaveRC);
+
+  // Load the key (consisting of map and unique name) from the cache and
+  // check for match.
+  Label load_in_object_property;
+  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
+  Label hit_on_nth_entry[kEntriesPerBucket];
+  ExternalReference cache_keys =
+      ExternalReference::keyed_lookup_cache_keys(isolate);
+
+  __ mov(r7, Operand(cache_keys));
+  __ mr(r0, r5);
+  __ ShiftLeftImm(r5, r6, Operand(kPointerSizeLog2 + 1));
+  __ add(r7, r7, r5);
+  __ mr(r5, r0);
+
+  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
+    Label try_next_entry;
+    // Load map and move r7 to next entry.
+    __ LoadP(r8, MemOperand(r7));
+    __ addi(r7, r7, Operand(kPointerSize * 2));
+    __ cmp(r3, r8);
+    __ bne(&try_next_entry);
+    __ LoadP(r8, MemOperand(r7, -kPointerSize));  // Load name
+    __ cmp(key, r8);
+    __ beq(&hit_on_nth_entry[i]);
+    __ bind(&try_next_entry);
+  }
+
+  // Last entry: Load map and move r7 to name.
+  __ LoadP(r8, MemOperand(r7));
+  __ addi(r7, r7, Operand(kPointerSize));
+  __ cmp(r3, r8);
+  __ bne(&slow);
+  __ LoadP(r8, MemOperand(r7));
+  __ cmp(key, r8);
+  __ bne(&slow);
+
+  // Get field offset.
+  // r3     : receiver's map
+  // r6     : lookup cache index
+  ExternalReference cache_field_offsets =
+      ExternalReference::keyed_lookup_cache_field_offsets(isolate);
+
+  // Hit on nth entry.
+  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
+    __ bind(&hit_on_nth_entry[i]);
+    __ mov(r7, Operand(cache_field_offsets));
+    if (i != 0) {
+      __ addi(r6, r6, Operand(i));
+    }
+    __ ShiftLeftImm(r8, r6, Operand(2));
+    __ lwzx(r8, MemOperand(r8, r7));
+    __ lbz(r9, FieldMemOperand(r3, Map::kInObjectPropertiesOffset));
+    __ sub(r8, r8, r9);
+    __ cmpi(r8, Operand::Zero());
+    __ bge(&property_array_property);
+    if (i != 0) {
+      __ b(&load_in_object_property);
+    }
+  }
+
+  // Load in-object property.
+  __ bind(&load_in_object_property);
+  __ lbz(r9, FieldMemOperand(r3, Map::kInstanceSizeOffset));
+  __ add(r9, r9, r8);  // Index from start of object.
+  __ subi(receiver, receiver, Operand(kHeapObjectTag));  // Remove the heap tag.
+  __ ShiftLeftImm(r3, r9, Operand(kPointerSizeLog2));
+  __ LoadPX(r3, MemOperand(r3, receiver));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      r7, r6);
+  __ Ret();
+
+  // Load property array property.
+  __ bind(&property_array_property);
+  __ LoadP(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ addi(receiver, receiver,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ShiftLeftImm(r3, r8, Operand(kPointerSizeLog2));
+  __ LoadPX(r3, MemOperand(r3, receiver));
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1,
+                      r7, r6);
+  __ Ret();
+
+  // Do a quick inline probe of the receiver's dictionary, if it
+  // exists.
+  __ bind(&probe_dictionary);
+  // r6: elements
+  __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+  GenerateGlobalInstanceTypeCheck(masm, r3, &slow);
+  // Load the property to r3.
+  GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7);
+  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r7,
+                      r6);
+  __ Ret();
+
+  __ bind(&index_name);
+  __ IndexFromHash(r6, key);
+  // Now jump to the place where smi keys are handled.
+  __ b(&index_smi);
+}
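
The lookup-cache probe above hashes the low 32 bits of the map pointer against the name's hash field; each bucket then holds kEntriesPerBucket (map, name) pairs that are probed linearly. A sketch of the index computation, with the shift and capacity values as placeholder assumptions (the real ones are KeyedLookupCache constants):

#include <cstdint>

const int kMapHashShift = 5;             // assumption for the sketch
const int kNameHashShift = 2;            // assumption for the sketch
const uint32_t kCapacityMask = 128 - 1;  // assumption for the sketch

uint32_t LookupCacheIndex(uint32_t map_low32, uint32_t name_hash_field) {
  return ((map_low32 >> kMapHashShift) ^
          (name_hash_field >> kNameHashShift)) &
         kCapacityMask;
}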
+
+
+void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
+  // Push receiver, key and value for runtime call.
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+static void KeyedStoreGenerateMegamorphicHelper(
+    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
+    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
+    Register value, Register key, Register receiver, Register receiver_map,
+    Register elements_map, Register elements) {
+  Label transition_smi_elements;
+  Label finish_object_store, non_double_value, transition_double_elements;
+  Label fast_double_without_map_check;
+
+  // Fast case: Do the store, could be either Object or double.
+  __ bind(fast_object);
+  Register scratch_value = r7;
+  Register address = r8;
+  if (check_map == kCheckMap) {
+    __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+    __ mov(scratch_value,
+           Operand(masm->isolate()->factory()->fixed_array_map()));
+    __ cmp(elements_map, scratch_value);
+    __ bne(fast_double);
+  }
+
+  // HOLECHECK: guards "A[i] = V"
+  // We have to go to the runtime if the current value is the hole because
+  // there may be a callback on the element.
+  Label holecheck_passed1;
+  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiToPtrArrayOffset(scratch_value, key);
+  __ LoadPX(scratch_value, MemOperand(address, scratch_value));
+  __ Cmpi(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()),
+          r0);
+  __ bne(&holecheck_passed1);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+                                      slow);
+
+  __ bind(&holecheck_passed1);
+
+  // Smi stores don't require further checks.
+  Label non_smi_value;
+  __ JumpIfNotSmi(value, &non_smi_value);
+
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
+    __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
+              r0);
+  }
+  // It's irrelevant whether array is smi-only or not when writing a smi.
+  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiToPtrArrayOffset(scratch_value, key);
+  __ StorePX(value, MemOperand(address, scratch_value));
+  __ Ret();
+
+  __ bind(&non_smi_value);
+  // Escape to elements kind transition case.
+  __ CheckFastObjectElements(receiver_map, scratch_value,
+                             &transition_smi_elements);
+
+  // Fast elements array, store the value to the elements backing store.
+  __ bind(&finish_object_store);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
+    __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
+              r0);
+  }
+  __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiToPtrArrayOffset(scratch_value, key);
+  __ StorePUX(value, MemOperand(address, scratch_value));
+  // Update write barrier for the elements array address.
+  __ mr(scratch_value, value);  // Preserve the value which is returned.
+  __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret();
+
+  __ bind(fast_double);
+  if (check_map == kCheckMap) {
+    // Check for fast double array case. If this fails, call through to the
+    // runtime.
+    __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex);
+    __ bne(slow);
+  }
+
+  // HOLECHECK: guards "A[i] double hole?"
+  // We have to see if the double version of the hole is present. If so,
+  // go to the runtime.
+  __ addi(address, elements,
+          Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
+                   kHeapObjectTag)));
+  __ SmiToDoubleArrayOffset(scratch_value, key);
+  __ lwzx(scratch_value, MemOperand(address, scratch_value));
+  __ Cmpi(scratch_value, Operand(kHoleNanUpper32), r0);
+  __ bne(&fast_double_without_map_check);
+  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
+                                      slow);
+
+  __ bind(&fast_double_without_map_check);
+  __ StoreNumberToDoubleElements(value, key, elements, r6, d0,
+                                 &transition_double_elements);
+  if (increment_length == kIncrementLength) {
+    // Add 1 to receiver->length.
+    __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0);
+    __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset),
+              r0);
+  }
+  __ Ret();
+
+  __ bind(&transition_smi_elements);
+  // Transition the array appropriately depending on the value type.
+  __ LoadP(r7, FieldMemOperand(value, HeapObject::kMapOffset));
+  __ CompareRoot(r7, Heap::kHeapNumberMapRootIndex);
+  __ bne(&non_double_value);
+
+  // Value is a double. Transition FAST_SMI_ELEMENTS ->
+  // FAST_DOUBLE_ELEMENTS and complete the store.
+  __ LoadTransitionedArrayMapConditional(
+      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r7, slow);
+  AllocationSiteMode mode =
+      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
+  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
+                                                   receiver_map, mode, slow);
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ b(&fast_double_without_map_check);
+
+  __ bind(&non_double_value);
+  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
+  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, r7, slow);
+  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ b(&finish_object_store);
+
+  __ bind(&transition_double_elements);
+  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
+  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
+  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
+  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
+                                         receiver_map, r7, slow);
+  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
+  ElementsTransitionGenerator::GenerateDoubleToObject(
+      masm, receiver, key, value, receiver_map, mode, slow);
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ b(&finish_object_store);
+}
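
A sketch of the elements-kind transition choice encoded in the three tail sections above (the enum values are stand-ins for the real kinds in src/elements-kind.h):

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

ElementsKind TransitionFor(ElementsKind current, bool value_is_smi,
                           bool value_is_heap_number) {
  if (value_is_smi) return current;  // a smi fits in every fast kind
  if (current == FAST_SMI_ELEMENTS) {
    return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
  }
  if (current == FAST_DOUBLE_ELEMENTS && !value_is_heap_number) {
    return FAST_ELEMENTS;  // non-number into a double array: widen
  }
  return current;
}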
+
+
+void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
+                                       StrictMode strict_mode) {
+  // ---------- S t a t e --------------
+  //  -- r3     : value
+  //  -- r4     : key
+  //  -- r5     : receiver
+  //  -- lr     : return address
+  // -----------------------------------
+  Label slow, fast_object, fast_object_grow;
+  Label fast_double, fast_double_grow;
+  Label array, extra, check_if_double_array, maybe_name_key, miss;
+
+  // Register usage.
+  Register value = StoreDescriptor::ValueRegister();
+  Register key = StoreDescriptor::NameRegister();
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  DCHECK(receiver.is(r4));
+  DCHECK(key.is(r5));
+  DCHECK(value.is(r3));
+  Register receiver_map = r6;
+  Register elements_map = r9;
+  Register elements = r10;  // Elements array of the receiver.
+  // r7 and r8 are used as general scratch registers.
+
+  // Check that the key is a smi.
+  __ JumpIfNotSmi(key, &maybe_name_key);
+  // Check that the object isn't a smi.
+  __ JumpIfSmi(receiver, &slow);
+  // Get the map of the object.
+  __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  // Check that the receiver does not require access checks and is not observed.
+  // The generic stub does not perform map checks or handle observed objects.
+  __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
+  __ andi(r0, ip,
+          Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved));
+  __ bne(&slow, cr0);
+  // Check if the object is a JS array or not.
+  __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
+  __ cmpi(r7, Operand(JS_ARRAY_TYPE));
+  __ beq(&array);
+  // Check that the object is some kind of JSObject.
+  __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE));
+  __ blt(&slow);
+
+  // Object case: Check key against length in the elements array.
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  // Check array bounds. Both the key and the length of FixedArray are smis.
+  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ cmpl(key, ip);
+  __ blt(&fast_object);
+
+  // Slow case, handle jump to runtime.
+  __ bind(&slow);
+  // Entry registers are intact.
+  // r3: value.
+  // r4: key.
+  // r5: receiver.
+  PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode);
+  // Never returns to here.
+
+  __ bind(&maybe_name_key);
+  __ LoadP(r7, FieldMemOperand(key, HeapObject::kMapOffset));
+  __ lbz(r7, FieldMemOperand(r7, Map::kInstanceTypeOffset));
+  __ JumpIfNotUniqueNameInstanceType(r7, &slow);
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+                                               key, r6, r7, r8, r9);
+  // Cache miss.
+  __ b(&miss);
+
+  // Extra capacity case: Check if there is extra capacity to
+  // perform the store and update the length. Used for adding one
+  // element to the array by writing to array[array.length].
+  __ bind(&extra);
+  // Condition code from comparing key and array length is still available.
+  __ bne(&slow);  // Only support writing to array[array.length].
+  // Check for room in the elements backing store.
+  // Both the key and the length of FixedArray are smis.
+  __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  __ cmpl(key, ip);
+  __ bge(&slow);
+  __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
+  __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map()));
+  __ cmp(elements_map, ip);  // ip can be reused here.
+  __ bne(&check_if_double_array);
+  __ b(&fast_object_grow);
+
+  __ bind(&check_if_double_array);
+  __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map()));
+  __ cmp(elements_map, ip);  // ip reused here as well.
+  __ bne(&slow);
+  __ b(&fast_double_grow);
+
+  // Array case: Get the length and the elements array from the JS
+  // array. Check that the array is in fast mode (and writable); if it
+  // is, the length is always a smi.
+  __ bind(&array);
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+
+  // Check the key against the length in the array.
+  __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset));
+  __ cmpl(key, ip);
+  __ bge(&extra);
+
+  KeyedStoreGenerateMegamorphicHelper(
+      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
+      value, key, receiver, receiver_map, elements_map, elements);
+  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
+                                      &fast_double_grow, &slow, kDontCheckMap,
+                                      kIncrementLength, value, key, receiver,
+                                      receiver_map, elements_map, elements);
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  DCHECK(receiver.is(r4));
+  DCHECK(name.is(r5));
+  DCHECK(StoreDescriptor::ValueRegister().is(r3));
+
+  // Probe the stub cache with the receiver and name.
+  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
+      Code::ComputeHandlerFlags(Code::STORE_IC));
+
+  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
+                                               name, r6, r7, r8, r9);
+
+  // Cache miss: Jump to runtime.
+  GenerateMiss(masm);
+}
+
+
+void StoreIC::GenerateMiss(MacroAssembler* masm) {
+  __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(),
+          StoreDescriptor::ValueRegister());
+
+  // Perform tail call to the entry.
+  ExternalReference ref =
+      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
+  __ TailCallExternalReference(ref, 3, 1);
+}
+
+
+void StoreIC::GenerateNormal(MacroAssembler* masm) {
+  Label miss;
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Register dictionary = r6;
+  DCHECK(receiver.is(r4));
+  DCHECK(name.is(r5));
+  DCHECK(value.is(r3));
+
+  __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+
+  GenerateDictionaryStore(masm, &miss, dictionary, name, value, r7, r8);
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->store_normal_hit(), 1, r7, r8);
+  __ Ret();
+
+  __ bind(&miss);
+  __ IncrementCounter(counters->store_normal_miss(), 1, r7, r8);
+  GenerateMiss(masm);
+}
+
+
+#undef __
+
+
+Condition CompareIC::ComputeCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+bool CompareIC::HasInlinedSmiCode(Address address) {
+  // The address of the instruction following the call.
+  Address cmp_instruction_address =
+      Assembler::return_address_from_call_start(address);
+
+  // If the instruction following the call is not a cmp rx, #yyy, nothing
+  // was inlined.
+  Instr instr = Assembler::instr_at(cmp_instruction_address);
+  return Assembler::IsCmpImmediate(instr);
+}
+
+
+//
+// This code is paired with the JumpPatchSite class in full-codegen-ppc.cc
+//
+void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
+  Address cmp_instruction_address =
+      Assembler::return_address_from_call_start(address);
+
+  // If the instruction following the call is not a cmp rx, #yyy, nothing
+  // was inlined.
+  Instr instr = Assembler::instr_at(cmp_instruction_address);
+  if (!Assembler::IsCmpImmediate(instr)) {
+    return;
+  }
+
+  // The delta to the start of the map check instruction, encoded in the
+  // immediate and register fields of the cmp instruction.
+  int delta = Assembler::GetCmpImmediateRawImmediate(instr);
+  delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff16Mask;
+  // If the delta is 0 the instruction is cmp r0, #0 which also signals that
+  // nothing was inlined.
+  if (delta == 0) {
+    return;
+  }
+
+  if (FLAG_trace_ic) {
+    PrintF("[  patching ic at %p, cmp=%p, delta=%d\n", address,
+           cmp_instruction_address, delta);
+  }
+
+  Address patch_address =
+      cmp_instruction_address - delta * Instruction::kInstrSize;
+  Instr instr_at_patch = Assembler::instr_at(patch_address);
+  Instr branch_instr =
+      Assembler::instr_at(patch_address + Instruction::kInstrSize);
+  // This is patching a conditional "jump if not smi/jump if smi" site.
+  // The check is enabled by changing from
+  //   cmp cr0, rx, rx
+  // to
+  //   rlwinm(r0, value, 0, 31, 31, SetRC);
+  //   bc(label, BT/BF, 2)
+  // and disabled again by the reverse change.
+  CodePatcher patcher(patch_address, 2);
+  Register reg = Assembler::GetRA(instr_at_patch);
+  if (check == ENABLE_INLINED_SMI_CHECK) {
+    DCHECK(Assembler::IsCmpRegister(instr_at_patch));
+    DCHECK_EQ(Assembler::GetRA(instr_at_patch).code(),
+              Assembler::GetRB(instr_at_patch).code());
+    patcher.masm()->TestIfSmi(reg, r0);
+  } else {
+    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
+#if V8_TARGET_ARCH_PPC64
+    DCHECK(Assembler::IsRldicl(instr_at_patch));
+#else
+    DCHECK(Assembler::IsRlwinm(instr_at_patch));
+#endif
+    patcher.masm()->cmp(reg, reg, cr0);
+  }
+  DCHECK(Assembler::IsBranch(branch_instr));
+
+  // Invert the logic of the branch.
+  if (Assembler::GetCondition(branch_instr) == eq) {
+    patcher.EmitCondition(ne);
+  } else {
+    DCHECK(Assembler::GetCondition(branch_instr) == ne);
+    patcher.EmitCondition(eq);
+  }
+}
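
A sketch of how the patch site is located from the IC call address (kOff16Mask as used above is assumed to be 0xffff for the sketch): the delta back to the map-check instruction is split across the immediate and register fields of the cmp so that larger distances stay encodable.

#include <cstdint>

const int kInstrSize = 4;       // PPC fixed-width instructions
const int kOff16Mask = 0xffff;  // assumption for the sketch

// Recover the address of the map-check instruction from the cmp that
// follows the IC call site; 0 means nothing was inlined.
uintptr_t MapCheckAddress(uintptr_t cmp_address, int raw_immediate,
                          int register_code) {
  int delta = raw_immediate + register_code * kOff16Mask;
  if (delta == 0) return 0;  // cmp r0, #0: nothing was inlined
  return cmp_address - static_cast<uintptr_t>(delta) * kInstrSize;
}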
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ic/ppc/stub-cache-ppc.cc b/deps/v8/src/ic/ppc/stub-cache-ppc.cc
new file mode 100644 (file)
index 0000000..816a2ae
--- /dev/null
@@ -0,0 +1,191 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/ic/stub-cache.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm)
+
+
+static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
+                       Code::Flags flags, bool leave_frame,
+                       StubCache::Table table, Register receiver, Register name,
+                       // Number of the cache entry, not scaled.
+                       Register offset, Register scratch, Register scratch2,
+                       Register offset_scratch) {
+  ExternalReference key_offset(isolate->stub_cache()->key_reference(table));
+  ExternalReference value_offset(isolate->stub_cache()->value_reference(table));
+  ExternalReference map_offset(isolate->stub_cache()->map_reference(table));
+
+  uintptr_t key_off_addr = reinterpret_cast<uintptr_t>(key_offset.address());
+  uintptr_t value_off_addr =
+      reinterpret_cast<uintptr_t>(value_offset.address());
+  uintptr_t map_off_addr = reinterpret_cast<uintptr_t>(map_offset.address());
+
+  // Check the relative positions of the address fields.
+  DCHECK(value_off_addr > key_off_addr);
+  DCHECK((value_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((value_off_addr - key_off_addr) < (256 * 4));
+  DCHECK(map_off_addr > key_off_addr);
+  DCHECK((map_off_addr - key_off_addr) % 4 == 0);
+  DCHECK((map_off_addr - key_off_addr) < (256 * 4));
+
+  Label miss;
+  Register base_addr = scratch;
+  scratch = no_reg;
+
+  // Multiply by 3 because there are 3 fields per entry (name, code, map).
+  __ ShiftLeftImm(offset_scratch, offset, Operand(1));
+  __ add(offset_scratch, offset, offset_scratch);
+
+  // Calculate the base address of the entry.
+  __ mov(base_addr, Operand(key_offset));
+  __ ShiftLeftImm(scratch2, offset_scratch, Operand(kPointerSizeLog2));
+  __ add(base_addr, base_addr, scratch2);
+
+  // Check that the key in the entry matches the name.
+  __ LoadP(ip, MemOperand(base_addr, 0));
+  __ cmp(name, ip);
+  __ bne(&miss);
+
+  // Check the map matches.
+  __ LoadP(ip, MemOperand(base_addr, map_off_addr - key_off_addr));
+  __ LoadP(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ cmp(ip, scratch2);
+  __ bne(&miss);
+
+  // Get the code entry from the cache.
+  Register code = scratch2;
+  scratch2 = no_reg;
+  __ LoadP(code, MemOperand(base_addr, value_off_addr - key_off_addr));
+
+  // Check that the flags match what we're looking for.
+  Register flags_reg = base_addr;
+  base_addr = no_reg;
+  __ lwz(flags_reg, FieldMemOperand(code, Code::kFlagsOffset));
+
+  DCHECK(!r0.is(flags_reg));
+  __ li(r0, Operand(Code::kFlagsNotUsedInLookup));
+  __ andc(flags_reg, flags_reg, r0);
+  __ mov(r0, Operand(flags));
+  __ cmpl(flags_reg, r0);
+  __ bne(&miss);
+
+#ifdef DEBUG
+  if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) {
+    __ b(&miss);
+  } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) {
+    __ b(&miss);
+  }
+#endif
+
+  if (leave_frame) __ LeaveFrame(StackFrame::INTERNAL);
+
+  // Jump to the first instruction in the code stub.
+  __ addi(r0, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ mtctr(r0);
+  __ bctr();
+
+  // Miss: fall through.
+  __ bind(&miss);
+}
+
+
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
+                              bool leave_frame, Register receiver,
+                              Register name, Register scratch, Register extra,
+                              Register extra2, Register extra3) {
+  Isolate* isolate = masm->isolate();
+  Label miss;
+
+#if V8_TARGET_ARCH_PPC64
+  // Make sure that the code is valid. The multiplying code relies on the
+  // entry size being 24.
+  DCHECK(sizeof(Entry) == 24);
+#else
+  // Make sure that the code is valid. The multiplying code relies on the
+  // entry size being 12.
+  DCHECK(sizeof(Entry) == 12);
+#endif
+
+  // Make sure the flags do not name a specific type.
+  DCHECK(Code::ExtractTypeFromFlags(flags) == 0);
+
+  // Make sure that there are no register conflicts.
+  DCHECK(!scratch.is(receiver));
+  DCHECK(!scratch.is(name));
+  DCHECK(!extra.is(receiver));
+  DCHECK(!extra.is(name));
+  DCHECK(!extra.is(scratch));
+  DCHECK(!extra2.is(receiver));
+  DCHECK(!extra2.is(name));
+  DCHECK(!extra2.is(scratch));
+  DCHECK(!extra2.is(extra));
+
+  // Check that the scratch, extra, extra2 and extra3 registers are valid.
+  DCHECK(!scratch.is(no_reg));
+  DCHECK(!extra.is(no_reg));
+  DCHECK(!extra2.is(no_reg));
+  DCHECK(!extra3.is(no_reg));
+
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, extra2,
+                      extra3);
+
+  // Check that the receiver isn't a smi.
+  __ JumpIfSmi(receiver, &miss);
+
+  // Get the map of the receiver and compute the hash.
+  __ lwz(scratch, FieldMemOperand(name, Name::kHashFieldOffset));
+  __ LoadP(ip, FieldMemOperand(receiver, HeapObject::kMapOffset));
+  __ add(scratch, scratch, ip);
+#if V8_TARGET_ARCH_PPC64
+  // Use only the low 32 bits of the map pointer.
+  __ rldicl(scratch, scratch, 0, 32);
+#endif
+  uint32_t mask = kPrimaryTableSize - 1;
+  // We shift out the last two bits because they are not part of the hash and
+  // they are always 01 for maps.
+  __ ShiftRightImm(scratch, scratch, Operand(kCacheIndexShift));
+  // Mask down the xori argument to the minimum to keep the immediate
+  // encodable.
+  __ xori(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask));
+  // Mask the hash down to the primary table size.
+  __ andi(scratch, scratch, Operand(mask));
+
+  // Probe the primary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Primary miss: Compute hash for secondary probe.
+  __ ShiftRightImm(extra, name, Operand(kCacheIndexShift));
+  __ sub(scratch, scratch, extra);
+  uint32_t mask2 = kSecondaryTableSize - 1;
+  __ addi(scratch, scratch, Operand((flags >> kCacheIndexShift) & mask2));
+  __ andi(scratch, scratch, Operand(mask2));
+
+  // Probe the secondary table.
+  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
+             scratch, extra, extra2, extra3);
+
+  // Cache miss: Fall through and let the caller handle the miss by
+  // entering the runtime system.
+  __ bind(&miss);
+  __ IncrementCounter(counters->megamorphic_stub_cache_misses(), 1, extra2,
+                      extra3);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
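
The new PPC file above is a direct port of the stub cache's two-level probing scheme: hash the property name together with the receiver's map, probe the primary table, re-hash on a miss and probe the secondary table, and only then fall back to the runtime. The stand-alone C++ sketch below models that lookup on the host side; the names (StubCacheModel, Entry) and the exact hash mixing are illustrative assumptions, not V8's definitions.

#include <cstddef>
#include <cstdint>

// Each table entry holds the (name, map, code) triple the assembly compares.
struct Entry {
  uintptr_t name;  // interned property name identity
  uintptr_t map;   // receiver map identity
  void* code;      // handler code, or nullptr when the slot is empty
};

class StubCacheModel {
 public:
  static const size_t kPrimarySize = 2048;   // power of two, as in V8
  static const size_t kSecondarySize = 512;  // power of two, as in V8

  void* Probe(uintptr_t name, uintptr_t map, uint32_t name_hash) const {
    // Primary index: mix the name hash with the low map bits, then mask.
    size_t primary =
        (name_hash + static_cast<uint32_t>(map)) & (kPrimarySize - 1);
    if (Matches(primary_[primary], name, map)) return primary_[primary].code;
    // Secondary index: derived from the primary index and the name, so two
    // (name, map) pairs that collide in the primary table usually split here.
    size_t secondary = (primary - name) & (kSecondarySize - 1);
    if (Matches(secondary_[secondary], name, map))
      return secondary_[secondary].code;
    return nullptr;  // miss: the generated code falls through to the runtime
  }

 private:
  static bool Matches(const Entry& e, uintptr_t name, uintptr_t map) {
    return e.code != nullptr && e.name == name && e.map == map;
  }
  Entry primary_[kPrimarySize] = {};
  Entry secondary_[kSecondarySize] = {};
};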
index 7aee6f1..4223b28 100644 (file)
@@ -52,10 +52,10 @@ class StubCache {
   // Arguments extra, extra2 and extra3 may be used to pass additional scratch
   // registers. Set to no_reg if not needed.
   // If leave_frame is true, then exit a frame before the tail call.
-  void GenerateProbe(MacroAssembler* masm, Code::Flags flags, bool leave_frame,
-                     Register receiver, Register name, Register scratch,
-                     Register extra, Register extra2 = no_reg,
-                     Register extra3 = no_reg);
+  void GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+                     Code::Flags flags, bool leave_frame, Register receiver,
+                     Register name, Register scratch, Register extra,
+                     Register extra2 = no_reg, Register extra3 = no_reg);
 
   enum Table { kPrimary, kSecondary };
 
index a782b08..46fa8cc 100644 (file)
@@ -15,6 +15,28 @@ namespace internal {
 
 #define __ ACCESS_MASM(masm)
 
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Push(vector);
+  __ Push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ Pop(slot);
+  __ Pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ addp(rsp, Immediate(2 * kPointerSize));
+}
+
+
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -56,24 +78,16 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
 
 
 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
-  Isolate* isolate = masm->isolate();
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(isolate->native_context()->get(index)));
-
-  // Check we're still in the same context.
-  Register scratch = prototype;
+    MacroAssembler* masm, int index, Register result, Label* miss) {
   const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ movp(scratch, Operand(rsi, offset));
-  __ movp(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ Cmp(Operand(scratch, Context::SlotOffset(index)), function);
-  __ j(not_equal, miss);
-
+  __ movp(result, Operand(rsi, offset));
+  __ movp(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
+  __ movp(result, Operand(result, Context::SlotOffset(index)));
   // Load its initial map. The global functions all have initial maps.
-  __ Move(prototype, Handle<Map>(function->initial_map()));
+  __ movp(result,
+          FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
   // Load the prototype from the initial map.
-  __ movp(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+  __ movp(result, FieldOperand(result, Map::kPrototypeOffset));
 }
 
 
@@ -324,17 +338,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
 }
 
 
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
-    Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
   __ Move(this->name(), name);
-  __ Move(StoreTransitionDescriptor::MapRegister(), transition);
 }
 
 
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+                                                   Register scratch,
+                                                   Label* miss) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+  Register map_reg = StoreTransitionDescriptor::MapRegister();
+  DCHECK(!map_reg.is(scratch));
+  __ LoadWeakValue(map_reg, cell, miss);
+  if (transition->CanBeDeprecated()) {
+    __ movl(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
+    __ andl(scratch, Immediate(Map::Deprecated::kMask));
+    __ j(not_zero, miss);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+                                                      int descriptor,
                                                       Register value_reg,
+                                                      Register scratch,
                                                       Label* miss_label) {
-  __ Cmp(value_reg, handle(constant, isolate()));
+  DCHECK(!map_reg.is(scratch));
+  DCHECK(!map_reg.is(value_reg));
+  DCHECK(!value_reg.is(scratch));
+  __ LoadInstanceDescriptors(map_reg, scratch);
+  __ movp(scratch,
+          FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+  __ cmpp(value_reg, scratch);
   __ j(not_equal, miss_label);
 }
 
@@ -413,18 +448,13 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       reg = holder_reg;  // From now on the object will be in holder_reg.
       __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      bool in_new_space = heap()->InNewSpace(*prototype);
-      // Two possible reasons for loading the prototype from the map:
-      // (1) Can't store references to new space in code.
-      // (2) Handler is shared for all receivers with the same prototype
-      //     map (but not necessarily the same prototype instance).
-      bool load_prototype_from_map = in_new_space || depth == 1;
-      if (load_prototype_from_map) {
-        // Save the map in scratch1 for later.
-        __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
+      Register map_reg = scratch1;
+      __ movp(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
+
       if (depth != 1 || check == CHECK_ALL_MAPS) {
-        __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+        __ CmpWeakValue(map_reg, cell, scratch2);
+        __ j(not_equal, miss);
       }
 
       // Check access rights to the global object.  This has to happen after
@@ -441,11 +471,7 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       }
       reg = holder_reg;  // From now on the object will be in holder_reg.
 
-      if (load_prototype_from_map) {
-        __ movp(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        __ Move(reg, prototype);
-      }
+      __ movp(reg, FieldOperand(map_reg, Map::kPrototypeOffset));
     }
 
     // Go to the next object in the prototype chain.
@@ -457,8 +483,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
   LOG(isolate(), IntEvent("check-maps-depth", depth + 1));
 
   if (depth != 0 || check == CHECK_ALL_MAPS) {
-    // Check the holder map.
-    __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+    __ movp(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ j(not_equal, miss);
   }
 
   // Perform security check for access to the global object.
@@ -478,6 +506,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
     Label success;
     __ jmp(&success);
     __ bind(miss);
+    if (IC::ICUseVector(kind())) {
+      DCHECK(kind() == Code::LOAD_IC);
+      PopVectorAndSlot();
+    }
     TailCallBuiltin(masm(), MissBuiltin(kind()));
     __ bind(&success);
   }
@@ -576,6 +608,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     }
     __ Push(holder_reg);
     __ Push(this->name());
+    InterceptorVectorSlotPush(holder_reg);
 
     // Invoke an interceptor.  Note: map checks from receiver to
     // interceptor's holder have been compiled before (see a caller
@@ -593,6 +626,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     __ ret(0);
 
     __ bind(&interceptor_failed);
+    InterceptorVectorSlotPop(holder_reg);
     __ Pop(this->name());
     __ Pop(holder_reg);
     if (must_preserve_receiver_reg) {
@@ -625,7 +659,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name,
     Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = Frontend(receiver(), name);
+  Register holder_reg = Frontend(name);
 
   __ PopReturnAddressTo(scratch1());
   __ Push(receiver());
@@ -671,11 +705,15 @@ Register NamedStoreHandlerCompiler::value() {
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
     Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
   Label miss;
+  if (IC::ICUseVector(kind())) {
+    PushVectorAndSlot();
+  }
   FrontendHeader(receiver(), name, &miss);
 
   // Get the value from the cell.
   Register result = StoreDescriptor::ValueRegister();
-  __ Move(result, cell);
+  Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+  __ LoadWeakValue(result, weak_cell, &miss);
   __ movp(result, FieldOperand(result, PropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
@@ -689,6 +727,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
 
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_load_global_stub(), 1);
+  if (IC::ICUseVector(kind())) {
+    DiscardVectorAndSlot();
+  }
   __ ret(0);
 
   FrontendFooter(name, &miss);
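
A theme running through this file is that compiled handlers no longer embed strong Handle<Map> or Handle<PropertyCell> constants: they go through WeakCells (CmpWeakValue, LoadWeakValue), so a handler simply misses once the GC clears the cell instead of keeping the object alive. As a rough host-side analogy only (V8's WeakCell is a heap object, not std::weak_ptr), the check works like this:

#include <memory>

struct Map { int instance_type; };

struct HandlerModel {
  std::weak_ptr<Map> expected_map;  // weak: does not keep the map alive

  // True only if the expected map is still live and matches the receiver's.
  // A cleared cell behaves exactly like a mismatch: the handler misses.
  bool CheckMap(const std::shared_ptr<Map>& receiver_map) const {
    std::shared_ptr<Map> expected = expected_map.lock();
    if (!expected) return false;  // GC cleared the weak cell
    return expected.get() == receiver_map.get();
  }
};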
index a5848b6..5be9c46 100644 (file)
@@ -42,20 +42,22 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
     MapHandleList* receiver_maps, CodeHandleList* handler_stubs,
     MapHandleList* transitioned_maps) {
   Label miss;
-  __ JumpIfSmi(receiver(), &miss, Label::kNear);
+  __ JumpIfSmi(receiver(), &miss);
 
-  __ movp(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+  Register map_reg = scratch1();
+  __ movp(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
   int receiver_count = receiver_maps->length();
   for (int i = 0; i < receiver_count; ++i) {
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
     // Check map and tail call if there's a match.
-    __ Cmp(scratch1(), receiver_maps->at(i));
+    __ CmpWeakValue(map_reg, cell, scratch2());
     if (transitioned_maps->at(i).is_null()) {
       __ j(equal, handler_stubs->at(i), RelocInfo::CODE_TARGET);
     } else {
       Label next_map;
       __ j(not_equal, &next_map, Label::kNear);
-      __ Move(transition_map(), transitioned_maps->at(i),
-              RelocInfo::EMBEDDED_OBJECT);
+      Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+      __ LoadWeakValue(transition_map(), cell, &miss);
       __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
       __ bind(&next_map);
     }
@@ -79,9 +81,12 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
 
   if (check == PROPERTY &&
       (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    // In case we are compiling an IC for dictionary loads and stores, just
+    // In case we are compiling an IC for dictionary loads or stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      // Keyed loads with dictionaries shouldn't be here; they go generic.
+      // The DCHECK is to protect assumptions when --vector-ics is on.
+      DCHECK(kind() != Code::KEYED_LOAD_IC);
       Register tmp = scratch1();
       __ JumpIfSmi(this->name(), &miss);
       __ movp(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
@@ -109,8 +114,9 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
     Handle<Map> map = IC::TypeToMap(*type, isolate());
     if (!map->is_deprecated()) {
       number_of_handled_maps++;
+      Handle<WeakCell> cell = Map::WeakCellForMap(map);
       // Check map and tail call if there's a match.
-      __ Cmp(map_reg, map);
+      __ CmpWeakValue(map_reg, cell, scratch2());
       if (type->Is(HeapType::Number())) {
         DCHECK(!number_case.is_unused());
         __ bind(&number_case);
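
The polymorphic stub compiled here is a linear chain of map comparisons, each one tail-calling its handler on a match, with a single shared miss exit. A hypothetical C++ model of that dispatch (the PolymorphicIC type, HandlerFn signature and four-map cap are illustrative, not V8's):

typedef void (*HandlerFn)(void* receiver);

struct PolymorphicIC {
  static const int kMaxCases = 4;
  const void* maps[kMaxCases];   // weakly held maps in the real stub
  HandlerFn handlers[kMaxCases];
  int count;
  HandlerFn miss;                // falls back to the runtime

  void Dispatch(void* receiver, const void* receiver_map) const {
    for (int i = 0; i < count; i++) {
      if (maps[i] == receiver_map) {  // the CmpWeakValue analogue
        handlers[i](receiver);        // tail call to the matching handler
        return;
      }
    }
    miss(receiver);  // no map matched: enter the runtime
  }
};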
index f125322..2709a85 100644 (file)
@@ -119,9 +119,8 @@ static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss_label,
       NameDictionary::kElementsStartIndex * kPointerSize;
   const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
   const int kTypeAndReadOnlyMask =
-      (PropertyDetails::TypeField::kMask |
-       PropertyDetails::AttributesField::encode(READ_ONLY))
-      << kSmiTagSize;
+      PropertyDetails::TypeField::kMask |
+      PropertyDetails::AttributesField::encode(READ_ONLY);
   __ Test(Operand(elements, scratch1, times_pointer_size,
                   kDetailsOffset - kHeapObjectTag),
           Smi::FromInt(kTypeAndReadOnlyMask));
@@ -590,8 +589,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
   __ JumpIfNotUniqueNameInstanceType(r9, &slow_with_tagged_index);
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               key, rbx, no_reg);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, key, rbx, no_reg);
   // Cache miss.
   __ jmp(&miss);
 
@@ -763,11 +762,30 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
 }
 
 
-// A register that isn't one of the parameters to the load ic.
-static const Register LoadIC_TempRegister() { return rbx; }
-
+static void LoadIC_PushArgs(MacroAssembler* masm) {
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  if (FLAG_vector_ics) {
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+    DCHECK(!rdi.is(receiver) && !rdi.is(name) && !rdi.is(slot) &&
+           !rdi.is(vector));
+
+    __ PopReturnAddressTo(rdi);
+    __ Push(receiver);
+    __ Push(name);
+    __ Push(slot);
+    __ Push(vector);
+    __ PushReturnAddressFrom(rdi);
+  } else {
+    DCHECK(!rbx.is(receiver) && !rbx.is(name));
 
-static const Register KeyedLoadIC_TempRegister() { return rbx; }
+    __ PopReturnAddressTo(rbx);
+    __ Push(receiver);
+    __ Push(name);
+    __ PushReturnAddressFrom(rbx);
+  }
+}
 
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
@@ -776,25 +794,26 @@ void LoadIC::GenerateMiss(MacroAssembler* masm) {
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->load_miss(), 1);
 
-  __ PopReturnAddressTo(LoadIC_TempRegister());
-  __ Push(LoadDescriptor::ReceiverRegister());  // receiver
-  __ Push(LoadDescriptor::NameRegister());      // name
-  __ PushReturnAddressFrom(LoadIC_TempRegister());
+  LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   ExternalReference ref =
       ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is on the stack.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  DCHECK(!rbx.is(receiver) && !rbx.is(name));
 
-  __ PopReturnAddressTo(LoadIC_TempRegister());
-  __ Push(LoadDescriptor::ReceiverRegister());  // receiver
-  __ Push(LoadDescriptor::NameRegister());      // name
-  __ PushReturnAddressFrom(LoadIC_TempRegister());
+  __ PopReturnAddressTo(rbx);
+  __ Push(receiver);
+  __ Push(name);
+  __ PushReturnAddressFrom(rbx);
 
   // Perform tail call to the entry.
   __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
@@ -806,25 +825,26 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->keyed_load_miss(), 1);
 
-  __ PopReturnAddressTo(KeyedLoadIC_TempRegister());
-  __ Push(LoadDescriptor::ReceiverRegister());  // receiver
-  __ Push(LoadDescriptor::NameRegister());      // name
-  __ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
+  LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   ExternalReference ref =
       ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is on the stack.
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  DCHECK(!rbx.is(receiver) && !rbx.is(name));
 
-  __ PopReturnAddressTo(KeyedLoadIC_TempRegister());
-  __ Push(LoadDescriptor::ReceiverRegister());  // receiver
-  __ Push(LoadDescriptor::NameRegister());      // name
-  __ PushReturnAddressFrom(KeyedLoadIC_TempRegister());
+  __ PopReturnAddressTo(rbx);
+  __ Push(receiver);
+  __ Push(name);
+  __ PushReturnAddressFrom(rbx);
 
   // Perform tail call to the entry.
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
@@ -838,7 +858,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, false, StoreDescriptor::ReceiverRegister(),
+      masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
       StoreDescriptor::NameRegister(), rbx, no_reg);
 
   // Cache miss: Jump to runtime.
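
Both miss paths above now branch on FLAG_vector_ics: with vector-based ICs the type-feedback vector and slot index travel with the receiver and name to the runtime, so the tail call passes four arguments instead of two. A small illustrative sketch of the two argument shapes (Value and BuildLoadMissArgs are hypothetical stand-ins, not V8 API):

#include <vector>

struct Value {};  // stand-in for a tagged V8 value

std::vector<Value*> BuildLoadMissArgs(bool vector_ics, Value* receiver,
                                      Value* name, Value* slot,
                                      Value* vector) {
  std::vector<Value*> args;
  args.push_back(receiver);
  args.push_back(name);
  if (vector_ics) {  // FLAG_vector_ics: the slot and vector ride along
    args.push_back(slot);
    args.push_back(vector);
  }
  return args;  // size() is 4 or 2, matching arg_count in the code above
}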
index 8aff1ea..f15635c 100644 (file)
@@ -7,7 +7,9 @@
 #if V8_TARGET_ARCH_X64
 
 #include "src/codegen.h"
+#include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
 
 namespace v8 {
 namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
 
 
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Flags flags, bool leave_frame,
+                       Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
                        StubCache::Table table, Register receiver, Register name,
                        // The offset is scaled by 4, based on
                        // kCacheIndexShift, which is two bits
@@ -82,10 +84,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
 }
 
 
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
-                              bool leave_frame, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+                              Code::Flags flags, bool leave_frame,
+                              Register receiver, Register name,
+                              Register scratch, Register extra, Register extra2,
+                              Register extra3) {
   Isolate* isolate = masm->isolate();
   Label miss;
   USE(extra);   // The register extra is not used on the X64 platform.
@@ -107,6 +110,17 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   DCHECK(extra2.is(no_reg));
   DCHECK(extra3.is(no_reg));
 
+#ifdef DEBUG
+  // If vector-based ICs are in use, ensure that scratch doesn't conflict with
+  // the vector and slot registers, which need to be preserved for a handler
+  // call or miss.
+  if (IC::ICUseVector(ic_kind)) {
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    DCHECK(!AreAliased(vector, slot, scratch));
+  }
+#endif
+
   Counters* counters = masm->isolate()->counters();
   __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1);
 
@@ -123,8 +137,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ andp(scratch, Immediate((kPrimaryTableSize - 1) << kCacheIndexShift));
 
   // Probe the primary table.
-  ProbeTable(isolate, masm, flags, leave_frame, kPrimary, receiver, name,
-             scratch);
+  ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kPrimary, receiver,
+             name, scratch);
 
   // Primary miss: Compute hash for secondary probe.
   __ movl(scratch, FieldOperand(name, Name::kHashFieldOffset));
@@ -136,8 +150,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ andp(scratch, Immediate((kSecondaryTableSize - 1) << kCacheIndexShift));
 
   // Probe the secondary table.
-  ProbeTable(isolate, masm, flags, leave_frame, kSecondary, receiver, name,
-             scratch);
+  ProbeTable(isolate, masm, ic_kind, flags, leave_frame, kSecondary, receiver,
+             name, scratch);
 
   // Cache miss: Fall through and let the caller handle the miss by
   // entering the runtime system.
index ae637d1..2ff3595 100644 (file)
@@ -47,6 +47,28 @@ void NamedLoadHandlerCompiler::GenerateLoadViaGetter(
 }
 
 
+void PropertyHandlerCompiler::PushVectorAndSlot(Register vector,
+                                                Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ push(vector);
+  __ push(slot);
+}
+
+
+void PropertyHandlerCompiler::PopVectorAndSlot(Register vector, Register slot) {
+  MacroAssembler* masm = this->masm();
+  __ pop(slot);
+  __ pop(vector);
+}
+
+
+void PropertyHandlerCompiler::DiscardVectorAndSlot() {
+  MacroAssembler* masm = this->masm();
+  // Remove vector and slot.
+  __ add(esp, Immediate(2 * kPointerSize));
+}
+
+
 void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
     MacroAssembler* masm, Label* miss_label, Register receiver,
     Handle<Name> name, Register scratch0, Register scratch1) {
@@ -88,28 +110,23 @@ void PropertyHandlerCompiler::GenerateDictionaryNegativeLookup(
 
 
 void NamedLoadHandlerCompiler::GenerateDirectLoadGlobalFunctionPrototype(
-    MacroAssembler* masm, int index, Register prototype, Label* miss) {
-  // Get the global function with the given index.
-  Handle<JSFunction> function(
-      JSFunction::cast(masm->isolate()->native_context()->get(index)));
-  // Check we're still in the same context.
-  Register scratch = prototype;
+    MacroAssembler* masm, int index, Register result, Label* miss) {
   const int offset = Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX);
-  __ mov(scratch, Operand(esi, offset));
-  __ mov(scratch, FieldOperand(scratch, GlobalObject::kNativeContextOffset));
-  __ cmp(Operand(scratch, Context::SlotOffset(index)), function);
-  __ j(not_equal, miss);
-
+  __ mov(result, Operand(esi, offset));
+  __ mov(result, FieldOperand(result, GlobalObject::kNativeContextOffset));
+  __ mov(result, Operand(result, Context::SlotOffset(index)));
   // Load its initial map. The global functions all have initial maps.
-  __ Move(prototype, Immediate(Handle<Map>(function->initial_map())));
+  __ mov(result,
+         FieldOperand(result, JSFunction::kPrototypeOrInitialMapOffset));
   // Load the prototype from the initial map.
-  __ mov(prototype, FieldOperand(prototype, Map::kPrototypeOffset));
+  __ mov(result, FieldOperand(result, Map::kPrototypeOffset));
 }
 
 
 void NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(
     MacroAssembler* masm, Register receiver, Register scratch1,
     Register scratch2, Label* miss_label) {
+  DCHECK(!FLAG_vector_ics);
   __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
   __ mov(eax, scratch1);
   __ ret(0);
@@ -329,17 +346,38 @@ void NamedStoreHandlerCompiler::GenerateRestoreName(Label* label,
 }
 
 
-void NamedStoreHandlerCompiler::GenerateRestoreNameAndMap(
-    Handle<Name> name, Handle<Map> transition) {
+void NamedStoreHandlerCompiler::GenerateRestoreName(Handle<Name> name) {
   __ mov(this->name(), Immediate(name));
-  __ mov(StoreTransitionDescriptor::MapRegister(), Immediate(transition));
 }
 
 
-void NamedStoreHandlerCompiler::GenerateConstantCheck(Object* constant,
+void NamedStoreHandlerCompiler::GenerateRestoreMap(Handle<Map> transition,
+                                                   Register scratch,
+                                                   Label* miss) {
+  Handle<WeakCell> cell = Map::WeakCellForMap(transition);
+  Register map_reg = StoreTransitionDescriptor::MapRegister();
+  DCHECK(!map_reg.is(scratch));
+  __ LoadWeakValue(map_reg, cell, miss);
+  if (transition->CanBeDeprecated()) {
+    __ mov(scratch, FieldOperand(map_reg, Map::kBitField3Offset));
+    __ and_(scratch, Immediate(Map::Deprecated::kMask));
+    __ j(not_zero, miss);
+  }
+}
+
+
+void NamedStoreHandlerCompiler::GenerateConstantCheck(Register map_reg,
+                                                      int descriptor,
                                                       Register value_reg,
+                                                      Register scratch,
                                                       Label* miss_label) {
-  __ CmpObject(value_reg, handle(constant, isolate()));
+  DCHECK(!map_reg.is(scratch));
+  DCHECK(!map_reg.is(value_reg));
+  DCHECK(!value_reg.is(scratch));
+  __ LoadInstanceDescriptors(map_reg, scratch);
+  __ mov(scratch,
+         FieldOperand(scratch, DescriptorArray::GetValueOffset(descriptor)));
+  __ cmp(value_reg, scratch);
   __ j(not_equal, miss_label);
 }
 
@@ -415,14 +453,12 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       reg = holder_reg;  // From now on the object will be in holder_reg.
       __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
     } else {
-      bool in_new_space = heap()->InNewSpace(*prototype);
-      // Two possible reasons for loading the prototype from the map:
-      // (1) Can't store references to new space in code.
-      // (2) Handler is shared for all receivers with the same prototype
-      //     map (but not necessarily the same prototype instance).
-      bool load_prototype_from_map = in_new_space || depth == 1;
+      Register map_reg = scratch1;
+      __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
       if (depth != 1 || check == CHECK_ALL_MAPS) {
-        __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+        Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+        __ CmpWeakValue(map_reg, cell, scratch2);
+        __ j(not_equal, miss);
       }
 
       // Check access rights to the global object.  This has to happen after
@@ -432,24 +468,15 @@ Register PropertyHandlerCompiler::CheckPrototypes(
       // global proxy (as opposed to using slow ICs). See corresponding code
       // in LookupForRead().
       if (current_map->IsJSGlobalProxyMap()) {
-        __ CheckAccessGlobalProxy(reg, scratch1, scratch2, miss);
+        __ CheckAccessGlobalProxy(reg, map_reg, scratch2, miss);
+        // Restore map_reg.
+        __ mov(map_reg, FieldOperand(reg, HeapObject::kMapOffset));
       } else if (current_map->IsJSGlobalObjectMap()) {
         GenerateCheckPropertyCell(masm(), Handle<JSGlobalObject>::cast(current),
                                   name, scratch2, miss);
       }
-
-      if (load_prototype_from_map) {
-        // Save the map in scratch1 for later.
-        __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
-      }
-
       reg = holder_reg;  // From now on the object will be in holder_reg.
-
-      if (load_prototype_from_map) {
-        __ mov(reg, FieldOperand(scratch1, Map::kPrototypeOffset));
-      } else {
-        __ mov(reg, prototype);
-      }
+      __ mov(reg, FieldOperand(map_reg, Map::kPrototypeOffset));
     }
 
     // Go to the next object in the prototype chain.
@@ -462,7 +489,10 @@ Register PropertyHandlerCompiler::CheckPrototypes(
 
   if (depth != 0 || check == CHECK_ALL_MAPS) {
     // Check the holder map.
-    __ CheckMap(reg, current_map, miss, DONT_DO_SMI_CHECK);
+    __ mov(scratch1, FieldOperand(reg, HeapObject::kMapOffset));
+    Handle<WeakCell> cell = Map::WeakCellForMap(current_map);
+    __ CmpWeakValue(scratch1, cell, scratch2);
+    __ j(not_equal, miss);
   }
 
   // Perform security check for access to the global object.
@@ -482,6 +512,10 @@ void NamedLoadHandlerCompiler::FrontendFooter(Handle<Name> name, Label* miss) {
     Label success;
     __ jmp(&success);
     __ bind(miss);
+    if (IC::ICUseVector(kind())) {
+      DCHECK(kind() == Code::LOAD_IC);
+      PopVectorAndSlot();
+    }
     TailCallBuiltin(masm(), MissBuiltin(kind()));
     __ bind(&success);
   }
@@ -581,7 +615,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
     }
     __ push(holder_reg);
     __ push(this->name());
-
+    InterceptorVectorSlotPush(holder_reg);
     // Invoke an interceptor.  Note: map checks from receiver to
     // interceptor's holder have been compiled before (see a caller
     // of this method).
@@ -605,6 +639,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptorWithFollowup(
       __ mov(this->name(), Immediate(bit_cast<int32_t>(kZapValue)));
     }
 
+    InterceptorVectorSlotPop(holder_reg);
     __ pop(this->name());
     __ pop(holder_reg);
     if (must_preserve_receiver_reg) {
@@ -637,7 +672,7 @@ void NamedLoadHandlerCompiler::GenerateLoadInterceptor(Register holder_reg) {
 Handle<Code> NamedStoreHandlerCompiler::CompileStoreCallback(
     Handle<JSObject> object, Handle<Name> name,
     Handle<ExecutableAccessorInfo> callback) {
-  Register holder_reg = Frontend(receiver(), name);
+  Register holder_reg = Frontend(name);
 
   __ pop(scratch1());  // remove the return address
   __ push(receiver());
@@ -683,16 +718,15 @@ Register NamedStoreHandlerCompiler::value() {
 Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
     Handle<PropertyCell> cell, Handle<Name> name, bool is_configurable) {
   Label miss;
-
+  if (IC::ICUseVector(kind())) {
+    PushVectorAndSlot();
+  }
   FrontendHeader(receiver(), name, &miss);
   // Get the value from the cell.
   Register result = StoreDescriptor::ValueRegister();
-  if (masm()->serializer_enabled()) {
-    __ mov(result, Immediate(cell));
-    __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
-  } else {
-    __ mov(result, Operand::ForCell(cell));
-  }
+  Handle<WeakCell> weak_cell = factory()->NewWeakCell(cell);
+  __ LoadWeakValue(result, weak_cell, &miss);
+  __ mov(result, FieldOperand(result, PropertyCell::kValueOffset));
 
   // Check for deleted property if property can actually be deleted.
   if (is_configurable) {
@@ -706,6 +740,9 @@ Handle<Code> NamedLoadHandlerCompiler::CompileLoadGlobal(
   Counters* counters = isolate()->counters();
   __ IncrementCounter(counters->named_load_global_stub(), 1);
   // The code above already loads the result into the return register.
+  if (IC::ICUseVector(kind())) {
+    DiscardVectorAndSlot();
+  }
   __ ret(0);
 
   FrontendFooter(name, &miss);
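
GenerateConstantCheck above now reloads the expected constant out of the map's descriptor array at dispatch time instead of baking the constant object into the stub, so the handler stays correct even if the constant moves or the handler is shared across maps. A minimal sketch of the check under hypothetical stand-in types (DescriptorArrayLike, MapLike):

struct DescriptorArrayLike { const void* values[8]; };
struct MapLike { DescriptorArrayLike* instance_descriptors; };

// True when the stored value still equals the constant recorded in the
// descriptor; in the stub, a mismatch jumps to miss_label instead.
bool ConstantFieldCheck(const MapLike* map, int descriptor,
                        const void* value) {
  const void* expected = map->instance_descriptors->values[descriptor];
  return expected == value;
}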
index 20b47e7..89bd937 100644 (file)
@@ -44,10 +44,13 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
   Label miss;
 
   if (check == PROPERTY &&
-      (kind() == Code::KEYED_LOAD_IC || kind() == Code::KEYED_STORE_IC)) {
-    // In case we are compiling an IC for dictionary loads and stores, just
+      (kind() == Code::KEYED_STORE_IC || kind() == Code::KEYED_LOAD_IC)) {
+    // In case we are compiling an IC for dictionary loads or stores, just
     // check whether the name is unique.
     if (name.is_identical_to(isolate()->factory()->normal_ic_symbol())) {
+      // Keyed loads with dictionaries shouldn't be here; they go generic.
+      // The DCHECK is to protect assumptions when --vector-ics is on.
+      DCHECK(kind() != Code::KEYED_LOAD_IC);
       Register tmp = scratch1();
       __ JumpIfSmi(this->name(), &miss);
       __ mov(tmp, FieldOperand(this->name(), HeapObject::kMapOffset));
@@ -75,7 +78,8 @@ Handle<Code> PropertyICCompiler::CompilePolymorphic(TypeHandleList* types,
     Handle<Map> map = IC::TypeToMap(*type, isolate());
     if (!map->is_deprecated()) {
       number_of_handled_maps++;
-      __ cmp(map_reg, map);
+      Handle<WeakCell> cell = Map::WeakCellForMap(map);
+      __ CmpWeakValue(map_reg, cell, scratch2());
       if (type->Is(HeapType::Number())) {
         DCHECK(!number_case.is_unused());
         __ bind(&number_case);
@@ -100,15 +104,18 @@ Handle<Code> PropertyICCompiler::CompileKeyedStorePolymorphic(
     MapHandleList* transitioned_maps) {
   Label miss;
   __ JumpIfSmi(receiver(), &miss, Label::kNear);
-  __ mov(scratch1(), FieldOperand(receiver(), HeapObject::kMapOffset));
+  Register map_reg = scratch1();
+  __ mov(map_reg, FieldOperand(receiver(), HeapObject::kMapOffset));
   for (int i = 0; i < receiver_maps->length(); ++i) {
-    __ cmp(scratch1(), receiver_maps->at(i));
+    Handle<WeakCell> cell = Map::WeakCellForMap(receiver_maps->at(i));
+    __ CmpWeakValue(map_reg, cell, scratch2());
     if (transitioned_maps->at(i).is_null()) {
       __ j(equal, handler_stubs->at(i));
     } else {
       Label next_map;
       __ j(not_equal, &next_map, Label::kNear);
-      __ mov(transition_map(), Immediate(transitioned_maps->at(i)));
+      Handle<WeakCell> cell = Map::WeakCellForMap(transitioned_maps->at(i));
+      __ LoadWeakValue(transition_map(), cell, &miss);
       __ jmp(handler_stubs->at(i), RelocInfo::CODE_TARGET);
       __ bind(&next_map);
     }
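
The keyed-store stub above pairs each receiver map with a handler and, optionally, a transition map that must be materialized (LoadWeakValue into transition_map()) before jumping to the handler; a cleared weak transition map diverts to the miss path. A hypothetical host-side model of that dispatch, with the weak-cell clearing reduced to a null check:

typedef void (*StoreHandler)(void* receiver, const void* transition_map);

struct KeyedStoreCase {
  const void* map;             // expected receiver map (weakly held in V8)
  const void* transition_map;  // nullptr when the store does not transition
  StoreHandler handler;
};

void DispatchKeyedStore(void* receiver, const void* receiver_map,
                        const KeyedStoreCase* cases, int count,
                        StoreHandler miss) {
  for (int i = 0; i < count; i++) {
    if (cases[i].map != receiver_map) continue;  // CmpWeakValue analogue
    // The real stub loads the transition map into a dedicated register
    // before the jump; a cleared weak cell would fall through to miss.
    cases[i].handler(receiver, cases[i].transition_map);
    return;
  }
  miss(receiver, nullptr);
}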
index 959b8b6..1004ac0 100644 (file)
@@ -694,8 +694,8 @@ void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
   __ JumpIfNotUniqueNameInstanceType(ebx, &slow);
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
-  masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver,
-                                               key, ebx, no_reg);
+  masm->isolate()->stub_cache()->GenerateProbe(
+      masm, Code::STORE_IC, flags, false, receiver, key, ebx, no_reg);
   // Cache miss.
   __ jmp(&miss);
 
@@ -769,31 +769,52 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
 static void LoadIC_PushArgs(MacroAssembler* masm) {
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register name = LoadDescriptor::NameRegister();
-  DCHECK(!ebx.is(receiver) && !ebx.is(name));
+  if (FLAG_vector_ics) {
+    Register slot = VectorLoadICDescriptor::SlotRegister();
+    Register vector = VectorLoadICDescriptor::VectorRegister();
+    DCHECK(!edi.is(receiver) && !edi.is(name) && !edi.is(slot) &&
+           !edi.is(vector));
+
+    __ pop(edi);
+    __ push(receiver);
+    __ push(name);
+    __ push(slot);
+    __ push(vector);
+    __ push(edi);
+  } else {
+    DCHECK(!ebx.is(receiver) && !ebx.is(name));
 
-  __ pop(ebx);
-  __ push(receiver);
-  __ push(name);
-  __ push(ebx);
+    __ pop(ebx);
+    __ push(receiver);
+    __ push(name);
+    __ push(ebx);
+  }
 }
 
 
 void LoadIC::GenerateMiss(MacroAssembler* masm) {
   // Return address is on the stack.
   __ IncrementCounter(masm->isolate()->counters()->load_miss(), 1);
-
   LoadIC_PushArgs(masm);
 
   // Perform tail call to the entry.
   ExternalReference ref =
       ExternalReference(IC_Utility(kLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // Return address is on the stack.
-  LoadIC_PushArgs(masm);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+  __ pop(ebx);
+  __ push(receiver);
+  __ push(name);
+  __ push(ebx);
 
   // Perform tail call to the entry.
   __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
@@ -809,13 +830,21 @@ void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
   // Perform tail call to the entry.
   ExternalReference ref =
       ExternalReference(IC_Utility(kKeyedLoadIC_Miss), masm->isolate());
-  __ TailCallExternalReference(ref, 2, 1);
+  int arg_count = FLAG_vector_ics ? 4 : 2;
+  __ TailCallExternalReference(ref, arg_count, 1);
 }
 
 
 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // Return address is on the stack.
-  LoadIC_PushArgs(masm);
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  DCHECK(!ebx.is(receiver) && !ebx.is(name));
+
+  __ pop(ebx);
+  __ push(receiver);
+  __ push(name);
+  __ push(ebx);
 
   // Perform tail call to the entry.
   __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
@@ -827,7 +856,7 @@ void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
   Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
       Code::ComputeHandlerFlags(Code::STORE_IC));
   masm->isolate()->stub_cache()->GenerateProbe(
-      masm, flags, false, StoreDescriptor::ReceiverRegister(),
+      masm, Code::STORE_IC, flags, false, StoreDescriptor::ReceiverRegister(),
       StoreDescriptor::NameRegister(), ebx, no_reg);
 
   // Cache miss: Jump to runtime.
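
On ia32/x87 the return address is already on top of the stack when an IC stub wants to push its arguments, so LoadIC_PushArgs pops it into a free register, pushes the arguments, and pushes it back before the tail call. A tiny executable model of that stack shuffle (Word and the function name are illustrative):

#include <cassert>
#include <vector>

typedef unsigned long Word;

void PushArgsBelowReturnAddress(std::vector<Word>* stack, Word receiver,
                                Word name) {
  Word ret = stack->back();    // pop the return address into a scratch slot
  stack->pop_back();
  stack->push_back(receiver);  // push receiver
  stack->push_back(name);      // push name
  stack->push_back(ret);       // restore the return address on top
}

int main() {
  std::vector<Word> stack;
  stack.push_back(0xCAFEu);    // only the return address is on the stack
  PushArgsBelowReturnAddress(&stack, 1, 2);
  assert(stack.back() == 0xCAFEu);  // the tail call still returns correctly
  return 0;
}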
index 0291ef3..be456ce 100644 (file)
@@ -7,7 +7,9 @@
 #if V8_TARGET_ARCH_X87
 
 #include "src/codegen.h"
+#include "src/ic/ic.h"
 #include "src/ic/stub-cache.h"
+#include "src/interface-descriptors.h"
 
 namespace v8 {
 namespace internal {
@@ -16,7 +18,7 @@ namespace internal {
 
 
 static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
-                       Code::Flags flags, bool leave_frame,
+                       Code::Kind ic_kind, Code::Flags flags, bool leave_frame,
                        StubCache::Table table, Register name, Register receiver,
                        // Number of the cache entry, pointer-size scaled.
                        Register offset, Register extra) {
@@ -56,6 +58,13 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
     }
 #endif
 
+    if (IC::ICUseVector(ic_kind)) {
+      // The vector and slot were pushed onto the stack before starting the
+      // probe, and need to be dropped before calling the handler.
+      __ pop(VectorLoadICDescriptor::VectorRegister());
+      __ pop(VectorLoadICDescriptor::SlotRegister());
+    }
+
     if (leave_frame) __ leave();
 
     // Jump to the first instruction in the code stub.
@@ -100,6 +109,18 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
     __ pop(offset);
     __ mov(offset, Operand::StaticArray(offset, times_1, value_offset));
 
+    if (IC::ICUseVector(ic_kind)) {
+      // The vector and slot were pushed onto the stack before starting the
+      // probe, and need to be dropped before calling the handler.
+      Register vector = VectorLoadICDescriptor::VectorRegister();
+      Register slot = VectorLoadICDescriptor::SlotRegister();
+      DCHECK(!offset.is(vector) && !offset.is(slot));
+
+      __ pop(vector);
+      __ pop(slot);
+    }
+
     if (leave_frame) __ leave();
 
     // Jump to the first instruction in the code stub.
@@ -113,10 +134,11 @@ static void ProbeTable(Isolate* isolate, MacroAssembler* masm,
 }
 
 
-void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
-                              bool leave_frame, Register receiver,
-                              Register name, Register scratch, Register extra,
-                              Register extra2, Register extra3) {
+void StubCache::GenerateProbe(MacroAssembler* masm, Code::Kind ic_kind,
+                              Code::Flags flags, bool leave_frame,
+                              Register receiver, Register name,
+                              Register scratch, Register extra, Register extra2,
+                              Register extra3) {
   Label miss;
 
   // Assert that the code is valid. The multiplying code relies on the entry size
@@ -159,8 +181,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   DCHECK(kCacheIndexShift == kPointerSizeLog2);
 
   // Probe the primary table.
-  ProbeTable(isolate(), masm, flags, leave_frame, kPrimary, name, receiver,
-             offset, extra);
+  ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kPrimary, name,
+             receiver, offset, extra);
 
   // Primary miss: Compute hash for secondary probe.
   __ mov(offset, FieldOperand(name, Name::kHashFieldOffset));
@@ -172,8 +194,8 @@ void StubCache::GenerateProbe(MacroAssembler* masm, Code::Flags flags,
   __ and_(offset, (kSecondaryTableSize - 1) << kCacheIndexShift);
 
   // Probe the secondary table.
-  ProbeTable(isolate(), masm, flags, leave_frame, kSecondary, name, receiver,
-             offset, extra);
+  ProbeTable(isolate(), masm, ic_kind, flags, leave_frame, kSecondary, name,
+             receiver, offset, extra);
 
   // Cache miss: Fall through and let the caller handle the miss by
   // entering the runtime system.
index 19f89ce..1843f47 100644 (file)
@@ -123,7 +123,7 @@ class Interface : public ZoneObject {
     return exports ? exports->occupancy() : 0;
   }
 
-  // The context slot in the hosting global context pointing to this module.
+  // The context slot in the hosting script context pointing to this module.
   int Index() {
     DCHECK(IsModule() && IsFrozen());
     return Chase()->index_;
index 2595d2f..b24182b 100644 (file)
@@ -118,6 +118,9 @@ base::Thread::LocalStorageKey Isolate::per_isolate_thread_data_key_;
 base::LazyMutex Isolate::thread_data_table_mutex_ = LAZY_MUTEX_INITIALIZER;
 Isolate::ThreadDataTable* Isolate::thread_data_table_ = NULL;
 base::Atomic32 Isolate::isolate_counter_ = 0;
+#if DEBUG
+base::Atomic32 Isolate::isolate_key_created_ = 0;
+#endif
 
 Isolate::PerIsolateThreadData*
     Isolate::FindOrAllocatePerThreadDataForThisThread() {
@@ -157,6 +160,9 @@ void Isolate::InitializeOncePerProcess() {
   base::LockGuard<base::Mutex> lock_guard(thread_data_table_mutex_.Pointer());
   CHECK(thread_data_table_ == NULL);
   isolate_key_ = base::Thread::CreateThreadLocalKey();
+#if DEBUG
+  base::NoBarrier_Store(&isolate_key_created_, 1);
+#endif
   thread_id_key_ = base::Thread::CreateThreadLocalKey();
   per_isolate_thread_data_key_ = base::Thread::CreateThreadLocalKey();
   thread_data_table_ = new Isolate::ThreadDataTable();
@@ -796,14 +802,26 @@ static MayAccessDecision MayAccessPreCheck(Isolate* isolate,
 }
 
 
+bool Isolate::IsInternallyUsedPropertyName(Handle<Object> name) {
+  return name.is_identical_to(factory()->hidden_string()) ||
+         name.is_identical_to(factory()->prototype_users_symbol());
+}
+
+
+bool Isolate::IsInternallyUsedPropertyName(Object* name) {
+  return name == heap()->hidden_string() ||
+         name == heap()->prototype_users_symbol();
+}
+
+
 bool Isolate::MayNamedAccess(Handle<JSObject> receiver,
                              Handle<Object> key,
                              v8::AccessType type) {
   DCHECK(receiver->IsJSGlobalProxy() || receiver->IsAccessCheckNeeded());
 
-  // Skip checks for hidden properties access.  Note, we do not
+  // Skip checks for internally used properties. Note that we do not
   // require existence of a context in this case.
-  if (key.is_identical_to(factory()->hidden_string())) return true;
+  if (IsInternallyUsedPropertyName(key)) return true;
 
   // Check for compatibility between the security tokens in the
   // current lexical context and the accessed object.
@@ -911,22 +929,26 @@ void Isolate::CancelTerminateExecution() {
 }
 
 
-void Isolate::InvokeApiInterruptCallback() {
-  // Note: callback below should be called outside of execution access lock.
-  InterruptCallback callback = NULL;
-  void* data = NULL;
-  {
-    ExecutionAccess access(this);
-    callback = api_interrupt_callback_;
-    data = api_interrupt_callback_data_;
-    api_interrupt_callback_ = NULL;
-    api_interrupt_callback_data_ = NULL;
-  }
+void Isolate::RequestInterrupt(InterruptCallback callback, void* data) {
+  ExecutionAccess access(this);
+  api_interrupts_queue_.push(InterruptEntry(callback, data));
+  stack_guard()->RequestApiInterrupt();
+}
 
-  if (callback != NULL) {
+
+void Isolate::InvokeApiInterruptCallbacks() {
+  // Note: callbacks below are invoked outside of the execution access lock.
+  while (true) {
+    InterruptEntry entry;
+    {
+      ExecutionAccess access(this);
+      if (api_interrupts_queue_.empty()) return;
+      entry = api_interrupts_queue_.front();
+      api_interrupts_queue_.pop();
+    }
     VMState<EXTERNAL> state(this);
     HandleScope handle_scope(this);
-    callback(reinterpret_cast<v8::Isolate*>(this), data);
+    entry.first(reinterpret_cast<v8::Isolate*>(this), entry.second);
   }
 }
 
@@ -1485,11 +1507,6 @@ Handle<Context> Isolate::native_context() {
 }
 
 
-Handle<Context> Isolate::global_context() {
-  return handle(context()->global_object()->global_context());
-}
-
-
 Handle<Context> Isolate::GetCallingNativeContext() {
   JavaScriptFrameIterator it(this);
   if (debug_->in_debug_scope()) {
@@ -1650,6 +1667,9 @@ Isolate::Isolate(bool enable_serializer)
       optimizing_compiler_thread_(NULL),
       stress_deopt_count_(0),
       next_optimization_id_(0),
+#if TRACE_MAPS
+      next_unique_sfi_id_(0),
+#endif
       use_counter_callback_(NULL),
       basic_block_profiler_(NULL) {
   {
@@ -1745,11 +1765,7 @@ void Isolate::Deinit() {
     heap_.mark_compact_collector()->EnsureSweepingCompleted();
   }
 
-  if (turbo_statistics() != NULL) {
-    OFStream os(stdout);
-    os << *turbo_statistics() << std::endl;
-  }
-  if (FLAG_hydrogen_stats) GetHStatistics()->Print();
+  DumpAndResetCompilationStats();
 
   if (FLAG_print_deopt_stress) {
     PrintF(stdout, "=== Stress deopt counter: %u\n", stress_deopt_count_);
@@ -2148,6 +2164,8 @@ bool Isolate::Init(Deserializer* des) {
 
   initialized_from_snapshot_ = (des != NULL);
 
+  if (!FLAG_inline_new) heap_.DisableInlineAllocation();
+
   return true;
 }
 
@@ -2251,6 +2269,19 @@ void Isolate::UnlinkDeferredHandles(DeferredHandles* deferred) {
 }
 
 
+void Isolate::DumpAndResetCompilationStats() {
+  if (turbo_statistics() != nullptr) {
+    OFStream os(stdout);
+    os << *turbo_statistics() << std::endl;
+  }
+  if (hstatistics() != nullptr) hstatistics()->Print();
+  delete turbo_statistics_;
+  turbo_statistics_ = nullptr;
+  delete hstatistics_;
+  hstatistics_ = nullptr;
+}
+
+
 HStatistics* Isolate::GetHStatistics() {
   if (hstatistics() == NULL) set_hstatistics(new HStatistics());
   return hstatistics();
@@ -2350,13 +2381,13 @@ Handle<JSObject> Isolate::GetSymbolRegistry() {
     Handle<JSObject> registry = factory()->NewJSObjectFromMap(map);
     heap()->set_symbol_registry(*registry);
 
-    static const char* nested[] = {
-      "for", "for_api", "for_intern", "keyFor", "private_api", "private_intern"
-    };
+    static const char* nested[] = {"for", "for_api", "keyFor", "private_api",
+                                   "private_intern"};
     for (unsigned i = 0; i < arraysize(nested); ++i) {
       Handle<String> name = factory()->InternalizeUtf8String(nested[i]);
       Handle<JSObject> obj = factory()->NewJSObjectFromMap(map);
-      JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8);
+      JSObject::NormalizeProperties(obj, KEEP_INOBJECT_PROPERTIES, 8,
+                                    "SetupSymbolRegistry");
       JSObject::SetProperty(registry, name, obj, STRICT).Assert();
     }
   }
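
The interrupt rework above replaces the single (callback, data) slot with a queue: RequestInterrupt enqueues under the execution-access lock and arms the stack guard, while InvokeApiInterruptCallbacks drains the queue one entry at a time, releasing the lock before each callback so callbacks may safely re-enter the API (including RequestInterrupt itself). A self-contained sketch of the pattern; InterruptQueueModel is a hypothetical stand-in and the stack-guard arming is reduced to a comment:

#include <mutex>
#include <queue>
#include <utility>

typedef void (*InterruptCallback)(void* data);

class InterruptQueueModel {
 public:
  void RequestInterrupt(InterruptCallback callback, void* data) {
    std::lock_guard<std::mutex> lock(mutex_);
    queue_.push(std::make_pair(callback, data));
    // A real VM would also set a stack-guard flag here so the executing
    // thread checks the queue at its next interrupt point.
  }

  void InvokeApiInterruptCallbacks() {
    for (;;) {
      std::pair<InterruptCallback, void*> entry;
      {
        std::lock_guard<std::mutex> lock(mutex_);
        if (queue_.empty()) return;
        entry = queue_.front();
        queue_.pop();
      }  // lock released: the callback may re-enter RequestInterrupt
      entry.first(entry.second);
    }
  }

 private:
  std::mutex mutex_;
  std::queue<std::pair<InterruptCallback, void*> > queue_;
};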
index 3551632..42a814a 100644 (file)
@@ -5,6 +5,7 @@
 #ifndef V8_ISOLATE_H_
 #define V8_ISOLATE_H_
 
+#include <queue>
 #include "include/v8-debug.h"
 #include "src/allocation.h"
 #include "src/assert-scope.h"
@@ -390,8 +391,6 @@ typedef List<HeapObject*> DebugObjectCache;
   V(bool, fp_stubs_generated, false)                                           \
   V(int, max_available_threads, 0)                                             \
   V(uint32_t, per_isolate_assert_data, 0xFFFFFFFFu)                            \
-  V(InterruptCallback, api_interrupt_callback, NULL)                           \
-  V(void*, api_interrupt_callback_data, NULL)                                  \
   V(PromiseRejectCallback, promise_reject_callback, NULL)                      \
   ISOLATE_INIT_SIMULATOR_LIST(V)
 
@@ -486,6 +485,7 @@ class Isolate {
 
   // Returns the isolate inside which the current thread is running.
   INLINE(static Isolate* Current()) {
+    DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
     Isolate* isolate = reinterpret_cast<Isolate*>(
         base::Thread::GetExistingThreadLocal(isolate_key_));
     DCHECK(isolate != NULL);
@@ -493,6 +493,7 @@ class Isolate {
   }
 
   INLINE(static Isolate* UncheckedCurrent()) {
+    DCHECK(base::NoBarrier_Load(&isolate_key_created_) == 1);
     return reinterpret_cast<Isolate*>(
         base::Thread::GetThreadLocal(isolate_key_));
   }
@@ -760,6 +761,8 @@ class Isolate {
   bool MayIndexedAccess(Handle<JSObject> receiver,
                         uint32_t index,
                         v8::AccessType type);
+  bool IsInternallyUsedPropertyName(Handle<Object> name);
+  bool IsInternallyUsedPropertyName(Object* name);
 
   void SetFailedAccessCheckCallback(v8::FailedAccessCheckCallback callback);
   void ReportFailedAccessCheck(Handle<JSObject> receiver, v8::AccessType type);
@@ -812,7 +815,8 @@ class Isolate {
   Object* TerminateExecution();
   void CancelTerminateExecution();
 
-  void InvokeApiInterruptCallback();
+  void RequestInterrupt(InterruptCallback callback, void* data);
+  void InvokeApiInterruptCallbacks();
 
   // Administration
   void Iterate(ObjectVisitor* v);
@@ -821,9 +825,8 @@ class Isolate {
   void IterateThread(ThreadVisitor* v, char* t);
 
 
-  // Returns the current native and global context.
+  // Returns the current native context.
   Handle<Context> native_context();
-  Handle<Context> global_context();
 
   // Returns the native context of the calling JavaScript code.  That
   // is, the native context of the top-most JavaScript frame.
@@ -1065,6 +1068,8 @@ class Isolate {
   HTracer* GetHTracer();
   CodeTracer* GetCodeTracer();
 
+  void DumpAndResetCompilationStats();
+
   FunctionEntryHook function_entry_hook() { return function_entry_hook_; }
   void set_function_entry_hook(FunctionEntryHook function_entry_hook) {
     function_entry_hook_ = function_entry_hook;
@@ -1109,6 +1114,10 @@ class Isolate {
 
   std::string GetTurboCfgFileName();
 
+#if TRACE_MAPS
+  int GetNextUniqueSharedFunctionInfoId() { return next_unique_sfi_id_++; }
+#endif
+
  private:
   explicit Isolate(bool enable_serializer);
 
@@ -1172,6 +1181,10 @@ class Isolate {
   // A global counter for all generated Isolates, might overflow.
   static base::Atomic32 isolate_counter_;
 
+#if DEBUG
+  static base::Atomic32 isolate_key_created_;
+#endif
+
   void Deinit();
 
   static void SetIsolateThreadLocals(Isolate* isolate,
@@ -1221,7 +1234,6 @@ class Isolate {
   Counters* counters_;
   CodeRange* code_range_;
   base::RecursiveMutex break_access_;
-  base::Atomic32 debugger_initialized_;
   Logger* logger_;
   StackGuard stack_guard_;
   StatsTable* stats_table_;
@@ -1282,6 +1294,9 @@ class Isolate {
   HeapProfiler* heap_profiler_;
   FunctionEntryHook function_entry_hook_;
 
+  typedef std::pair<InterruptCallback, void*> InterruptEntry;
+  std::queue<InterruptEntry> api_interrupts_queue_;
+
 #define GLOBAL_BACKING_STORE(type, name, initialvalue)                         \
   type name##_;
   ISOLATE_INIT_LIST(GLOBAL_BACKING_STORE)
@@ -1311,6 +1326,10 @@ class Isolate {
 
   int next_optimization_id_;
 
+#if TRACE_MAPS
+  int next_unique_sfi_id_;
+#endif
+
   // List of callbacks when a Call completes.
   List<CallCompletedCallback> call_completed_callbacks_;
 
index 2993249..5ebbcdd 100644 (file)
@@ -400,7 +400,8 @@ Handle<Object> JsonParser<seq_one_byte>::ParseJsonObject() {
                            descriptor)->NowContains(value)) {
               Handle<HeapType> value_type(value->OptimalType(
                       isolate(), expected_representation));
-              Map::GeneralizeFieldType(target, descriptor, value_type);
+              Map::GeneralizeFieldType(target, descriptor,
+                                       expected_representation, value_type);
             }
             DCHECK(target->instance_descriptors()->GetFieldType(
                     descriptor)->NowContains(value));
index f89a19f..393551d 100644 (file)
@@ -8,6 +8,7 @@
 #include "src/v8.h"
 
 #include "src/conversions.h"
+#include "src/string-builder.h"
 #include "src/utils.h"
 
 namespace v8 {
@@ -24,42 +25,8 @@ class BasicJsonStringifier BASE_EMBEDDED {
       Handle<String> object));
 
  private:
-  static const int kInitialPartLength = 32;
-  static const int kMaxPartLength = 16 * 1024;
-  static const int kPartLengthGrowthFactor = 2;
-
   enum Result { UNCHANGED, SUCCESS, EXCEPTION };
 
-  void Accumulate();
-
-  void Extend();
-
-  void ChangeEncoding();
-
-  INLINE(void ShrinkCurrentPart());
-
-  template <bool is_one_byte, typename Char>
-  INLINE(void Append_(Char c));
-
-  template <bool is_one_byte, typename Char>
-  INLINE(void Append_(const Char* chars));
-
-  INLINE(void Append(uint8_t c)) {
-    if (is_one_byte_) {
-      Append_<true>(c);
-    } else {
-      Append_<false>(c);
-    }
-  }
-
-  INLINE(void AppendOneByte(const char* chars)) {
-    if (is_one_byte_) {
-      Append_<true>(reinterpret_cast<const uint8_t*>(chars));
-    } else {
-      Append_<false>(reinterpret_cast<const uint8_t*>(chars));
-    }
-  }
-
   MUST_USE_RESULT MaybeHandle<Object> ApplyToJsonFunction(
       Handle<Object> object,
       Handle<Object> key);
@@ -69,14 +36,9 @@ class BasicJsonStringifier BASE_EMBEDDED {
                           bool deferred_comma,
                           bool deferred_key);
 
-  template <typename ResultType, typename Char>
-  INLINE(static Handle<String> StringifyString_(Isolate* isolate,
-                                                Vector<Char> vector,
-                                                Handle<String> result));
-
   // Entry point to serialize the object.
   INLINE(Result SerializeObject(Handle<Object> obj)) {
-    return Serialize_<false>(obj, false, factory_->empty_string());
+    return Serialize_<false>(obj, false, factory()->empty_string());
   }
 
   // Serialize an array element.
@@ -103,9 +65,9 @@ class BasicJsonStringifier BASE_EMBEDDED {
   Result Serialize_(Handle<Object> object, bool comma, Handle<Object> key);
 
   void SerializeDeferredKey(bool deferred_comma, Handle<Object> deferred_key) {
-    if (deferred_comma) Append(',');
+    if (deferred_comma) builder_.AppendCharacter(',');
     SerializeString(Handle<String>::cast(deferred_key));
-    Append(':');
+    builder_.AppendCharacter(':');
   }
 
   Result SerializeSmi(Smi* object);
@@ -125,42 +87,25 @@ class BasicJsonStringifier BASE_EMBEDDED {
   void SerializeString(Handle<String> object);
 
   template <typename SrcChar, typename DestChar>
-  INLINE(static int SerializeStringUnchecked_(const SrcChar* src,
-                                              DestChar* dest,
-                                              int length));
+  INLINE(static void SerializeStringUnchecked_(
+      Vector<const SrcChar> src,
+      IncrementalStringBuilder::NoExtend<DestChar>* dest));
 
-  template <bool is_one_byte, typename Char>
+  template <typename SrcChar, typename DestChar>
   INLINE(void SerializeString_(Handle<String> string));
 
   template <typename Char>
   INLINE(static bool DoNotEscape(Char c));
 
-  template <typename Char>
-  INLINE(static Vector<const Char> GetCharVector(Handle<String> string));
-
   Result StackPush(Handle<Object> object);
   void StackPop();
 
-  INLINE(Handle<String> accumulator()) {
-    return Handle<String>(String::cast(accumulator_store_->value()), isolate_);
-  }
-
-  INLINE(void set_accumulator(Handle<String> string)) {
-    return accumulator_store_->set_value(*string);
-  }
+  Factory* factory() { return isolate_->factory(); }
 
   Isolate* isolate_;
-  Factory* factory_;
-  // We use a value wrapper for the string accumulator to keep the
-  // (indirect) handle to it in the outermost handle scope.
-  Handle<JSValue> accumulator_store_;
-  Handle<String> current_part_;
+  IncrementalStringBuilder builder_;
   Handle<String> tojson_string_;
   Handle<JSArray> stack_;
-  int current_index_;
-  int part_length_;
-  bool is_one_byte_;
-  bool overflowed_;
 
   static const int kJsonEscapeTableEntrySize = 8;
   static const char* const JsonEscapeTable;
@@ -237,31 +182,16 @@ const char* const BasicJsonStringifier::JsonEscapeTable =
 
 
 BasicJsonStringifier::BasicJsonStringifier(Isolate* isolate)
-    : isolate_(isolate),
-      current_index_(0),
-      is_one_byte_(true),
-      overflowed_(false) {
-  factory_ = isolate_->factory();
-  accumulator_store_ = Handle<JSValue>::cast(
-      Object::ToObject(isolate, factory_->empty_string()).ToHandleChecked());
-  part_length_ = kInitialPartLength;
-  current_part_ = factory_->NewRawOneByteString(part_length_).ToHandleChecked();
-  tojson_string_ = factory_->toJSON_string();
-  stack_ = factory_->NewJSArray(8);
+    : isolate_(isolate), builder_(isolate) {
+  tojson_string_ = factory()->toJSON_string();
+  stack_ = factory()->NewJSArray(8);
 }
 
 
 MaybeHandle<Object> BasicJsonStringifier::Stringify(Handle<Object> object) {
   Result result = SerializeObject(object);
-  if (result == UNCHANGED) return isolate_->factory()->undefined_value();
-  if (result == SUCCESS) {
-    ShrinkCurrentPart();
-    Accumulate();
-    if (overflowed_) {
-      THROW_NEW_ERROR(isolate_, NewInvalidStringLengthError(), Object);
-    }
-    return accumulator();
-  }
+  if (result == UNCHANGED) return factory()->undefined_value();
+  if (result == SUCCESS) return builder_.Finish();
   DCHECK(result == EXCEPTION);
   return MaybeHandle<Object>();
 }
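
With this change the stringifier no longer manages part strings, a growth
factor, or an overflow flag itself; IncrementalStringBuilder owns that
state and Finish() either returns the built string or surfaces the length
overflow as an exception. For reference, a rough sketch of the
grow-by-parts strategy the deleted code implemented, with std::string
standing in for V8's handles and cons strings:

    #include <string>

    class SketchStringBuilder {
     public:
      SketchStringBuilder() : part_length_(kInitialPartLength) {
        part_.reserve(part_length_);
      }

      void AppendCharacter(char c) {
        part_.push_back(c);
        if (static_cast<int>(part_.size()) == part_length_) Extend();
      }

      void AppendCString(const char* chars) {
        for (; *chars != '\0'; chars++) AppendCharacter(*chars);
      }

      std::string Finish() {
        accumulator_ += part_;
        part_.clear();
        return accumulator_;
      }

     private:
      static const int kInitialPartLength = 32;
      static const int kMaxPartLength = 16 * 1024;
      static const int kPartLengthGrowthFactor = 2;

      // Flush the full part into the accumulator and allocate a larger
      // part, doubling its size up to a fixed cap.
      void Extend() {
        accumulator_ += part_;
        part_.clear();
        if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
          part_length_ *= kPartLengthGrowthFactor;
        }
        part_.reserve(part_length_);
      }

      std::string accumulator_;
      std::string part_;
      int part_length_;
    };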
@@ -281,58 +211,29 @@ MaybeHandle<Object> BasicJsonStringifier::StringifyString(
 
   object = String::Flatten(object);
   DCHECK(object->IsFlat());
+  Handle<SeqString> result;
   if (object->IsOneByteRepresentationUnderneath()) {
-    Handle<String> result = isolate->factory()->NewRawOneByteString(
-        worst_case_length).ToHandleChecked();
-    DisallowHeapAllocation no_gc;
-    return StringifyString_<SeqOneByteString>(
-        isolate,
-        object->GetFlatContent().ToOneByteVector(),
-        result);
+    result = isolate->factory()
+                 ->NewRawOneByteString(worst_case_length)
+                 .ToHandleChecked();
+    IncrementalStringBuilder::NoExtendString<uint8_t> no_extend(
+        result, worst_case_length);
+    no_extend.Append('\"');
+    SerializeStringUnchecked_(object->GetFlatContent().ToOneByteVector(),
+                              &no_extend);
+    no_extend.Append('\"');
   } else {
-    Handle<String> result = isolate->factory()->NewRawTwoByteString(
-        worst_case_length).ToHandleChecked();
-    DisallowHeapAllocation no_gc;
-    return StringifyString_<SeqTwoByteString>(
-        isolate,
-        object->GetFlatContent().ToUC16Vector(),
-        result);
+    result = isolate->factory()
+                 ->NewRawTwoByteString(worst_case_length)
+                 .ToHandleChecked();
+    IncrementalStringBuilder::NoExtendString<uc16> no_extend(result,
+                                                             worst_case_length);
+    no_extend.Append('\"');
+    SerializeStringUnchecked_(object->GetFlatContent().ToUC16Vector(),
+                              &no_extend);
+    no_extend.Append('\"');
   }
-}
-
-
-template <typename ResultType, typename Char>
-Handle<String> BasicJsonStringifier::StringifyString_(Isolate* isolate,
-                                                      Vector<Char> vector,
-                                                      Handle<String> result) {
-  DisallowHeapAllocation no_gc;
-  int final_size = 0;
-  ResultType* dest = ResultType::cast(*result);
-  dest->Set(final_size++, '\"');
-  final_size += SerializeStringUnchecked_(vector.start(),
-                                          dest->GetChars() + 1,
-                                          vector.length());
-  dest->Set(final_size++, '\"');
-  return SeqString::Truncate(Handle<SeqString>::cast(result), final_size);
-}
-
-
-template <bool is_one_byte, typename Char>
-void BasicJsonStringifier::Append_(Char c) {
-  if (is_one_byte) {
-    SeqOneByteString::cast(*current_part_)->SeqOneByteStringSet(
-        current_index_++, c);
-  } else {
-    SeqTwoByteString::cast(*current_part_)->SeqTwoByteStringSet(
-        current_index_++, c);
-  }
-  if (current_index_ == part_length_) Extend();
-}
-
-
-template <bool is_one_byte, typename Char>
-void BasicJsonStringifier::Append_(const Char* chars) {
-  for (; *chars != '\0'; chars++) Append_<is_one_byte, Char>(*chars);
+  return result;
 }
 
 
@@ -345,7 +246,7 @@ MaybeHandle<Object> BasicJsonStringifier::ApplyToJsonFunction(
   if (!fun->IsJSFunction()) return object;
 
   // Call toJSON function.
-  if (key->IsSmi()) key = factory_->NumberToString(key);
+  if (key->IsSmi()) key = factory()->NumberToString(key);
   Handle<Object> argv[] = { key };
   HandleScope scope(isolate_);
   ASSIGN_RETURN_ON_EXCEPTION(
@@ -372,7 +273,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::StackPush(
       if (elements->get(i) == *object) {
         AllowHeapAllocation allow_to_return_error;
         Handle<Object> error;
-        MaybeHandle<Object> maybe_error = factory_->NewTypeError(
+        MaybeHandle<Object> maybe_error = factory()->NewTypeError(
             "circular_structure", HandleVector<Object>(NULL, 0));
         if (maybe_error.ToHandle(&error)) isolate_->Throw(*error);
         return EXCEPTION;
@@ -416,15 +317,15 @@ BasicJsonStringifier::Result BasicJsonStringifier::Serialize_(
       switch (Oddball::cast(*object)->kind()) {
         case Oddball::kFalse:
           if (deferred_string_key) SerializeDeferredKey(comma, key);
-          AppendOneByte("false");
+          builder_.AppendCString("false");
           return SUCCESS;
         case Oddball::kTrue:
           if (deferred_string_key) SerializeDeferredKey(comma, key);
-          AppendOneByte("true");
+          builder_.AppendCString("true");
           return SUCCESS;
         case Oddball::kNull:
           if (deferred_string_key) SerializeDeferredKey(comma, key);
-          AppendOneByte("null");
+          builder_.AppendCString("null");
           return SUCCESS;
         default:
           return UNCHANGED;
@@ -472,23 +373,11 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeGeneric(
       EXCEPTION);
   if (result->IsUndefined()) return UNCHANGED;
   if (deferred_key) {
-    if (key->IsSmi()) key = factory_->NumberToString(key);
+    if (key->IsSmi()) key = factory()->NumberToString(key);
     SerializeDeferredKey(deferred_comma, key);
   }
 
-  Handle<String> result_string = Handle<String>::cast(result);
-  // Shrink current part, attach it to the accumulator, also attach the result
-  // string to the accumulator, and allocate a new part.
-  ShrinkCurrentPart();  // Shrink.
-  part_length_ = kInitialPartLength;  // Allocate conservatively.
-  Extend();             // Attach current part and allocate new part.
-  // Attach result string to the accumulator.
-  Handle<String> cons;
-  ASSIGN_RETURN_ON_EXCEPTION_VALUE(
-      isolate_, cons,
-      factory_->NewConsString(accumulator(), result_string),
-      EXCEPTION);
-  set_accumulator(cons);
+  builder_.AppendString(Handle<String>::cast(result));
   return SUCCESS;
 }
 
@@ -511,7 +400,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSValue(
     DCHECK(class_name == isolate_->heap()->Boolean_string());
     Object* value = JSValue::cast(*object)->value();
     DCHECK(value->IsBoolean());
-    AppendOneByte(value->IsTrue() ? "true" : "false");
+    builder_.AppendCString(value->IsTrue() ? "true" : "false");
   }
   return SUCCESS;
 }
@@ -521,7 +410,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
   static const int kBufferSize = 100;
   char chars[kBufferSize];
   Vector<char> buffer(chars, kBufferSize);
-  AppendOneByte(IntToCString(object->value(), buffer));
+  builder_.AppendCString(IntToCString(object->value(), buffer));
   return SUCCESS;
 }
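
SerializeSmi formats the integer into a fixed 100-byte stack buffer and
hands the resulting C string to the builder. The same shape in isolation,
with snprintf standing in for V8's IntToCString:

    #include <cstdio>
    #include <string>

    // Format into a stack buffer first, then append the C string.
    void AppendInt(int value, std::string* out) {
      static const int kBufferSize = 100;
      char chars[kBufferSize];
      std::snprintf(chars, kBufferSize, "%d", value);
      out->append(chars);
    }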
 
@@ -529,13 +418,13 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeSmi(Smi* object) {
 BasicJsonStringifier::Result BasicJsonStringifier::SerializeDouble(
     double number) {
   if (std::isinf(number) || std::isnan(number)) {
-    AppendOneByte("null");
+    builder_.AppendCString("null");
     return SUCCESS;
   }
   static const int kBufferSize = 100;
   char chars[kBufferSize];
   Vector<char> buffer(chars, kBufferSize);
-  AppendOneByte(DoubleToCString(number, buffer));
+  builder_.AppendCString(DoubleToCString(number, buffer));
   return SUCCESS;
 }
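
JSON has no literal for Infinity or NaN, so SerializeDouble emits "null"
for any non-finite value before falling back to normal number formatting.
The rule in isolation, with snprintf standing in for V8's DoubleToCString:

    #include <cmath>
    #include <cstdio>
    #include <string>

    // Non-finite doubles have no JSON representation; serialize as null.
    std::string JsonNumber(double number) {
      if (std::isinf(number) || std::isnan(number)) return "null";
      char chars[100];
      std::snprintf(chars, sizeof(chars), "%g", number);
      return std::string(chars);
    }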
 
@@ -547,13 +436,13 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
   if (stack_push != SUCCESS) return stack_push;
   uint32_t length = 0;
   CHECK(object->length()->ToArrayIndex(&length));
-  Append('[');
+  builder_.AppendCharacter('[');
   switch (object->GetElementsKind()) {
     case FAST_SMI_ELEMENTS: {
       Handle<FixedArray> elements(
           FixedArray::cast(object->elements()), isolate_);
       for (uint32_t i = 0; i < length; i++) {
-        if (i > 0) Append(',');
+        if (i > 0) builder_.AppendCharacter(',');
         SerializeSmi(Smi::cast(elements->get(i)));
       }
       break;
@@ -564,7 +453,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
       Handle<FixedDoubleArray> elements(
           FixedDoubleArray::cast(object->elements()), isolate_);
       for (uint32_t i = 0; i < length; i++) {
-        if (i > 0) Append(',');
+        if (i > 0) builder_.AppendCharacter(',');
         SerializeDouble(elements->get_scalar(i));
       }
       break;
@@ -573,14 +462,14 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
       Handle<FixedArray> elements(
           FixedArray::cast(object->elements()), isolate_);
       for (uint32_t i = 0; i < length; i++) {
-        if (i > 0) Append(',');
+        if (i > 0) builder_.AppendCharacter(',');
         Result result =
             SerializeElement(isolate_,
                              Handle<Object>(elements->get(i), isolate_),
                              i);
         if (result == SUCCESS) continue;
         if (result == UNCHANGED) {
-          AppendOneByte("null");
+          builder_.AppendCString("null");
         } else {
           return result;
         }
@@ -596,9 +485,8 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
       break;
     }
   }
-  Append(']');
+  builder_.AppendCharacter(']');
   StackPop();
-  current_part_ = handle_scope.CloseAndEscape(current_part_);
   return SUCCESS;
 }
 
@@ -606,19 +494,19 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArray(
 BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSArraySlow(
     Handle<JSArray> object, uint32_t length) {
   for (uint32_t i = 0; i < length; i++) {
-    if (i > 0) Append(',');
+    if (i > 0) builder_.AppendCharacter(',');
     Handle<Object> element;
     ASSIGN_RETURN_ON_EXCEPTION_VALUE(
         isolate_, element,
         Object::GetElement(isolate_, object, i),
         EXCEPTION);
     if (element->IsUndefined()) {
-      AppendOneByte("null");
+      builder_.AppendCString("null");
     } else {
       Result result = SerializeElement(isolate_, element, i);
       if (result == SUCCESS) continue;
       if (result == UNCHANGED) {
-        AppendOneByte("null");
+        builder_.AppendCString("null");
       } else {
         return result;
       }
@@ -635,7 +523,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
   if (stack_push != SUCCESS) return stack_push;
   DCHECK(!object->IsJSGlobalProxy() && !object->IsGlobalObject());
 
-  Append('{');
+  builder_.AppendCharacter('{');
   bool comma = false;
 
   if (object->HasFastProperties() &&
@@ -652,8 +540,15 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
       if (details.IsDontEnum()) continue;
       Handle<Object> property;
       if (details.type() == FIELD && *map == object->map()) {
-        property = Handle<Object>(object->RawFastPropertyAt(
-            FieldIndex::ForDescriptor(*map, i)), isolate_);
+        FieldIndex field_index = FieldIndex::ForDescriptor(*map, i);
+        Isolate* isolate = object->GetIsolate();
+        if (object->IsUnboxedDoubleField(field_index)) {
+          double value = object->RawFastDoublePropertyAt(field_index);
+          property = isolate->factory()->NewHeapNumber(value);
+
+        } else {
+          property = handle(object->RawFastPropertyAt(field_index), isolate);
+        }
       } else {
         ASSIGN_RETURN_ON_EXCEPTION_VALUE(
             isolate_, property,
@@ -680,7 +575,7 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
         maybe_property = Object::GetPropertyOrElement(object, key_handle);
       } else {
         DCHECK(key->IsNumber());
-        key_handle = factory_->NumberToString(Handle<Object>(key, isolate_));
+        key_handle = factory()->NumberToString(Handle<Object>(key, isolate_));
         uint32_t index;
         if (key->IsSmi()) {
           maybe_property = Object::GetElement(
@@ -700,130 +595,59 @@ BasicJsonStringifier::Result BasicJsonStringifier::SerializeJSObject(
     }
   }
 
-  Append('}');
+  builder_.AppendCharacter('}');
   StackPop();
-  current_part_ = handle_scope.CloseAndEscape(current_part_);
   return SUCCESS;
 }
 
 
-void BasicJsonStringifier::ShrinkCurrentPart() {
-  DCHECK(current_index_ < part_length_);
-  current_part_ = SeqString::Truncate(Handle<SeqString>::cast(current_part_),
-                                      current_index_);
-}
-
-
-void BasicJsonStringifier::Accumulate() {
-  if (accumulator()->length() + current_part_->length() > String::kMaxLength) {
-    // Screw it.  Simply set the flag and carry on.  Throw exception at the end.
-    set_accumulator(factory_->empty_string());
-    overflowed_ = true;
-  } else {
-    set_accumulator(factory_->NewConsString(accumulator(),
-                                            current_part_).ToHandleChecked());
-  }
-}
-
-
-void BasicJsonStringifier::Extend() {
-  Accumulate();
-  if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
-    part_length_ *= kPartLengthGrowthFactor;
-  }
-  if (is_one_byte_) {
-    current_part_ =
-        factory_->NewRawOneByteString(part_length_).ToHandleChecked();
-  } else {
-    current_part_ =
-        factory_->NewRawTwoByteString(part_length_).ToHandleChecked();
-  }
-  DCHECK(!current_part_.is_null());
-  current_index_ = 0;
-}
-
-
-void BasicJsonStringifier::ChangeEncoding() {
-  ShrinkCurrentPart();
-  Accumulate();
-  current_part_ =
-      factory_->NewRawTwoByteString(part_length_).ToHandleChecked();
-  DCHECK(!current_part_.is_null());
-  current_index_ = 0;
-  is_one_byte_ = false;
-}
-
-
 template <typename SrcChar, typename DestChar>
-int BasicJsonStringifier::SerializeStringUnchecked_(const SrcChar* src,
-                                                    DestChar* dest,
-                                                    int length) {
-  DestChar* dest_start = dest;
-
+void BasicJsonStringifier::SerializeStringUnchecked_(
+    Vector<const SrcChar> src,
+    IncrementalStringBuilder::NoExtend<DestChar>* dest) {
   // Assert that a uc16 character is not truncated down to 8 bits.
   // The <uc16, char> version of this method must not be called.
-  DCHECK(sizeof(*dest) >= sizeof(*src));
+  DCHECK(sizeof(DestChar) >= sizeof(SrcChar));
 
-  for (int i = 0; i < length; i++) {
+  for (int i = 0; i < src.length(); i++) {
     SrcChar c = src[i];
     if (DoNotEscape(c)) {
-      *(dest++) = static_cast<DestChar>(c);
+      dest->Append(c);
     } else {
-      const uint8_t* chars = reinterpret_cast<const uint8_t*>(
-          &JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
-      while (*chars != '\0') *(dest++) = *(chars++);
+      dest->AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
     }
   }
-
-  return static_cast<int>(dest - dest_start);
 }
 
 
-template <bool is_one_byte, typename Char>
+template <typename SrcChar, typename DestChar>
 void BasicJsonStringifier::SerializeString_(Handle<String> string) {
   int length = string->length();
-  Append_<is_one_byte, char>('"');
+  builder_.Append<uint8_t, DestChar>('"');
   // We make a rough estimate to find out if the current string can be
   // serialized without allocating a new string part. The worst case length of
   // an escaped character is 6.  Shifting the string length left by 3
   // is a more pessimistic estimate, but faster to calculate.
-
-  if (((part_length_ - current_index_) >> 3) > length) {
+  int worst_case_length = length << 3;
+  if (builder_.CurrentPartCanFit(worst_case_length)) {
     DisallowHeapAllocation no_gc;
-    Vector<const Char> vector = GetCharVector<Char>(string);
-    if (is_one_byte) {
-      current_index_ += SerializeStringUnchecked_(
-          vector.start(),
-          SeqOneByteString::cast(*current_part_)->GetChars() + current_index_,
-          length);
-    } else {
-      current_index_ += SerializeStringUnchecked_(
-          vector.start(),
-          SeqTwoByteString::cast(*current_part_)->GetChars() + current_index_,
-          length);
-    }
+    Vector<const SrcChar> vector = string->GetCharVector<SrcChar>();
+    IncrementalStringBuilder::NoExtendBuilder<DestChar> no_extend(
+        &builder_, worst_case_length);
+    SerializeStringUnchecked_(vector, &no_extend);
   } else {
-    String* string_location = NULL;
-    Vector<const Char> vector(NULL, 0);
-    for (int i = 0; i < length; i++) {
-      // If GC moved the string, we need to refresh the vector.
-      if (*string != string_location) {
-        DisallowHeapAllocation no_gc;
-        // This does not actually prevent the string from being relocated later.
-        vector = GetCharVector<Char>(string);
-        string_location = *string;
-      }
-      Char c = vector[i];
+    FlatStringReader reader(isolate_, string);
+    for (int i = 0; i < reader.length(); i++) {
+      SrcChar c = reader.Get<SrcChar>(i);
       if (DoNotEscape(c)) {
-        Append_<is_one_byte, Char>(c);
+        builder_.Append<SrcChar, DestChar>(c);
       } else {
-        Append_<is_one_byte, uint8_t>(reinterpret_cast<const uint8_t*>(
-            &JsonEscapeTable[c * kJsonEscapeTableEntrySize]));
+        builder_.AppendCString(&JsonEscapeTable[c * kJsonEscapeTableEntrySize]);
       }
     }
   }
 
-  Append_<is_one_byte, uint8_t>('"');
+  builder_.Append<uint8_t, DestChar>('"');
 }
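
The bound used above is intentionally loose: the longest entry in the
escape table is 6 characters (a \u00XX escape), so shifting the length
left by 3 (multiplying by 8) always dominates the true worst case while
costing only a single shift. A quick check of the arithmetic:

    #include <cassert>

    // 8 * length (one shift) is a strictly more pessimistic but cheaper
    // bound than the exact worst case of 6 * length.
    int main() {
      for (int length = 0; length < 1000; length++) {
        assert((length << 3) >= 6 * length);
      }
      return 0;
    }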
 
 
@@ -839,37 +663,20 @@ bool BasicJsonStringifier::DoNotEscape(uint16_t c) {
 }
 
 
-template <>
-Vector<const uint8_t> BasicJsonStringifier::GetCharVector(
-    Handle<String> string) {
-  String::FlatContent flat = string->GetFlatContent();
-  DCHECK(flat.IsOneByte());
-  return flat.ToOneByteVector();
-}
-
-
-template <>
-Vector<const uc16> BasicJsonStringifier::GetCharVector(Handle<String> string) {
-  String::FlatContent flat = string->GetFlatContent();
-  DCHECK(flat.IsTwoByte());
-  return flat.ToUC16Vector();
-}
-
-
 void BasicJsonStringifier::SerializeString(Handle<String> object) {
   object = String::Flatten(object);
-  if (is_one_byte_) {
+  if (builder_.CurrentEncoding() == String::ONE_BYTE_ENCODING) {
     if (object->IsOneByteRepresentationUnderneath()) {
-      SerializeString_<true, uint8_t>(object);
+      SerializeString_<uint8_t, uint8_t>(object);
     } else {
-      ChangeEncoding();
+      builder_.ChangeEncoding();
       SerializeString(object);
     }
   } else {
     if (object->IsOneByteRepresentationUnderneath()) {
-      SerializeString_<false, uint8_t>(object);
+      SerializeString_<uint8_t, uc16>(object);
     } else {
-      SerializeString_<false, uc16>(object);
+      SerializeString_<uc16, uc16>(object);
     }
   }
 }
index d078d07..81ad080 100644 (file)
@@ -58,28 +58,6 @@ MaybeHandle<Object> RegExpImpl::CreateRegExpLiteral(
 }
 
 
-static JSRegExp::Flags RegExpFlagsFromString(Handle<String> str) {
-  int flags = JSRegExp::NONE;
-  for (int i = 0; i < str->length(); i++) {
-    switch (str->Get(i)) {
-      case 'i':
-        flags |= JSRegExp::IGNORE_CASE;
-        break;
-      case 'g':
-        flags |= JSRegExp::GLOBAL;
-        break;
-      case 'm':
-        flags |= JSRegExp::MULTILINE;
-        break;
-      case 'y':
-        if (FLAG_harmony_regexps) flags |= JSRegExp::STICKY;
-        break;
-    }
-  }
-  return JSRegExp::Flags(flags);
-}
-
-
 MUST_USE_RESULT
 static inline MaybeHandle<Object> ThrowRegExpException(
     Handle<JSRegExp> re,
@@ -156,10 +134,9 @@ static bool HasFewDifferentCharacters(Handle<String> pattern) {
 
 MaybeHandle<Object> RegExpImpl::Compile(Handle<JSRegExp> re,
                                         Handle<String> pattern,
-                                        Handle<String> flag_str) {
+                                        JSRegExp::Flags flags) {
   Isolate* isolate = re->GetIsolate();
   Zone zone(isolate);
-  JSRegExp::Flags flags = RegExpFlagsFromString(flag_str);
   CompilationCache* compilation_cache = isolate->compilation_cache();
   MaybeHandle<FixedArray> maybe_cached =
       compilation_cache->LookupRegExp(pattern, flags);
@@ -1027,6 +1004,8 @@ class RegExpCompiler {
 
   inline bool ignore_case() { return ignore_case_; }
   inline bool one_byte() { return one_byte_; }
+  inline bool optimize() { return optimize_; }
+  inline void set_optimize(bool value) { optimize_ = value; }
   FrequencyCollator* frequency_collator() { return &frequency_collator_; }
 
   int current_expansion_factor() { return current_expansion_factor_; }
@@ -1047,6 +1026,7 @@ class RegExpCompiler {
   bool ignore_case_;
   bool one_byte_;
   bool reg_exp_too_big_;
+  bool optimize_;
   int current_expansion_factor_;
   FrequencyCollator frequency_collator_;
   Zone* zone_;
@@ -1079,6 +1059,7 @@ RegExpCompiler::RegExpCompiler(int capture_count, bool ignore_case,
       ignore_case_(ignore_case),
       one_byte_(one_byte),
       reg_exp_too_big_(false),
+      optimize_(FLAG_regexp_optimization),
       current_expansion_factor_(1),
       frequency_collator_(),
       zone_(zone) {
@@ -1094,16 +1075,6 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
     Handle<String> pattern) {
   Heap* heap = pattern->GetHeap();
 
-  bool use_slow_safe_regexp_compiler = false;
-  if (heap->total_regexp_code_generated() >
-          RegExpImpl::kRegWxpCompiledLimit &&
-      heap->isolate()->memory_allocator()->SizeExecutable() >
-          RegExpImpl::kRegExpExecutableMemoryLimit) {
-    use_slow_safe_regexp_compiler = true;
-  }
-
-  macro_assembler->set_slow_safe(use_slow_safe_regexp_compiler);
-
 #ifdef DEBUG
   if (FLAG_trace_regexp_assembler)
     macro_assembler_ = new RegExpMacroAssemblerTracer(macro_assembler);
@@ -1127,12 +1098,14 @@ RegExpEngine::CompilationResult RegExpCompiler::Assemble(
   Handle<HeapObject> code = macro_assembler_->GetCode(pattern);
   heap->IncreaseTotalRegexpCodeGenerated(code->Size());
   work_list_ = NULL;
-#ifdef DEBUG
+#ifdef ENABLE_DISASSEMBLER
   if (FLAG_print_code) {
     CodeTracer::Scope trace_scope(heap->isolate()->GetCodeTracer());
     OFStream os(trace_scope.file());
     Handle<Code>::cast(code)->Disassemble(pattern->ToCString().get(), os);
   }
+#endif
+#ifdef DEBUG
   if (FLAG_trace_regexp_assembler) {
     delete macro_assembler_;
   }
@@ -2257,8 +2230,7 @@ RegExpNode::LimitResult RegExpNode::LimitVersions(RegExpCompiler* compiler,
   // We are being asked to make a non-generic version.  Keep track of how many
   // non-generic versions we generate so as not to overdo it.
   trace_count_++;
-  if (FLAG_regexp_optimization &&
-      trace_count_ < kMaxCopiesCodeGenerated &&
+  if (compiler->optimize() && trace_count_ < kMaxCopiesCodeGenerated &&
       compiler->recursion_depth() <= RegExpCompiler::kMaxRecursion) {
     return CONTINUE;
   }
@@ -4137,15 +4109,12 @@ void ChoiceNode::EmitChoices(RegExpCompiler* compiler,
     }
     alt_gen->expects_preload = preload->preload_is_current_;
     bool generate_full_check_inline = false;
-    if (FLAG_regexp_optimization &&
+    if (compiler->optimize() &&
         try_to_emit_quick_check_for_alternative(i == 0) &&
-        alternative.node()->EmitQuickCheck(compiler,
-                                           trace,
-                                           &new_trace,
-                                           preload->preload_has_checked_bounds_,
-                                           &alt_gen->possible_success,
-                                           &alt_gen->quick_check_details,
-                                           fall_through_on_failure)) {
+        alternative.node()->EmitQuickCheck(
+            compiler, trace, &new_trace, preload->preload_has_checked_bounds_,
+            &alt_gen->possible_success, &alt_gen->quick_check_details,
+            fall_through_on_failure)) {
       // Quick check was generated for this choice.
       preload->preload_is_current_ = true;
       preload->preload_has_checked_bounds_ = true;
@@ -4943,7 +4912,7 @@ RegExpNode* RegExpQuantifier::ToNode(int min,
 
   if (body_can_be_empty) {
     body_start_reg = compiler->AllocateRegister();
-  } else if (FLAG_regexp_optimization && !needs_capture_clearing) {
+  } else if (compiler->optimize() && !needs_capture_clearing) {
     // Only unroll if there are no captures and the body can't be
     // empty.
     {
@@ -6041,6 +6010,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
   }
   RegExpCompiler compiler(data->capture_count, ignore_case, is_one_byte, zone);
 
+  compiler.set_optimize(!TooMuchRegExpCode(pattern));
+
   // Sample some characters from the middle of the string.
   static const int kSampleSize = 128;
 
@@ -6143,6 +6114,8 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
   RegExpMacroAssemblerIrregexp macro_assembler(codes, zone);
 #endif  // V8_INTERPRETED_REGEXP
 
+  macro_assembler.set_slow_safe(TooMuchRegExpCode(pattern));
+
   // Inserted here, instead of in Assembler, because it depends on information
   // in the AST that isn't replicated in the Node structure.
   static const int kMaxBacksearchLimit = 1024;
@@ -6166,4 +6139,14 @@ RegExpEngine::CompilationResult RegExpEngine::Compile(
 }
 
 
+bool RegExpEngine::TooMuchRegExpCode(Handle<String> pattern) {
+  Heap* heap = pattern->GetHeap();
+  bool too_much = pattern->length() > RegExpImpl::kRegExpTooLargeToOptimize;
+  if (heap->total_regexp_code_generated() > RegExpImpl::kRegExpCompiledLimit &&
+      heap->isolate()->memory_allocator()->SizeExecutable() >
+          RegExpImpl::kRegExpExecutableMemoryLimit) {
+    too_much = true;
+  }
+  return too_much;
+}
 }}  // namespace v8::internal
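
TooMuchRegExpCode now gathers both back-off triggers in one predicate: a
single oversized pattern disables the optimizing paths, and sustained
code-generation pressure additionally forces the slow-safe assembler. A
hedged restatement of the policy using the limits from the header below
(HeapStats is an illustrative stand-in for the heap queries):

    struct HeapStats {
      int total_regexp_code_generated;
      int executable_memory;
    };

    const int KB = 1024;
    const int MB = 1024 * 1024;
    const int kRegExpExecutableMemoryLimit = 16 * MB;
    const int kRegExpCompiledLimit = 1 * MB;
    const int kRegExpTooLargeToOptimize = 10 * KB;

    bool TooMuchRegExpCode(int pattern_length, const HeapStats& heap) {
      if (pattern_length > kRegExpTooLargeToOptimize) return true;
      return heap.total_regexp_code_generated > kRegExpCompiledLimit &&
             heap.executable_memory > kRegExpExecutableMemoryLimit;
    }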
index c65adea..4b84c95 100644 (file)
@@ -46,10 +46,9 @@ class RegExpImpl {
   // generic data and choice of implementation - as well as what
   // the implementation wants to store in the data field.
   // Returns false if compilation fails.
-  MUST_USE_RESULT static MaybeHandle<Object> Compile(
-      Handle<JSRegExp> re,
-      Handle<String> pattern,
-      Handle<String> flags);
+  MUST_USE_RESULT static MaybeHandle<Object> Compile(Handle<JSRegExp> re,
+                                                     Handle<String> pattern,
+                                                     JSRegExp::Flags flags);
 
   // See ECMA-262 section 15.10.6.2.
   // This function calls the garbage collector if necessary.
@@ -213,7 +212,8 @@ class RegExpImpl {
   // total regexp code compiled including code that has subsequently been freed
   // and the total executable memory at any point.
   static const int kRegExpExecutableMemoryLimit = 16 * MB;
-  static const int kRegWxpCompiledLimit = 1 * MB;
+  static const int kRegExpCompiledLimit = 1 * MB;
+  static const int kRegExpTooLargeToOptimize = 10 * KB;
 
  private:
   static bool CompileIrregexp(Handle<JSRegExp> re,
@@ -1666,6 +1666,8 @@ class RegExpEngine: public AllStatic {
                                    Handle<String> sample_subject,
                                    bool is_one_byte, Zone* zone);
 
+  static bool TooMuchRegExpCode(Handle<String> pattern);
+
   static void DotPrint(const char* label, RegExpNode* node, bool ignore_case);
 };
 
diff --git a/deps/v8/src/layout-descriptor-inl.h b/deps/v8/src/layout-descriptor-inl.h
new file mode 100644 (file)
index 0000000..3352312
--- /dev/null
@@ -0,0 +1,191 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LAYOUT_DESCRIPTOR_INL_H_
+#define V8_LAYOUT_DESCRIPTOR_INL_H_
+
+#include "src/layout-descriptor.h"
+
+namespace v8 {
+namespace internal {
+
+LayoutDescriptor* LayoutDescriptor::FromSmi(Smi* smi) {
+  return LayoutDescriptor::cast(smi);
+}
+
+
+Handle<LayoutDescriptor> LayoutDescriptor::New(Isolate* isolate, int length) {
+  if (length <= kSmiValueSize) {
+    // The whole bit vector fits into a smi.
+    return handle(LayoutDescriptor::FromSmi(Smi::FromInt(0)), isolate);
+  }
+
+  length = (length + kNumberOfBits - 1) / kNumberOfBits;
+  DCHECK(length > 0);
+
+  if (SmiValuesAre32Bits() && (length & 1)) {
+    // On 64-bit systems, if the length is odd then the half-word of space
+    // would be lost anyway (due to alignment and the fact that we are
+    // allocating a uint32-typed array), so we increase the length of the
+    // allocated array to utilize that "lost" space, which also helps to
+    // avoid layout descriptor reallocations.
+    ++length;
+  }
+  return Handle<LayoutDescriptor>::cast(
+      isolate->factory()->NewFixedTypedArray(length, kExternalUint32Array));
+}
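
The allocation above switches between the Smi form and a uint32 typed
array purely on bit count: round up to whole 32-bit words, then pad an odd
word count on 64-bit targets where smis hold 32 bits. The sizing rule in
isolation:

    const int kNumberOfBits = 32;

    // Words needed for a |bit_length|-bit vector; an odd word count is
    // padded to even on 64-bit targets, since the half word would be lost
    // to alignment anyway.
    int LayoutWords(int bit_length, bool smi_values_are_32_bits) {
      int words = (bit_length + kNumberOfBits - 1) / kNumberOfBits;
      if (smi_values_are_32_bits && (words & 1)) ++words;
      return words;
    }
    // LayoutWords(40, true) == 2; LayoutWords(70, true) == 4 (3 padded).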
+
+
+bool LayoutDescriptor::InobjectUnboxedField(int inobject_properties,
+                                            PropertyDetails details) {
+  if (details.type() != FIELD || !details.representation().IsDouble()) {
+    return false;
+  }
+  // We care only about in-object properties.
+  return details.field_index() < inobject_properties;
+}
+
+
+LayoutDescriptor* LayoutDescriptor::FastPointerLayout() {
+  return LayoutDescriptor::FromSmi(Smi::FromInt(0));
+}
+
+
+bool LayoutDescriptor::GetIndexes(int field_index, int* layout_word_index,
+                                  int* layout_bit_index) {
+  if (static_cast<unsigned>(field_index) >= static_cast<unsigned>(capacity())) {
+    return false;
+  }
+
+  *layout_word_index = field_index / kNumberOfBits;
+  CHECK((!IsSmi() && (*layout_word_index < length())) ||
+        (IsSmi() && (*layout_word_index < 1)));
+
+  *layout_bit_index = field_index % kNumberOfBits;
+  return true;
+}
+
+
+LayoutDescriptor* LayoutDescriptor::SetTagged(int field_index, bool tagged) {
+  int layout_word_index;
+  int layout_bit_index;
+
+  if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
+    CHECK(false);
+    return this;
+  }
+  uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
+
+  if (IsSlowLayout()) {
+    uint32_t value = get_scalar(layout_word_index);
+    if (tagged) {
+      value &= ~layout_mask;
+    } else {
+      value |= layout_mask;
+    }
+    set(layout_word_index, value);
+    return this;
+  } else {
+    uint32_t value = static_cast<uint32_t>(Smi::cast(this)->value());
+    if (tagged) {
+      value &= ~layout_mask;
+    } else {
+      value |= layout_mask;
+    }
+    return LayoutDescriptor::FromSmi(Smi::FromInt(static_cast<int>(value)));
+  }
+}
+
+
+bool LayoutDescriptor::IsTagged(int field_index) {
+  if (IsFastPointerLayout()) return true;
+
+  int layout_word_index;
+  int layout_bit_index;
+
+  if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
+    // Out of bounds queries are considered tagged.
+    return true;
+  }
+  uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
+
+  if (IsSlowLayout()) {
+    uint32_t value = get_scalar(layout_word_index);
+    return (value & layout_mask) == 0;
+  } else {
+    uint32_t value = static_cast<uint32_t>(Smi::cast(this)->value());
+    return (value & layout_mask) == 0;
+  }
+}
+
+
+bool LayoutDescriptor::IsFastPointerLayout() {
+  return this == FastPointerLayout();
+}
+
+
+bool LayoutDescriptor::IsFastPointerLayout(Object* layout_descriptor) {
+  return layout_descriptor == FastPointerLayout();
+}
+
+
+bool LayoutDescriptor::IsSlowLayout() { return !IsSmi(); }
+
+
+int LayoutDescriptor::capacity() {
+  return IsSlowLayout() ? (length() * kNumberOfBits) : kSmiValueSize;
+}
+
+
+LayoutDescriptor* LayoutDescriptor::cast_gc_safe(Object* object) {
+  if (object->IsSmi()) {
+    // Fast mode layout descriptor.
+    return reinterpret_cast<LayoutDescriptor*>(object);
+  }
+
+  // This is a mixed descriptor which is a fixed typed array.
+  MapWord map_word = reinterpret_cast<HeapObject*>(object)->map_word();
+  if (map_word.IsForwardingAddress()) {
+    // Mark-compact has already moved layout descriptor.
+    object = map_word.ToForwardingAddress();
+  }
+  return LayoutDescriptor::cast(object);
+}
+
+
+// LayoutDescriptorHelper is a helper class for querying whether the
+// in-object property at a given offset is a double or not.
+LayoutDescriptorHelper::LayoutDescriptorHelper(Map* map)
+    : all_fields_tagged_(true),
+      header_size_(0),
+      layout_descriptor_(LayoutDescriptor::FastPointerLayout()) {
+  if (!FLAG_unbox_double_fields) return;
+
+  layout_descriptor_ = map->layout_descriptor_gc_safe();
+  if (layout_descriptor_->IsFastPointerLayout()) {
+    return;
+  }
+
+  int inobject_properties = map->inobject_properties();
+  DCHECK(inobject_properties > 0);
+  header_size_ = map->instance_size() - (inobject_properties * kPointerSize);
+  DCHECK(header_size_ >= 0);
+
+  all_fields_tagged_ = false;
+}
+
+
+bool LayoutDescriptorHelper::IsTagged(int offset_in_bytes) {
+  DCHECK(IsAligned(offset_in_bytes, kPointerSize));
+  if (all_fields_tagged_) return true;
+  // Object headers do not contain non-tagged fields.
+  if (offset_in_bytes < header_size_) return true;
+  int field_index = (offset_in_bytes - header_size_) / kPointerSize;
+
+  return layout_descriptor_->IsTagged(field_index);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_LAYOUT_DESCRIPTOR_INL_H_
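
Taken together, the helper turns a raw byte offset into a field index by
subtracting the header size and dividing by the pointer size; header words
and all-tagged objects short-circuit to "tagged". The lookup flattened
into one function (bounds checks omitted; 8-byte pointers and a raw word
array are assumptions of this sketch):

    #include <cstdint>

    const int kPointerSize = 8;  // Assumed 64-bit target.

    bool IsTaggedAt(int offset_in_bytes, int header_size,
                    bool all_fields_tagged, const uint32_t* layout_words) {
      if (all_fields_tagged) return true;
      if (offset_in_bytes < header_size) return true;  // Headers are tagged.
      int field_index = (offset_in_bytes - header_size) / kPointerSize;
      uint32_t word = layout_words[field_index / 32];
      return (word & (static_cast<uint32_t>(1) << (field_index % 32))) == 0;
    }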
diff --git a/deps/v8/src/layout-descriptor.cc b/deps/v8/src/layout-descriptor.cc
new file mode 100644 (file)
index 0000000..77b8ec4
--- /dev/null
@@ -0,0 +1,256 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+
+#include "src/v8.h"
+
+#include "src/base/bits.h"
+#include "src/layout-descriptor.h"
+
+using v8::base::bits::CountTrailingZeros32;
+
+namespace v8 {
+namespace internal {
+
+Handle<LayoutDescriptor> LayoutDescriptor::New(
+    Handle<Map> map, Handle<DescriptorArray> descriptors, int num_descriptors) {
+  Isolate* isolate = descriptors->GetIsolate();
+  if (!FLAG_unbox_double_fields) return handle(FastPointerLayout(), isolate);
+
+  int inobject_properties = map->inobject_properties();
+  if (inobject_properties == 0) return handle(FastPointerLayout(), isolate);
+
+  DCHECK(num_descriptors <= descriptors->number_of_descriptors());
+
+  int layout_descriptor_length;
+  const int kMaxWordsPerField = kDoubleSize / kPointerSize;
+
+  if (num_descriptors <= kSmiValueSize / kMaxWordsPerField) {
+    // Even in the "worst" case (all fields are doubles) it would fit into
+    // a Smi, so no need to calculate length.
+    layout_descriptor_length = kSmiValueSize;
+
+  } else {
+    layout_descriptor_length = 0;
+
+    for (int i = 0; i < num_descriptors; i++) {
+      PropertyDetails details = descriptors->GetDetails(i);
+      if (!InobjectUnboxedField(inobject_properties, details)) continue;
+      int field_index = details.field_index();
+      int field_width_in_words = details.field_width_in_words();
+      layout_descriptor_length =
+          Max(layout_descriptor_length, field_index + field_width_in_words);
+    }
+
+    if (layout_descriptor_length == 0) {
+      // No double fields were found, use fast pointer layout.
+      return handle(FastPointerLayout(), isolate);
+    }
+  }
+  layout_descriptor_length = Min(layout_descriptor_length, inobject_properties);
+
+  // Initially, the layout descriptor corresponds to an object with all
+  // fields tagged.
+  Handle<LayoutDescriptor> layout_descriptor_handle =
+      LayoutDescriptor::New(isolate, layout_descriptor_length);
+
+  DisallowHeapAllocation no_allocation;
+  LayoutDescriptor* layout_descriptor = *layout_descriptor_handle;
+
+  for (int i = 0; i < num_descriptors; i++) {
+    PropertyDetails details = descriptors->GetDetails(i);
+    if (!InobjectUnboxedField(inobject_properties, details)) continue;
+    int field_index = details.field_index();
+    layout_descriptor = layout_descriptor->SetRawData(field_index);
+    if (details.field_width_in_words() > 1) {
+      layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
+    }
+  }
+  return handle(layout_descriptor, isolate);
+}
+
+
+Handle<LayoutDescriptor> LayoutDescriptor::Append(Handle<Map> map,
+                                                  PropertyDetails details) {
+  Isolate* isolate = map->GetIsolate();
+  Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
+                                             isolate);
+
+  if (!InobjectUnboxedField(map->inobject_properties(), details)) {
+    return layout_descriptor;
+  }
+  int field_index = details.field_index();
+  layout_descriptor = LayoutDescriptor::EnsureCapacity(
+      isolate, layout_descriptor, field_index + details.field_width_in_words());
+
+  DisallowHeapAllocation no_allocation;
+  LayoutDescriptor* layout_desc = *layout_descriptor;
+  layout_desc = layout_desc->SetRawData(field_index);
+  if (details.field_width_in_words() > 1) {
+    layout_desc = layout_desc->SetRawData(field_index + 1);
+  }
+  return handle(layout_desc, isolate);
+}
+
+
+Handle<LayoutDescriptor> LayoutDescriptor::AppendIfFastOrUseFull(
+    Handle<Map> map, PropertyDetails details,
+    Handle<LayoutDescriptor> full_layout_descriptor) {
+  DisallowHeapAllocation no_allocation;
+  LayoutDescriptor* layout_descriptor = map->layout_descriptor();
+  if (layout_descriptor->IsSlowLayout()) {
+    return full_layout_descriptor;
+  }
+  if (!InobjectUnboxedField(map->inobject_properties(), details)) {
+    return handle(layout_descriptor, map->GetIsolate());
+  }
+  int field_index = details.field_index();
+  int new_capacity = field_index + details.field_width_in_words();
+  if (new_capacity > layout_descriptor->capacity()) {
+    // Current map's layout descriptor runs out of space, so use the full
+    // layout descriptor.
+    return full_layout_descriptor;
+  }
+
+  layout_descriptor = layout_descriptor->SetRawData(field_index);
+  if (details.field_width_in_words() > 1) {
+    layout_descriptor = layout_descriptor->SetRawData(field_index + 1);
+  }
+  return handle(layout_descriptor, map->GetIsolate());
+}
+
+
+Handle<LayoutDescriptor> LayoutDescriptor::EnsureCapacity(
+    Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
+    int new_capacity) {
+  int old_capacity = layout_descriptor->capacity();
+  if (new_capacity <= old_capacity) {
+    // The current layout already has sufficient capacity; nothing to do.
+    return layout_descriptor;
+  }
+  Handle<LayoutDescriptor> new_layout_descriptor =
+      LayoutDescriptor::New(isolate, new_capacity);
+  DCHECK(new_layout_descriptor->IsSlowLayout());
+
+  if (layout_descriptor->IsSlowLayout()) {
+    memcpy(new_layout_descriptor->DataPtr(), layout_descriptor->DataPtr(),
+           layout_descriptor->DataSize());
+    return new_layout_descriptor;
+  } else {
+    // Fast layout.
+    uint32_t value =
+        static_cast<uint32_t>(Smi::cast(*layout_descriptor)->value());
+    new_layout_descriptor->set(0, value);
+    return new_layout_descriptor;
+  }
+}
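
When EnsureCapacity grows a Smi-form descriptor, the bits packed in the
Smi simply become word 0 of the freshly allocated vector, and the
remaining zero words keep every new field tagged by default. A compact
sketch of that migration:

    #include <cstdint>
    #include <vector>

    // Fast-to-slow migration: Smi bits become word 0; all other words
    // stay 0, i.e. "all tagged".
    std::vector<uint32_t> GrowFromSmi(uint32_t smi_bits, int new_words) {
      std::vector<uint32_t> words(new_words, 0);
      words[0] = smi_bits;
      return words;
    }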
+
+
+bool LayoutDescriptor::IsTagged(int field_index, int max_sequence_length,
+                                int* out_sequence_length) {
+  DCHECK(max_sequence_length > 0);
+  if (IsFastPointerLayout()) {
+    *out_sequence_length = max_sequence_length;
+    return true;
+  }
+
+  int layout_word_index;
+  int layout_bit_index;
+
+  if (!GetIndexes(field_index, &layout_word_index, &layout_bit_index)) {
+    // Out of bounds queries are considered tagged.
+    *out_sequence_length = max_sequence_length;
+    return true;
+  }
+  uint32_t layout_mask = static_cast<uint32_t>(1) << layout_bit_index;
+
+  uint32_t value = IsSlowLayout()
+                       ? get_scalar(layout_word_index)
+                       : static_cast<uint32_t>(Smi::cast(this)->value());
+
+  bool is_tagged = (value & layout_mask) == 0;
+  if (!is_tagged) value = ~value;  // Count set bits instead of cleared bits.
+  value = value & ~(layout_mask - 1);  // Clear bits we are not interested in.
+  int sequence_length = CountTrailingZeros32(value) - layout_bit_index;
+
+  if (layout_bit_index + sequence_length == kNumberOfBits) {
+    // The contiguous sequence runs to the end of the current word; continue
+    // counting in the subsequent words.
+    if (IsSlowLayout()) {
+      int len = length();
+      ++layout_word_index;
+      for (; layout_word_index < len; layout_word_index++) {
+        value = get_scalar(layout_word_index);
+        bool cur_is_tagged = (value & 1) == 0;
+        if (cur_is_tagged != is_tagged) break;
+        if (!is_tagged) value = ~value;  // Count set bits instead.
+        int cur_sequence_length = CountTrailingZeros32(value);
+        sequence_length += cur_sequence_length;
+        if (sequence_length >= max_sequence_length) break;
+        if (cur_sequence_length != kNumberOfBits) break;
+      }
+    }
+    if (is_tagged && (field_index + sequence_length == capacity())) {
+      // The contiguous sequence of tagged fields lasts till the end of the
+      // layout descriptor which means that all the fields starting from
+      // field_index are tagged.
+      sequence_length = std::numeric_limits<int>::max();
+    }
+  }
+  *out_sequence_length = Min(sequence_length, max_sequence_length);
+  return is_tagged;
+}
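
Within a single word, the sequence query inverts the word when the queried
bit is set (so it always counts zeros), clears the bits below the query
position, and lets CountTrailingZeros32 locate the first differing bit.
The single-word case, with a portable fallback for the intrinsic:

    #include <cstdint>

    // Portable stand-in for v8::base::bits::CountTrailingZeros32.
    int CountTrailingZeros32(uint32_t value) {
      if (value == 0) return 32;
      int count = 0;
      while ((value & 1) == 0) {
        value >>= 1;
        count++;
      }
      return count;
    }

    // Length of the run of bits equal to bit |bit_index|, starting there
    // (cross-word continuation omitted).
    int RunLength(uint32_t word, int bit_index) {
      uint32_t mask = static_cast<uint32_t>(1) << bit_index;
      if ((word & mask) != 0) word = ~word;  // Always count zero bits.
      word &= ~(mask - 1);  // Clear bits below the query position.
      return CountTrailingZeros32(word) - bit_index;
    }
    // RunLength(0x0C, 2) == 2 (bits 2..3 set); RunLength(0x0C, 4) == 28.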
+
+
+Handle<LayoutDescriptor> LayoutDescriptor::NewForTesting(Isolate* isolate,
+                                                         int length) {
+  return New(isolate, length);
+}
+
+
+LayoutDescriptor* LayoutDescriptor::SetTaggedForTesting(int field_index,
+                                                        bool tagged) {
+  return SetTagged(field_index, tagged);
+}
+
+
+bool LayoutDescriptorHelper::IsTagged(
+    int offset_in_bytes, int end_offset,
+    int* out_end_of_contiguous_region_offset) {
+  DCHECK(IsAligned(offset_in_bytes, kPointerSize));
+  DCHECK(IsAligned(end_offset, kPointerSize));
+  DCHECK(offset_in_bytes < end_offset);
+  if (all_fields_tagged_) {
+    *out_end_of_contiguous_region_offset = end_offset;
+    DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
+    return true;
+  }
+  int max_sequence_length = (end_offset - offset_in_bytes) / kPointerSize;
+  int field_index = Max(0, (offset_in_bytes - header_size_) / kPointerSize);
+  int sequence_length;
+  bool tagged = layout_descriptor_->IsTagged(field_index, max_sequence_length,
+                                             &sequence_length);
+  DCHECK(sequence_length > 0);
+  if (offset_in_bytes < header_size_) {
+    // Object headers do not contain non-tagged fields. Check if the contiguous
+    // region continues after the header.
+    if (tagged) {
+      // First field is tagged, calculate end offset from there.
+      *out_end_of_contiguous_region_offset =
+          header_size_ + sequence_length * kPointerSize;
+
+    } else {
+      *out_end_of_contiguous_region_offset = header_size_;
+    }
+    DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
+    return true;
+  }
+  *out_end_of_contiguous_region_offset =
+      offset_in_bytes + sequence_length * kPointerSize;
+  DCHECK(offset_in_bytes < *out_end_of_contiguous_region_offset);
+  return tagged;
+}
+}
+}  // namespace v8::internal
diff --git a/deps/v8/src/layout-descriptor.h b/deps/v8/src/layout-descriptor.h
new file mode 100644 (file)
index 0000000..cc2666a
--- /dev/null
@@ -0,0 +1,141 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_LAYOUT_DESCRIPTOR_H_
+#define V8_LAYOUT_DESCRIPTOR_H_
+
+#include <iosfwd>
+
+#include "src/objects.h"
+
+namespace v8 {
+namespace internal {
+
+// LayoutDescriptor is a bit vector defining which fields contain non-tagged
+// values. It could either be a fixed typed array (slow form) or a Smi
+// if the length fits (fast form).
+// Each bit in the layout represents a FIELD, referenced by its field_index.
+// If the bit is set then the corresponding field contains a non-tagged
+// value and therefore must be skipped by the GC.
+// Otherwise the field is considered tagged. If the queried bit lies "outside"
+// of the descriptor then the field is also considered tagged.
+// Once a layout descriptor is created, properties may only be appended
+// to it.
+class LayoutDescriptor : public FixedTypedArray<Uint32ArrayTraits> {
+ public:
+  V8_INLINE bool IsTagged(int field_index);
+
+  // Queries the contiguous region of fields that are either tagged or not.
+  // Returns true if the given field is tagged or false otherwise and writes
+  // the length of the contiguous region to |out_sequence_length|.
+  // If the sequence is longer than |max_sequence_length| then
+  // |out_sequence_length| is set to |max_sequence_length|.
+  bool IsTagged(int field_index, int max_sequence_length,
+                int* out_sequence_length);
+
+  // Returns true if this is a layout of the object having only tagged fields.
+  V8_INLINE bool IsFastPointerLayout();
+  V8_INLINE static bool IsFastPointerLayout(Object* layout_descriptor);
+
+  // Returns true if the layout descriptor is in non-Smi form.
+  V8_INLINE bool IsSlowLayout();
+
+  V8_INLINE static LayoutDescriptor* cast(Object* object);
+  V8_INLINE static const LayoutDescriptor* cast(const Object* object);
+
+  V8_INLINE static LayoutDescriptor* cast_gc_safe(Object* object);
+
+  // Builds a layout descriptor optimized for the given |map|, using
+  // |num_descriptors| elements of the given descriptors array. The |map|'s
+  // own descriptors may differ.
+  static Handle<LayoutDescriptor> New(Handle<Map> map,
+                                      Handle<DescriptorArray> descriptors,
+                                      int num_descriptors);
+
+  // Creates a new layout descriptor by appending a property with |details|
+  // to |map|'s layout descriptor.
+  static Handle<LayoutDescriptor> Append(Handle<Map> map,
+                                         PropertyDetails details);
+
+  // Creates a new layout descriptor by appending a property with |details|
+  // to |map|'s layout descriptor; if the result is still in fast form it is
+  // returned, otherwise |full_layout_descriptor| is returned.
+  static Handle<LayoutDescriptor> AppendIfFastOrUseFull(
+      Handle<Map> map, PropertyDetails details,
+      Handle<LayoutDescriptor> full_layout_descriptor);
+
+  // The layout descriptor that corresponds to an object whose fields are
+  // all tagged (FastPointerLayout).
+  V8_INLINE static LayoutDescriptor* FastPointerLayout();
+
+#ifdef DEBUG
+  // Check that this layout descriptor corresponds to given map.
+  bool IsConsistentWithMap(Map* map);
+#endif
+
+#ifdef OBJECT_PRINT
+  // For our gdb macros, we should perhaps change these in the future.
+  void Print();
+
+  void Print(std::ostream& os);  // NOLINT
+#endif
+
+  // Capacity of layout descriptors in bits.
+  V8_INLINE int capacity();
+
+  static Handle<LayoutDescriptor> NewForTesting(Isolate* isolate, int length);
+  LayoutDescriptor* SetTaggedForTesting(int field_index, bool tagged);
+
+ private:
+  static const int kNumberOfBits = 32;
+
+  V8_INLINE static Handle<LayoutDescriptor> New(Isolate* isolate, int length);
+  V8_INLINE static LayoutDescriptor* FromSmi(Smi* smi);
+
+  V8_INLINE static bool InobjectUnboxedField(int inobject_properties,
+                                             PropertyDetails details);
+
+  static Handle<LayoutDescriptor> EnsureCapacity(
+      Isolate* isolate, Handle<LayoutDescriptor> layout_descriptor,
+      int new_capacity);
+
+  // Returns false if requested field_index is out of bounds.
+  V8_INLINE bool GetIndexes(int field_index, int* layout_word_index,
+                            int* layout_bit_index);
+
+  V8_INLINE MUST_USE_RESULT LayoutDescriptor* SetRawData(int field_index) {
+    return SetTagged(field_index, false);
+  }
+
+  V8_INLINE MUST_USE_RESULT LayoutDescriptor* SetTagged(int field_index,
+                                                        bool tagged);
+};
+
+
+// LayoutDescriptorHelper is a helper class for querying a layout descriptor
+// about whether the field at a given offset is tagged or not.
+class LayoutDescriptorHelper {
+ public:
+  inline explicit LayoutDescriptorHelper(Map* map);
+
+  bool all_fields_tagged() { return all_fields_tagged_; }
+  inline bool IsTagged(int offset_in_bytes);
+
+  // Queries the contiguous region of fields that are either tagged or not.
+  // Returns true if fields starting at |offset_in_bytes| are tagged or false
+  // otherwise and writes the offset of the end of the contiguous region to
+  // |out_end_of_contiguous_region_offset|. The |end_offset| value is the
+  // upper bound for |out_end_of_contiguous_region_offset|.
+  bool IsTagged(int offset_in_bytes, int end_offset,
+                int* out_end_of_contiguous_region_offset);
+
+ private:
+  bool all_fields_tagged_;
+  int header_size_;
+  LayoutDescriptor* layout_descriptor_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_LAYOUT_DESCRIPTOR_H_
index 5529498..21ba9bd 100644 (file)
@@ -37,7 +37,7 @@ class DefaultPlatform : public Platform {
       Task* task, ExpectedRuntime expected_runtime) OVERRIDE;
   virtual void CallOnForegroundThread(v8::Isolate* isolate,
                                       Task* task) OVERRIDE;
-  virtual double MonotonicallyIncreasingTime() OVERRIDE;
+  double MonotonicallyIncreasingTime() OVERRIDE;
 
  private:
   static const int kMaxThreadPoolSize;
index 67f086d..cf77e61 100644 (file)
@@ -22,7 +22,7 @@ class WorkerThread : public base::Thread {
   virtual ~WorkerThread();
 
   // Thread implementation.
-  virtual void Run() OVERRIDE;
+  void Run() OVERRIDE;
 
  private:
   friend class QuitTask;
index 07214f9..eaa2383 100644 (file)
@@ -19,6 +19,9 @@
 // All unchanged functions have their positions updated accordingly.
 //
 // LiveEdit namespace is declared inside a single function constructor.
+
+"use strict";
+
 Debug.LiveEdit = new function() {
 
   // Forward declaration for minifier.
@@ -953,7 +956,7 @@ Debug.LiveEdit = new function() {
 
   FunctionPatchabilityStatus.SymbolName = function(code) {
     var enumeration = FunctionPatchabilityStatus;
-    for (name in enumeration) {
+    for (var name in enumeration) {
       if (enumeration[name] == code) {
         return name;
       }
index b1476a0..29eaa97 100644 (file)
@@ -605,17 +605,17 @@ static int GetArrayLength(Handle<JSArray> array) {
 }
 
 
-void FunctionInfoWrapper::SetInitialProperties(
-    Handle<String> name, int start_position, int end_position, int param_num,
-    int literal_count, int slot_count, int ic_slot_count, int parent_index) {
+void FunctionInfoWrapper::SetInitialProperties(Handle<String> name,
+                                               int start_position,
+                                               int end_position, int param_num,
+                                               int literal_count,
+                                               int parent_index) {
   HandleScope scope(isolate());
   this->SetField(kFunctionNameOffset_, name);
   this->SetSmiValueField(kStartPositionOffset_, start_position);
   this->SetSmiValueField(kEndPositionOffset_, end_position);
   this->SetSmiValueField(kParamNumOffset_, param_num);
   this->SetSmiValueField(kLiteralNumOffset_, literal_count);
-  this->SetSmiValueField(kSlotNumOffset_, slot_count);
-  this->SetSmiValueField(kICSlotNumOffset_, ic_slot_count);
   this->SetSmiValueField(kParentIndexOffset_, parent_index);
 }
 
@@ -646,26 +646,18 @@ Handle<Code> FunctionInfoWrapper::GetFunctionCode() {
 }
 
 
-Handle<TypeFeedbackVector> FunctionInfoWrapper::GetFeedbackVector() {
+MaybeHandle<TypeFeedbackVector> FunctionInfoWrapper::GetFeedbackVector() {
   Handle<Object> element = this->GetField(kSharedFunctionInfoOffset_);
-  Handle<TypeFeedbackVector> result;
   if (element->IsJSValue()) {
     Handle<JSValue> value_wrapper = Handle<JSValue>::cast(element);
     Handle<Object> raw_result = UnwrapJSValue(value_wrapper);
     Handle<SharedFunctionInfo> shared =
         Handle<SharedFunctionInfo>::cast(raw_result);
-    result = Handle<TypeFeedbackVector>(shared->feedback_vector(), isolate());
-    CHECK_EQ(result->Slots(), GetSlotCount());
-    CHECK_EQ(result->ICSlots(), GetICSlotCount());
+    return Handle<TypeFeedbackVector>(shared->feedback_vector(), isolate());
   } else {
-    // Scripts may never have a SharedFunctionInfo created, so
-    // create a type feedback vector here.
-    int slot_count = GetSlotCount();
-    int ic_slot_count = GetICSlotCount();
-    result =
-        isolate()->factory()->NewTypeFeedbackVector(slot_count, ic_slot_count);
+    // Scripts may never have a SharedFunctionInfo created.
+    return MaybeHandle<TypeFeedbackVector>();
   }
-  return result;
 }
 
 
@@ -706,10 +698,10 @@ class FunctionInfoListener {
   void FunctionStarted(FunctionLiteral* fun) {
     HandleScope scope(isolate());
     FunctionInfoWrapper info = FunctionInfoWrapper::Create(isolate());
-    info.SetInitialProperties(
-        fun->name(), fun->start_position(), fun->end_position(),
-        fun->parameter_count(), fun->materialized_literal_count(),
-        fun->slot_count(), fun->ic_slot_count(), current_parent_index_);
+    info.SetInitialProperties(fun->name(), fun->start_position(),
+                              fun->end_position(), fun->parameter_count(),
+                              fun->materialized_literal_count(),
+                              current_parent_index_);
     current_parent_index_ = len_;
     SetElementSloppy(result_, len_, info.GetJSArray());
     len_++;
@@ -1201,10 +1193,12 @@ void LiveEdit::ReplaceFunctionCode(
       shared_info->set_scope_info(ScopeInfo::cast(*code_scope_info));
     }
     shared_info->DisableOptimization(kLiveEdit);
-    // Update the type feedback vector
-    Handle<TypeFeedbackVector> feedback_vector =
+    // Update the type feedback vector, if needed.
+    MaybeHandle<TypeFeedbackVector> feedback_vector =
         compile_info_wrapper.GetFeedbackVector();
-    shared_info->set_feedback_vector(*feedback_vector);
+    if (!feedback_vector.is_null()) {
+      shared_info->set_feedback_vector(*feedback_vector.ToHandleChecked());
+    }
   }
 
   if (shared_info->debug_info()->IsDebugInfo()) {
index 6534b7e..65fe1a6 100644 (file)
@@ -282,7 +282,6 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
 
   void SetInitialProperties(Handle<String> name, int start_position,
                             int end_position, int param_num, int literal_count,
-                            int slot_count, int ic_slot_count,
                             int parent_index);
 
   void SetFunctionCode(Handle<Code> function_code,
@@ -304,7 +303,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
 
   Handle<Code> GetFunctionCode();
 
-  Handle<TypeFeedbackVector> GetFeedbackVector();
+  MaybeHandle<TypeFeedbackVector> GetFeedbackVector();
 
   Handle<Object> GetCodeScopeInfo();
 
@@ -314,12 +313,6 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
 
   int GetEndPosition() { return this->GetSmiValueField(kEndPositionOffset_); }
 
-  int GetSlotCount() {
-    return this->GetSmiValueField(kSlotNumOffset_);
-  }
-
-  int GetICSlotCount() { return this->GetSmiValueField(kICSlotNumOffset_); }
-
  private:
   static const int kFunctionNameOffset_ = 0;
   static const int kStartPositionOffset_ = 1;
@@ -331,9 +324,7 @@ class FunctionInfoWrapper : public JSArrayBasedStruct<FunctionInfoWrapper> {
   static const int kParentIndexOffset_ = 7;
   static const int kSharedFunctionInfoOffset_ = 8;
   static const int kLiteralNumOffset_ = 9;
-  static const int kSlotNumOffset_ = 10;
-  static const int kICSlotNumOffset_ = 11;
-  static const int kSize_ = 12;
+  static const int kSize_ = 10;
 
   friend class JSArrayBasedStruct<FunctionInfoWrapper>;
 };
index c94d07a..278cff1 100644 (file)
@@ -6,6 +6,7 @@
 
 #include "src/log-utils.h"
 #include "src/string-stream.h"
+#include "src/version.h"
 
 namespace v8 {
 namespace internal {
@@ -49,6 +50,14 @@ void Log::Initialize(const char* log_file_name) {
     } else {
       OpenFile(log_file_name);
     }
+
+    if (output_handle_ != nullptr) {
+      Log::MessageBuilder msg(this);
+      msg.Append("v8-version,%d,%d,%d,%d,%d", Version::GetMajor(),
+                 Version::GetMinor(), Version::GetBuild(), Version::GetPatch(),
+                 Version::IsCandidate());
+      msg.WriteToLogFile();
+    }
   }
 }
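
[Editor's note] The new header makes the producing V8 version recoverable from any --logfile output. A hypothetical Node-style consumer sketch (assumes a log file named v8.log; the field layout follows the Append format above, with IsCandidate printed as 0 or 1):

var fs = require('fs');
var first = fs.readFileSync('v8.log', 'utf8').split('\n')[0];
var f = first.split(',');  // e.g. "v8-version,3,31,0,0,0" (illustrative values)
if (f[0] === 'v8-version') {
  console.log({ major: +f[1], minor: +f[2], build: +f[3],
                patch: +f[4], candidate: f[5] === '1' });
}
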
 
index 0dcf6bb..3eede36 100644 (file)
@@ -1300,7 +1300,7 @@ void Logger::CodeDisableOptEvent(Code* code,
   SmartArrayPointer<char> name =
       shared->DebugName()->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
   msg.Append("\"%s\",", name.get());
-  msg.Append("\"%s\"", GetBailoutReason(shared->DisableOptimizationReason()));
+  msg.Append("\"%s\"", GetBailoutReason(shared->disable_optimization_reason()));
   msg.WriteToLogFile();
 }
 
index d4777a0..0c9cc91 100644 (file)
@@ -63,12 +63,10 @@ LookupIterator::State LookupIterator::LookupInHolder(Map* map,
         property_details_ = descriptors->GetDetails(number_);
       }
       has_property_ = true;
-      switch (property_details_.type()) {
-        case v8::internal::CONSTANT:
-        case v8::internal::FIELD:
-        case v8::internal::NORMAL:
+      switch (property_details_.kind()) {
+        case v8::internal::DATA:
           return DATA;
-        case v8::internal::CALLBACKS:
+        case v8::internal::ACCESSOR:
           return ACCESSOR;
       }
     case ACCESSOR:
index 84eb6d4..8088f4d 100644 (file)
@@ -102,7 +102,7 @@ void LookupIterator::ReconfigureDataProperty(Handle<Object> value,
   DCHECK(HolderIsReceiverOrHiddenPrototype());
   Handle<JSObject> holder = GetHolder<JSObject>();
   if (holder_map_->is_dictionary_map()) {
-    PropertyDetails details(attributes, NORMAL, 0);
+    PropertyDetails details(attributes, FIELD, 0);
     JSObject::SetNormalizedProperty(holder, name(), value, details);
   } else {
     holder_map_ = Map::ReconfigureDataProperty(holder_map_, descriptor_number(),
@@ -127,7 +127,7 @@ void LookupIterator::PrepareTransitionToDataProperty(
   // observable.
   Handle<JSObject> receiver = GetStoreTarget();
 
-  if (!name().is_identical_to(isolate()->factory()->hidden_string()) &&
+  if (!isolate()->IsInternallyUsedPropertyName(name()) &&
       !receiver->map()->is_extensible()) {
     return;
   }
@@ -289,7 +289,7 @@ Handle<Object> LookupIterator::GetDataValue() const {
 }
 
 
-void LookupIterator::WriteDataValue(Handle<Object> value) {
+Handle<Object> LookupIterator::WriteDataValue(Handle<Object> value) {
   DCHECK_EQ(DATA, state_);
   Handle<JSObject> holder = GetHolder<JSObject>();
   if (holder_map_->is_dictionary_map()) {
@@ -297,7 +297,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
     if (holder->IsGlobalObject()) {
       Handle<PropertyCell> cell(
           PropertyCell::cast(property_dictionary->ValueAt(dictionary_entry())));
-      PropertyCell::SetValueInferType(cell, value);
+      value = PropertyCell::SetValueInferType(cell, value);
     } else {
       property_dictionary->ValueAtPut(dictionary_entry(), *value);
     }
@@ -306,6 +306,7 @@ void LookupIterator::WriteDataValue(Handle<Object> value) {
   } else {
     DCHECK_EQ(v8::internal::CONSTANT, property_details_.type());
   }
+  return value;
 }
 
 
index 52231e5..a2e0d4d 100644 (file)
@@ -46,7 +46,7 @@ class LookupIterator FINAL BASE_EMBEDDED {
                  Configuration configuration = PROTOTYPE_CHAIN)
       : configuration_(ComputeConfiguration(configuration, name)),
         state_(NOT_FOUND),
-        property_details_(NONE, NORMAL, Representation::None()),
+        property_details_(NONE, FIELD, 0),
         isolate_(name->GetIsolate()),
         name_(name),
         receiver_(receiver),
@@ -61,7 +61,7 @@ class LookupIterator FINAL BASE_EMBEDDED {
                  Configuration configuration = PROTOTYPE_CHAIN)
       : configuration_(ComputeConfiguration(configuration, name)),
         state_(NOT_FOUND),
-        property_details_(NONE, NORMAL, Representation::None()),
+        property_details_(NONE, FIELD, 0),
         isolate_(name->GetIsolate()),
         name_(name),
         holder_map_(holder->map(), isolate_),
@@ -136,7 +136,9 @@ class LookupIterator FINAL BASE_EMBEDDED {
   Handle<PropertyCell> GetPropertyCell() const;
   Handle<Object> GetAccessors() const;
   Handle<Object> GetDataValue() const;
-  void WriteDataValue(Handle<Object> value);
+  // Usually returns the value that was passed in, but may perform
+  // non-observable modifications on it, such as internalizing strings.
+  Handle<Object> WriteDataValue(Handle<Object> value);
 
   // Checks whether the receiver is an indexed exotic object
   // and name is a special numeric index.
@@ -175,7 +177,8 @@ class LookupIterator FINAL BASE_EMBEDDED {
   static Configuration ComputeConfiguration(
       Configuration configuration, Handle<Name> name) {
     if (name->IsOwn()) {
-      return static_cast<Configuration>(configuration & HIDDEN);
+      return static_cast<Configuration>(configuration &
+                                        HIDDEN_SKIP_INTERCEPTOR);
     } else {
       return configuration;
     }
index d8741f7..1571144 100644 (file)
@@ -168,6 +168,7 @@ macro TO_OBJECT_INLINE(arg) = (IS_SPEC_OBJECT(%IS_VAR(arg)) ? arg : ToObject(arg
 macro JSON_NUMBER_TO_STRING(arg) = ((%_IsSmi(%IS_VAR(arg)) || arg - arg == 0) ? %_NumberToString(arg) : "null");
 macro HAS_OWN_PROPERTY(obj, index) = (%_CallFunction(obj, index, ObjectHasOwnProperty));
 macro SHOULD_CREATE_WRAPPER(functionName, receiver) = (!IS_SPEC_OBJECT(receiver) && %IsSloppyModeFunction(functionName));
+macro HAS_INDEX(array, index, is_array) = ((is_array && %_HasFastPackedElements(%IS_VAR(array))) ? (index < array.length) : (index in array));
 
 # Private names.
 # GET_PRIVATE should only be used if the property is known to exist on obj
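
[Editor's note] Why HAS_INDEX gates the fast path on packed elements: for a holey array, a bounds check and the `in` operator disagree, so the cheap `index < array.length` comparison is only a safe substitute when V8 knows the backing store has no holes. Illustrative:

var holey = [1, , 3];           // index 1 is a hole
console.log(1 < holey.length);  // true  -- the bounds check would say "present"
console.log(1 in holey);        // false -- `in` correctly sees the hole
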
@@ -291,3 +292,5 @@ const ITERATOR_KIND_ENTRIES = 3;
 
 # Check whether debug is active.
 const DEBUG_IS_ACTIVE = (%_DebugIsActive() != 0);
+macro DEBUG_IS_STEPPING(function) = (%_DebugIsActive() != 0 && %DebugCallbackSupportsStepping(function));
+macro DEBUG_PREPARE_STEP_IN_IF_STEPPING(function) = if (DEBUG_IS_STEPPING(function)) %DebugPrepareStepInIfStepping(function);
index 860b62f..cc478d3 100644 (file)
@@ -227,17 +227,6 @@ function MathAtanh(x) {
   return 0.5 * MathLog((1 + x) / (1 - x));
 }
 
-// ES6 draft 09-27-13, section 20.2.2.21.
-function MathLog10(x) {
-  return MathLog(x) * 0.434294481903251828;  // log10(x) = log(x)/log(10).
-}
-
-
-// ES6 draft 09-27-13, section 20.2.2.22.
-function MathLog2(x) {
-  return MathLog(x) * 1.442695040888963407;  // log2(x) = log(x)/log(2).
-}
-
 // ES6 draft 09-27-13, section 20.2.2.17.
 function MathHypot(x, y) {  // Function length is 2.
   // We may want to introduce fast paths for two arguments and when
@@ -369,8 +358,8 @@ function SetUpMath() {
     "asinh", MathAsinh,
     "acosh", MathAcosh,
     "atanh", MathAtanh,
-    "log10", MathLog10,
-    "log2", MathLog2,
+    "log10", MathLog10,   // implemented by third_party/fdlibm
+    "log2", MathLog2,     // implemented by third_party/fdlibm
     "hypot", MathHypot,
     "fround", MathFroundJS,
     "clz32", MathClz32,
index b15ccc6..39887d6 100644 (file)
@@ -9,9 +9,8 @@ var kMessages = {
   cyclic_proto:                  ["Cyclic __proto__ value"],
   code_gen_from_strings:         ["%0"],
   constructor_special_method:    ["Class constructor may not be an accessor"],
-  generator_running:             ["Generator is already running"],
-  generator_finished:            ["Generator has already finished"],
   // TypeError
+  generator_running:             ["Generator is already running"],
   unexpected_token:              ["Unexpected token ", "%0"],
   unexpected_token_number:       ["Unexpected number"],
   unexpected_token_string:       ["Unexpected string"],
@@ -19,9 +18,12 @@ var kMessages = {
   unexpected_reserved:           ["Unexpected reserved word"],
   unexpected_strict_reserved:    ["Unexpected strict mode reserved word"],
   unexpected_eos:                ["Unexpected end of input"],
+  unexpected_template_string:    ["Unexpected template string"],
   malformed_regexp:              ["Invalid regular expression: /", "%0", "/: ", "%1"],
   malformed_regexp_flags:        ["Invalid regular expression flags"],
   unterminated_regexp:           ["Invalid regular expression: missing /"],
+  unterminated_template:         ["Unterminated template literal"],
+  unterminated_template_expr:    ["Missing } in template expression"],
   regexp_flags:                  ["Cannot supply flags when constructing one RegExp from another"],
   incompatible_method_receiver:  ["Method ", "%0", " called on incompatible receiver ", "%1"],
   multiple_defaults_in_switch:   ["More than one default clause in switch statement"],
@@ -50,6 +52,7 @@ var kMessages = {
   apply_wrong_args:              ["Function.prototype.apply: Arguments list has wrong type"],
   toMethod_non_function:         ["Function.prototype.toMethod was called on ", "%0", ", which is a ", "%1", " and not a function"],
   toMethod_non_object:           ["Function.prototype.toMethod: home object ", "%0", " is not an object"],
+  flags_getter_non_object:       ["RegExp.prototype.flags getter called on non-object ", "%0"],
   invalid_in_operator_use:       ["Cannot use 'in' operator to search for '", "%0", "' in ", "%1"],
   instanceof_function_expected:  ["Expecting a function in instanceof check, but got ", "%0"],
   instanceof_nonobject_proto:    ["Function has non-object prototype '", "%0", "' in instanceof check"],
@@ -151,6 +154,7 @@ var kMessages = {
   too_many_variables:            ["Too many variables declared (only 4194303 allowed)"],
   strict_param_dupe:             ["Strict mode function may not have duplicate parameter names"],
   strict_octal_literal:          ["Octal literals are not allowed in strict mode."],
+  template_octal_literal:        ["Octal literals are not allowed in template strings."],
   strict_duplicate_property:     ["Duplicate data property in object literal not allowed in strict mode"],
   accessor_data_property:        ["Object literal may not have data and accessor property with the same name"],
   accessor_get_set:              ["Object literal may not have multiple get/set accessors with the same name"],
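
[Editor's note] For reference, the kinds of source text the new template-literal entries are meant to report. Hedged sketch: which table entry a given input hits depends on the parser build, and it assumes an engine with template literals enabled.

try { eval("`unterminated"); } catch (e) { console.log(e.message); }  // Unterminated template literal
try { eval("`${ 1 + 2"); } catch (e) { console.log(e.message); }      // Missing } in template expression
try { eval("`\\101`"); } catch (e) { console.log(e.message); }        // Octal literals are not allowed in template strings.
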
@@ -178,7 +182,9 @@ var kMessages = {
   unexpected_super:              ["'super' keyword unexpected here"],
   extends_value_not_a_function:  ["Class extends value ", "%0", " is not a function or null"],
   prototype_parent_not_an_object: ["Class extends value does not have valid prototype property ", "%0"],
-  duplicate_constructor:         ["A class may only have one constructor"]
+  duplicate_constructor:         ["A class may only have one constructor"],
+  sloppy_lexical:                ["Block-scoped declarations (let, const, function, class) not yet supported outside strict mode"],
+  super_constructor_call:        ["A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported."]
 };
 
 
@@ -392,34 +398,26 @@ function MakeReferenceErrorEmbedded(type, arg) {
        else the line number.
  */
 function ScriptLineFromPosition(position) {
-  var lower = 0;
-  var upper = this.lineCount() - 1;
   var line_ends = this.line_ends;
+  var upper = line_ends.length - 1;
+  if (upper < 0) return -1;
 
   // We'll never find invalid positions so bail right away.
-  if (position > line_ends[upper]) {
-    return -1;
-  }
-
-  // This means we don't have to safe-guard indexing line_ends[i - 1].
-  if (position <= line_ends[0]) {
-    return 0;
-  }
-
-  // Binary search to find line # from position range.
-  while (upper >= 1) {
-    var i = (lower + upper) >> 1;
-
-    if (position > line_ends[i]) {
-      lower = i + 1;
-    } else if (position <= line_ends[i - 1]) {
-      upper = i - 1;
+  if (position > line_ends[upper]) return -1;
+  if (position <= line_ends[0]) return 0;
+
+  var lower = 1;
+  // Binary search.
+  while (true) {
+    var mid = (upper + lower) >> 1;
+    if (position <= line_ends[mid - 1]) {
+      upper = mid - 1;
+    } else if (position > line_ends[mid]) {
+      lower = mid + 1;
     } else {
-      return i;
+      return mid;
     }
   }
-
-  return -1;
 }
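
[Editor's note] A standalone sketch of the rewritten search, same logic as above: line_ends[i] holds the position of the last character of line i, and the loop returns the smallest i with position <= line_ends[i]. The two early returns guarantee lower >= 1, which makes the mid - 1 index safe.

function lineFromPosition(lineEnds, position) {
  var upper = lineEnds.length - 1;
  if (upper < 0 || position > lineEnds[upper]) return -1;  // invalid position
  if (position <= lineEnds[0]) return 0;                   // first line
  var lower = 1;
  while (true) {
    var mid = (upper + lower) >> 1;
    if (position <= lineEnds[mid - 1]) upper = mid - 1;  // strictly before line mid
    else if (position > lineEnds[mid]) lower = mid + 1;  // strictly after line mid
    else return mid;
  }
}
console.log(lineFromPosition([10, 25, 40], 26));  // -> 2 (third line)
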
 
 /**
@@ -1155,7 +1153,7 @@ var StackTraceGetter = function() {
       if (IS_UNDEFINED(stack_trace)) {
         // Neither formatted nor structured stack trace available.
         // Look further up the prototype chain.
-        holder = %GetPrototype(holder);
+        holder = %_GetPrototype(holder);
         continue;
       }
       formatted_stack_trace = FormatStackTrace(holder, stack_trace);
@@ -1221,14 +1219,13 @@ function SetUpError() {
     %AddNamedProperty(f.prototype, "name", name, DONT_ENUM);
     %SetCode(f, function(m) {
       if (%_IsConstructCall()) {
+        try { captureStackTrace(this, f); } catch (e) { }
         // Define all the expected properties directly on the error
         // object. This avoids going through getters and setters defined
         // on prototype objects.
-        %AddNamedProperty(this, 'stack', UNDEFINED, DONT_ENUM);
         if (!IS_UNDEFINED(m)) {
           %AddNamedProperty(this, 'message', ToString(m), DONT_ENUM);
         }
-        try { captureStackTrace(this, f); } catch (e) { }
       } else {
         return new f(m);
       }
@@ -1260,7 +1257,7 @@ function GetPropertyWithoutInvokingMonkeyGetters(error, name) {
   var current = error;
   // Climb the prototype chain until we find the holder.
   while (current && !%HasOwnProperty(current, name)) {
-    current = %GetPrototype(current);
+    current = %_GetPrototype(current);
   }
   if (IS_NULL(current)) return UNDEFINED;
   if (!IS_OBJECT(current)) return error[name];
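
[Editor's note] Why this lookup avoids invoking getters: user code can install an accessor anywhere on an error's prototype chain, and error formatting must not run it. A plain-JS model of the chain walk (illustrative; the real helper has further cases beyond this hunk):

Object.defineProperty(Error.prototype, "message", {
  configurable: true,
  get: function() { throw new Error("monkey getter ran!"); }
});
function getWithoutMonkeyGetters(error, name) {
  var current = error;
  // Climb the prototype chain until we find the holder, as above.
  while (current && !Object.prototype.hasOwnProperty.call(current, name))
    current = Object.getPrototypeOf(current);
  if (current === null) return undefined;
  var desc = Object.getOwnPropertyDescriptor(current, name);
  return "value" in desc ? desc.value : undefined;  // never calls the getter
}
console.log(getWithoutMonkeyGetters(new Error(), "message"));  // undefined, no throw
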
index df6dc53..100195b 100644 (file)
@@ -382,21 +382,21 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
         // Check if slack tracking is enabled.
         __ lw(t0, bit_field3);
-        __ DecodeField<Map::ConstructionCount>(t2, t0);
-        __ Branch(&allocate, eq, t2, Operand(JSFunction::kNoSlackTracking));
+        __ DecodeField<Map::Counter>(t2, t0);
+        __ Branch(&allocate, lt, t2, Operand(Map::kSlackTrackingCounterEnd));
         // Decrease generous allocation count.
-        __ Subu(t0, t0, Operand(1 << Map::ConstructionCount::kShift));
-        __ Branch(USE_DELAY_SLOT,
-            &allocate, ne, t2, Operand(JSFunction::kFinishSlackTracking));
+        __ Subu(t0, t0, Operand(1 << Map::Counter::kShift));
+        __ Branch(USE_DELAY_SLOT, &allocate, ne, t2,
+                  Operand(Map::kSlackTrackingCounterEnd));
         __ sw(t0, bit_field3);  // In delay slot.
 
         __ Push(a1, a2, a1);  // a1 = Constructor.
         __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
 
         __ Pop(a1, a2);
-        // Slack tracking counter is kNoSlackTracking after runtime call.
-        DCHECK(JSFunction::kNoSlackTracking == 0);
-        __ mov(t2, zero_reg);
+        // Slack tracking counter is Map::kSlackTrackingCounterEnd after runtime
+        // call.
+        __ li(t2, Map::kSlackTrackingCounterEnd);
 
         __ bind(&allocate);
       }
@@ -443,8 +443,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         Label no_inobject_slack_tracking;
 
         // Check if slack tracking is enabled.
-        __ Branch(&no_inobject_slack_tracking,
-            eq, t2, Operand(JSFunction::kNoSlackTracking));
+        __ Branch(&no_inobject_slack_tracking, lt, t2,
+                  Operand(Map::kSlackTrackingCounterEnd));
 
         // Allocate object with a slack.
         __ lbu(a0, FieldMemOperand(a2, Map::kPreAllocatedPropertyFieldsOffset));
index c8db844..97eed74 100644 (file)
@@ -272,66 +272,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
 }
 
 
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
-    Isolate* isolate) {
-  WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
-  WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
-  stub1.GetCode();
-  stub2.GetCode();
-}
-
-
-// See comment for class, this does NOT work for int32's that are in Smi range.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
-  Label max_negative_int;
-  // the_int_ has the answer which is a signed int32 but not a Smi.
-  // We test for the special value that has a different exponent.
-  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
-  // Test sign, and save for later conditionals.
-  __ And(sign(), the_int(), Operand(0x80000000u));
-  __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
-
-  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
-  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
-  uint32_t non_smi_exponent =
-      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-  __ li(scratch(), Operand(non_smi_exponent));
-  // Set the sign bit in scratch_ if the value was negative.
-  __ or_(scratch(), scratch(), sign());
-  // Subtract from 0 if the value was negative.
-  __ subu(at, zero_reg, the_int());
-  __ Movn(the_int(), at, sign());
-  // We should be masking the implicit first digit of the mantissa away here,
-  // but it just ends up combining harmlessly with the last digit of the
-  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
-  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
-  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
-  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-  __ srl(at, the_int(), shift_distance);
-  __ or_(scratch(), scratch(), at);
-  __ sw(scratch(), FieldMemOperand(the_heap_number(),
-                                   HeapNumber::kExponentOffset));
-  __ sll(scratch(), the_int(), 32 - shift_distance);
-  __ Ret(USE_DELAY_SLOT);
-  __ sw(scratch(), FieldMemOperand(the_heap_number(),
-                                   HeapNumber::kMantissaOffset));
-
-  __ bind(&max_negative_int);
-  // The max negative int32 is stored as a positive number in the mantissa of
-  // a double because it uses a sign bit instead of using two's complement.
-  // The actual mantissa bits stored are all 0 because the implicit most
-  // significant 1 bit is not stored.
-  non_smi_exponent += 1 << HeapNumber::kExponentShift;
-  __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
-  __ sw(scratch(),
-        FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
-  __ mov(scratch(), zero_reg);
-  __ Ret(USE_DELAY_SLOT);
-  __ sw(scratch(),
-        FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
-}
-
-
 // Handle the case where the lhs and rhs are the same object.
 // Equality is almost reflexive (everything but NaN), so this is a test
 // for "identity and not NaN".
@@ -904,7 +844,6 @@ void MathPowStub::Generate(MacroAssembler* masm) {
       // compile time and uses DoMathPowHalf instead.  We then skip this check
       // for non-constant cases of +/-0.5 as these hardly occur.
       Label not_plus_half;
-
       // Test for 0.5.
       __ Move(double_scratch, 0.5);
       __ BranchF(USE_DELAY_SLOT,
@@ -916,7 +855,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
       // double_scratch can be overwritten in the delay slot.
       // Calculates square root of base.  Check for the special case of
       // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
-      __ Move(double_scratch, -V8_INFINITY);
+      __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
       __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
       __ neg_d(double_result, double_scratch);
 
@@ -936,13 +875,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
       // double_scratch can be overwritten in the delay slot.
       // Calculates square root of base.  Check for the special case of
       // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
-      __ Move(double_scratch, -V8_INFINITY);
+      __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
       __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
       __ Move(double_result, kDoubleRegZero);
 
       // Add +0 to convert -0 to +0.
       __ add_d(double_scratch, double_base, kDoubleRegZero);
-      __ Move(double_result, 1);
+      __ Move(double_result, 1.);
       __ sqrt_d(double_scratch, double_scratch);
       __ div_d(double_result, double_result, double_scratch);
       __ jmp(&done);
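
[Editor's note] The ±0.5 exponents are special-cased because a bare square root gets the infinities and signed zeros wrong. The JS-observable corner cases (ECMA-262 15.8.2.13) that the checks above implement:

console.log(Math.pow(-Infinity, 0.5));   // Infinity (plain sqrt(-Infinity) is NaN)
console.log(Math.pow(-Infinity, -0.5));  // 0
console.log(Math.pow(-0, 0.5));          // 0 (adding +0 normalizes -0 before sqrt)
console.log(Math.pow(-0, -0.5));         // Infinity
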
@@ -1058,7 +997,6 @@ bool CEntryStub::NeedsImmovableCode() {
 
 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   CEntryStub::GenerateAheadOfTime(isolate);
-  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
@@ -1409,10 +1347,16 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
 
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register index = LoadDescriptor::NameRegister();
-  Register scratch = a3;
+  Register scratch = t1;
   Register result = v0;
   DCHECK(!scratch.is(receiver) && !scratch.is(index));
+  DCHECK(!FLAG_vector_ics ||
+         (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+          result.is(VectorLoadICDescriptor::SlotRegister())));
 
+  // StringCharAtGenerator doesn't use the result register until it has passed
+  // the different miss possibilities. If it did, we would have a conflict
+  // when FLAG_vector_ics is true.
   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -1626,8 +1570,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
-                                                          t0, &miss);
+  // Ensure that the vector and slot registers won't be clobbered before
+  // calling the miss handler.
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(t0, t1, VectorLoadICDescriptor::VectorRegister(),
+                     VectorLoadICDescriptor::SlotRegister()));
+
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, t0,
+                                                          t1, &miss);
   __ bind(&miss);
   PropertyAccessCompiler::TailCallBuiltin(
       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@@ -2829,8 +2779,12 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
 
 
 void CallICStub::Generate(MacroAssembler* masm) {
-  // r1 - function
-  // r3 - slot id (Smi)
+  // a1 - function
+  // a3 - slot id (Smi)
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -2869,38 +2823,71 @@ void CallICStub::Generate(MacroAssembler* masm) {
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&slow_start, eq, t0, Operand(at));
-  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
-  __ Branch(&miss, eq, t0, Operand(at));
-
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(t0);
-    __ GetObjectType(t0, t1, t1);
-    __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
-    __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
-    __ Addu(t0, a2, Operand(t0));
-    __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-    __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ lw(t0, FieldMemOperand(a2, with_types_offset));
-    __ Subu(t0, t0, Operand(Smi::FromInt(1)));
-    __ sw(t0, FieldMemOperand(a2, with_types_offset));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ lw(t0, FieldMemOperand(a2, generic_offset));
-    __ Addu(t0, t0, Operand(Smi::FromInt(1)));
-    __ sw(t0, FieldMemOperand(a2, generic_offset));
-    __ Branch(&slow_start);
+
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ Branch(&miss);
   }
 
-  // We are here because tracing is on or we are going monomorphic.
+  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
+  __ Branch(&uninitialized, eq, t0, Operand(at));
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(t0);
+  __ GetObjectType(t0, t1, t1);
+  __ Branch(&miss, ne, t1, Operand(JS_FUNCTION_TYPE));
+  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a2, Operand(t0));
+  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+  __ sw(at, FieldMemOperand(t0, FixedArray::kHeaderSize));
+  // We have to update statistics for runtime profiling.
+  __ lw(t0, FieldMemOperand(a2, with_types_offset));
+  __ Subu(t0, t0, Operand(Smi::FromInt(1)));
+  __ sw(t0, FieldMemOperand(a2, with_types_offset));
+  __ lw(t0, FieldMemOperand(a2, generic_offset));
+  __ Addu(t0, t0, Operand(Smi::FromInt(1)));
+  __ Branch(USE_DELAY_SLOT, &slow_start);
+  __ sw(t0, FieldMemOperand(a2, generic_offset));  // In delay slot.
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(a1, &miss);
+
+  // Goto miss case if we do not have a function.
+  __ GetObjectType(a1, t0, t0);
+  __ Branch(&miss, ne, t0, Operand(JS_FUNCTION_TYPE));
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, t0);
+  __ Branch(&miss, eq, a1, Operand(t0));
+
+  // Update stats.
+  __ lw(t0, FieldMemOperand(a2, with_types_offset));
+  __ Addu(t0, t0, Operand(Smi::FromInt(1)));
+  __ sw(t0, FieldMemOperand(a2, with_types_offset));
+
+  // Store the function.
+  __ sll(t0, a3, kPointerSizeLog2 - kSmiTagSize);
+  __ Addu(t0, a2, Operand(t0));
+  __ Addu(t0, t0, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sw(a1, MemOperand(t0, 0));
+
+  // Update the write barrier.
+  __ mov(t1, a1);
+  __ RecordWrite(a2, t0, t1, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Branch(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
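
[Editor's note] A plain-JS model of the feedback-slot state machine this stub now handles inline — not V8 internals, where the states are sentinel symbols in the feedback vector: a slot goes uninitialized -> monomorphic on its first JSFunction callee and monomorphic -> megamorphic when a different callee shows up, with the Array function always deferred to the runtime as noted above.

function FeedbackSlot() { this.state = "uninitialized"; this.callee = null; }
FeedbackSlot.prototype.observe = function(fn) {
  if (this.state === "uninitialized" && typeof fn === "function") {
    this.state = "monomorphic"; this.callee = fn;    // remember the function
  } else if (this.state === "monomorphic" && this.callee !== fn) {
    this.state = "megamorphic"; this.callee = null;  // generic from now on
  }
};
var slot = new FeedbackSlot();
slot.observe(Math.max);  console.log(slot.state);  // "monomorphic"
slot.observe(Math.min);  console.log(slot.state);  // "megamorphic"
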
 
@@ -3353,20 +3340,43 @@ void SubStringStub::Generate(MacroAssembler* masm) {
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in a0.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(a0, &check_heap_number);
+  Label not_smi;
+  __ JumpIfNotSmi(a0, &not_smi);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
+  __ bind(&not_smi);
 
-  __ bind(&check_heap_number);
+  Label not_heap_number;
   __ lw(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&call_builtin, ne, a1, Operand(at));
+  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+  // a0: object
+  // a1: instance type.
+  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&not_heap_number);
+
+  Label not_string, slow_string;
+  __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
+  // Check if string has a cached array index.
+  __ lw(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
+  __ Branch(&slow_string, ne, at, Operand(zero_reg));
+  __ IndexFromHash(a2, a0);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
+  __ bind(&slow_string);
+  __ push(a0);  // Push argument.
+  __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+  __ bind(&not_string);
+
+  Label not_oddball;
+  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ lw(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+  __ bind(&not_oddball);
 
-  __ bind(&call_builtin);
-  __ push(a0);
+  __ push(a0);  // Push argument.
   __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
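
[Editor's note] The rewritten stub dispatches on instance type, fast-pathing the common inputs before falling back to the builtin. The JS-visible behavior of each path (illustrative):

console.log(Number(42));      // Smi: returned unchanged
console.log(Number(3.25));    // heap number: returned unchanged
console.log(Number("123"));   // short numeric strings may hit the cached-array-index path -> 123
console.log(Number("0x1f"));  // other strings tail-call Runtime::kStringToNumber -> 31
console.log(Number(true));    // oddball: reads the oddball's to-number field -> 1
console.log(Number(null));    // oddball -> 0
console.log(Number({}));      // anything else falls through to the builtin -> NaN
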
 
index afad32b..9796fd4 100644 (file)
@@ -73,55 +73,6 @@ class RestoreRegistersStateStub: public PlatformCodeStub {
 };
 
 
-// This stub can convert a signed int32 to a heap number (double).  It does
-// not work for int32s that are in Smi range!  No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
-  WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
-                             Register the_heap_number, Register scratch,
-                             Register scratch2)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = IntRegisterBits::encode(the_int.code()) |
-                 HeapNumberRegisterBits::encode(the_heap_number.code()) |
-                 ScratchRegisterBits::encode(scratch.code()) |
-                 SignRegisterBits::encode(scratch2.code());
-    DCHECK(IntRegisterBits::is_valid(the_int.code()));
-    DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
-    DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
-    DCHECK(SignRegisterBits::is_valid(scratch2.code()));
-  }
-
-  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
-  Register the_int() const {
-    return Register::from_code(IntRegisterBits::decode(minor_key_));
-  }
-
-  Register the_heap_number() const {
-    return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
-  }
-
-  Register scratch() const {
-    return Register::from_code(ScratchRegisterBits::decode(minor_key_));
-  }
-
-  Register sign() const {
-    return Register::from_code(SignRegisterBits::decode(minor_key_));
-  }
-
-  // Minor key encoding in 16 bits.
-  class IntRegisterBits: public BitField<int, 0, 4> {};
-  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
-  class ScratchRegisterBits: public BitField<int, 8, 4> {};
-  class SignRegisterBits: public BitField<int, 12, 4> {};
-
-  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-  DEFINE_PLATFORM_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
-};
-
-
 class RecordWriteStub: public PlatformCodeStub {
  public:
   RecordWriteStub(Isolate* isolate,
@@ -150,7 +101,7 @@ class RecordWriteStub: public PlatformCodeStub {
     INCREMENTAL_COMPACTION
   };
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
   static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
     const unsigned offset = masm->instr_at(pos) & kImm16Mask;
@@ -277,9 +228,9 @@ class RecordWriteStub: public PlatformCodeStub {
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+  inline Major MajorKey() const FINAL { return RecordWrite; }
 
-  virtual void Generate(MacroAssembler* masm) OVERRIDE;
+  void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -287,7 +238,7 @@ class RecordWriteStub: public PlatformCodeStub {
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  void Activate(Code* code) {
+  void Activate(Code* code) OVERRIDE {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
@@ -335,7 +286,7 @@ class DirectCEntryStub: public PlatformCodeStub {
   void GenerateCall(MacroAssembler* masm, Register target);
 
  private:
-  bool NeedsImmovableCode() { return true; }
+  bool NeedsImmovableCode() OVERRIDE { return true; }
 
   DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
   DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
@@ -367,7 +318,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
                                      Register r0,
                                      Register r1);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
  private:
   static const int kInlinedProbes = 4;
index 599aace..fbd4044 100644 (file)
@@ -1145,7 +1145,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
   DCHECK(*reinterpret_cast<double*>
          (ExternalReference::math_exp_constants(8).address()) == 1);
-  __ Move(double_scratch2, 1);
+  __ Move(double_scratch2, 1.);
   __ add_d(result, result, double_scratch2);
   __ srl(temp1, temp2, 11);
   __ Ext(temp2, temp2, 0, 11);
index c2eb4ca..668481e 100644 (file)
@@ -560,52 +560,49 @@ enum SecondaryField {
 // because 'NegateCondition' function flips LSB to negate condition.
 enum Condition {
   // Any value < 0 is considered no_condition.
-  kNoCondition  = -1,
-
-  overflow      =  0,
-  no_overflow   =  1,
-  Uless         =  2,
-  Ugreater_equal=  3,
-  equal         =  4,
-  not_equal     =  5,
-  Uless_equal   =  6,
-  Ugreater      =  7,
-  negative      =  8,
-  positive      =  9,
-  parity_even   = 10,
-  parity_odd    = 11,
-  less          = 12,
+  kNoCondition = -1,
+  overflow = 0,
+  no_overflow = 1,
+  Uless = 2,
+  Ugreater_equal = 3,
+  equal = 4,
+  not_equal = 5,
+  Uless_equal = 6,
+  Ugreater = 7,
+  negative = 8,
+  positive = 9,
+  parity_even = 10,
+  parity_odd = 11,
+  less = 12,
   greater_equal = 13,
-  less_equal    = 14,
-  greater       = 15,
-  ueq           = 16,  // Unordered or Equal.
-  nue           = 17,  // Not (Unordered or Equal).
-
-  cc_always     = 18,
+  less_equal = 14,
+  greater = 15,
+  ueq = 16,  // Unordered or Equal.
+  nue = 17,  // Not (Unordered or Equal).
+  cc_always = 18,
 
   // Aliases.
-  carry         = Uless,
-  not_carry     = Ugreater_equal,
-  zero          = equal,
-  eq            = equal,
-  not_zero      = not_equal,
-  ne            = not_equal,
-  nz            = not_equal,
-  sign          = negative,
-  not_sign      = positive,
-  mi            = negative,
-  pl            = positive,
-  hi            = Ugreater,
-  ls            = Uless_equal,
-  ge            = greater_equal,
-  lt            = less,
-  gt            = greater,
-  le            = less_equal,
-  hs            = Ugreater_equal,
-  lo            = Uless,
-  al            = cc_always,
-
-  cc_default    = kNoCondition
+  carry = Uless,
+  not_carry = Ugreater_equal,
+  zero = equal,
+  eq = equal,
+  not_zero = not_equal,
+  ne = not_equal,
+  nz = not_equal,
+  sign = negative,
+  not_sign = positive,
+  mi = negative,
+  pl = positive,
+  hi = Ugreater,
+  ls = Uless_equal,
+  ge = greater_equal,
+  lt = less,
+  gt = greater,
+  le = less_equal,
+  hs = Ugreater_equal,
+  lo = Uless,
+  al = cc_always,
+  cc_default = kNoCondition
 };
 
 
index b40d7f4..e39b368 100644 (file)
@@ -20,6 +20,12 @@ int Deoptimizer::patch_size() {
 }
 
 
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+  // Empty because there is no need for relocation information for the code
+  // patching in Deoptimizer::PatchCodeForDeoptimization below.
+}
+
+
 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
   Address code_start_address = code->instruction_start();
   // Invalidate the relocation information, as it will become invalid by the
index e685cc9..c1a8291 100644 (file)
@@ -204,10 +204,10 @@ void FullCodeGenerator::Generate() {
     Comment cmnt(masm_, "[ Allocate context");
     // Argument to NewContext is the function, which is still in a1.
     bool need_write_barrier = true;
-    if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+    if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
       __ push(a1);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewScriptContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -929,7 +929,7 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   EmitDebugCheckDeclarationContext(variable);
 
   // Load instance object.
-  __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope()));
+  __ LoadContext(a1, scope_->ContextChainLength(scope_->ScriptScope()));
   __ lw(a1, ContextOperand(a1, variable->interface()->Index()));
   __ lw(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
 
@@ -1102,6 +1102,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 
   // Get the object to enumerate over. If the object is null or undefined, skip
   // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  SetExpressionPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
   __ mov(a0, result_register());  // Result as param to InvokeBuiltin below.
   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1201,6 +1202,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // Generate code for doing the condition check.
   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
+  SetExpressionPosition(stmt->each());
+
   // Load the current count to a0, load the length to a1.
   __ lw(a0, MemOperand(sp, 0 * kPointerSize));
   __ lw(a1, MemOperand(sp, 1 * kPointerSize));
@@ -1270,48 +1273,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 }
 
 
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
-  Comment cmnt(masm_, "[ ForOfStatement");
-  SetStatementPosition(stmt);
-
-  Iteration loop_statement(this, stmt);
-  increment_loop_depth();
-
-  // var iterator = iterable[Symbol.iterator]();
-  VisitForEffect(stmt->assign_iterator());
-
-  // Loop entry.
-  __ bind(loop_statement.continue_label());
-
-  // result = iterator.next()
-  VisitForEffect(stmt->next_result());
-
-  // if (result.done) break;
-  Label result_not_done;
-  VisitForControl(stmt->result_done(),
-                  loop_statement.break_label(),
-                  &result_not_done,
-                  &result_not_done);
-  __ bind(&result_not_done);
-
-  // each = result.value
-  VisitForEffect(stmt->assign_each());
-
-  // Generate code for the body of the loop.
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
-  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
-  __ jmp(loop_statement.continue_label());
-
-  // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ bind(loop_statement.break_label());
-  decrement_loop_depth();
-}
-
-
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
   // Use the fast case closure allocation code that allocates in new
@@ -1369,6 +1330,19 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
 }
 
 
+void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
+                                                  int offset) {
+  if (NeedsHomeObject(initializer)) {
+    __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+    __ li(StoreDescriptor::NameRegister(),
+          Operand(isolate()->factory()->home_object_symbol()));
+    __ lw(StoreDescriptor::ValueRegister(),
+          MemOperand(sp, offset * kPointerSize));
+    CallStoreIC();
+  }
+}
+
+
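
[Editor's note] A [[HomeObject]] is what makes `super` references inside concise methods work; the helper above stores it right after the method's value is defined. JS-level effect, assuming an engine with ES6 method shorthand and `super` enabled (illustrative):

var base = { greet: function() { return "base"; } };
var obj = {
  __proto__: base,
  greet() { return "derived of " + super.greet(); }  // needs [[HomeObject]] = obj
};
console.log(obj.greet());  // "derived of base"
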
 void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
@@ -1725,6 +1699,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
             __ lw(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+            if (NeedsHomeObject(value)) {
+              __ Move(StoreDescriptor::ReceiverRegister(), v0);
+              __ li(StoreDescriptor::NameRegister(),
+                    Operand(isolate()->factory()->home_object_symbol()));
+              __ lw(StoreDescriptor::ValueRegister(), MemOperand(sp));
+              CallStoreIC();
+            }
           } else {
             VisitForEffect(value);
           }
@@ -1736,6 +1718,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
+          EmitSetHomeObjectIfNeeded(value, 2);
           __ li(a0, Operand(Smi::FromInt(SLOPPY)));  // PropertyAttributes.
           __ push(a0);
           __ CallRuntime(Runtime::kSetProperty, 4);
@@ -1772,7 +1755,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
     __ push(a0);
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
+    EmitSetHomeObjectIfNeeded(it->second->getter, 2);
     EmitAccessor(it->second->setter);
+    EmitSetHomeObjectIfNeeded(it->second->setter, 3);
     __ li(a0, Operand(Smi::FromInt(NONE)));
     __ push(a0);
     __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -2200,14 +2185,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   VisitForAccumulatorValue(value);
   __ pop(a1);
 
-  // Check generator state.
-  Label wrong_state, closed_state, done;
-  __ lw(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
-  __ Branch(&closed_state, eq, a3, Operand(zero_reg));
-  __ Branch(&wrong_state, lt, a3, Operand(zero_reg));
-
   // Load suspended function and context.
   __ lw(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
   __ lw(t0, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -2230,7 +2207,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
 
   // Enter a new JavaScript frame, and initialize its slots as they were when
   // the generator was suspended.
-  Label resume_frame;
+  Label resume_frame, done;
   __ bind(&push_frame);
   __ Call(&resume_frame);
   __ jmp(&done);
@@ -2279,26 +2256,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   // Not reached: the runtime call returns elsewhere.
   __ stop("not-reached");
 
-  // Reach here when generator is closed.
-  __ bind(&closed_state);
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    // Return completed iterator result when generator is closed.
-    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-    __ push(a2);
-    // Pop value from top-of-stack slot; box result into result register.
-    EmitCreateIteratorResult(true);
-  } else {
-    // Throw the provided value.
-    __ push(a0);
-    __ CallRuntime(Runtime::kThrow, 1);
-  }
-  __ jmp(&done);
-
-  // Throw error if we attempt to operate on a running generator.
-  __ bind(&wrong_state);
-  __ push(a1);
-  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
   __ bind(&done);
   context()->Plug(result_register());
 }
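
[Editor's note] The closed- and running-state checks could be dropped from the resume path because those cases are handled before this code is reached; consistently, the generator_finished message was removed from messages.js above. Per ES6 semantics, resuming a finished generator is not an error — it simply keeps producing completed results (illustrative, assuming generators are enabled):

function* g() { yield 1; }
var it = g();
console.log(it.next());  // { value: 1, done: false }
console.log(it.next());  // { value: undefined, done: true }
console.log(it.next());  // { value: undefined, done: true }  -- no "finished" throw
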
@@ -2511,6 +2468,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
     __ push(scratch);
     VisitForStackValue(key);
     VisitForStackValue(value);
+    EmitSetHomeObjectIfNeeded(value, 2);
 
     switch (property->kind()) {
       case ObjectLiteral::Property::CONSTANT:
@@ -2684,7 +2642,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
     // Perform the assignment.
     __ bind(&assign);
     EmitStoreToStackLocalOrContextSlot(var, location);
-
   } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
@@ -2705,8 +2662,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
+  } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+    __ CallRuntime(Runtime::kThrowConstAssignError, 0);
   }
-  // Non-initializing assignments to consts are ignored.
 }
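
[Editor's note] Previously a non-initializing assignment to a `const` binding was silently ignored; with IsSignallingAssignmentToConst it now throws where the spec requires. Observable effect on an engine with this change active (illustrative):

"use strict";
const x = 1;
try { x = 2; } catch (e) { console.log("threw: " + e); }  // throws instead of being ignored
console.log(x);  // still 1
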
 
 
@@ -5084,7 +5042,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope() ||
+  if (declaration_scope->is_script_scope() ||
       declaration_scope->is_module_scope()) {
     // Contexts nested in the native context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
index c9e3686..cdc68c8 100644 (file)
@@ -51,9 +51,9 @@ class SafepointGenerator FINAL  : public CallWrapper {
         deopt_mode_(mode) { }
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const OVERRIDE {}
+  void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const OVERRIDE {
+  void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
@@ -2695,10 +2695,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
 
    private:
@@ -2858,6 +2858,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
       __ Addu(sp, sp, Operand(sp_delta));
     }
   } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
     Register reg = ToRegister(instr->parameter_count());
     // The argument count parameter is a smi
     __ SmiUntag(reg);
@@ -2888,13 +2889,17 @@ template <class T>
 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   DCHECK(FLAG_vector_ics);
   Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = VectorLoadICDescriptor::SlotRegister();
   DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(a0));
+
+  AllowDeferredHandleDereference vector_structure_check;
   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   __ li(vector_register, vector);
   // No need to allocate this register.
-  DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
-  int index = vector->GetIndex(instr->hydrogen()->slot());
-  __ li(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ li(slot_register, Operand(Smi::FromInt(index)));
 }
 
 
@@ -3676,10 +3681,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
    public:
     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LMathAbs* instr_;
   };
@@ -3836,7 +3842,7 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   // Math.pow(-Infinity, 0.5) == Infinity
   // Math.sqrt(-Infinity) == NaN
   Label done;
-  __ Move(temp, -V8_INFINITY);
+  __ Move(temp, static_cast<double>(-V8_INFINITY));
   __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
   // Set up Infinity in the delay slot.
   // result is overwritten if the branch is not taken.
@@ -3945,44 +3951,73 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
   DCHECK(receiver.is(a1));
   DCHECK(name.is(a2));
 
-  Register scratch = a3;
-  Register extra = t0;
-  Register extra2 = t1;
-  Register extra3 = t2;
+  Register scratch = t0;
+  Register extra = t1;
+  Register extra2 = t2;
+  Register extra3 = t5;
+#ifdef DEBUG
+  Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+  Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(slot, vector, scratch, extra, extra2, extra3));
+#endif
 
   // Important for the tail-call.
   bool must_teardown_frame = NeedsEagerFrame();
 
-  // The probe will tail call to a handler if found.
-  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
-                                         must_teardown_frame, receiver, name,
-                                         scratch, extra, extra2, extra3);
+  if (!instr->hydrogen()->is_just_miss()) {
+    DCHECK(!instr->hydrogen()->is_keyed_load());
+
+    // The probe will tail call to a handler if found.
+    isolate()->stub_cache()->GenerateProbe(
+        masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+        receiver, name, scratch, extra, extra2, extra3);
+  }
 
   // Tail call to miss if we ended up here.
   if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
-  LoadIC::GenerateMiss(masm());
+  if (instr->hydrogen()->is_keyed_load()) {
+    KeyedLoadIC::GenerateMiss(masm());
+  } else {
+    LoadIC::GenerateMiss(masm());
+  }
 }
 
 
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   DCHECK(ToRegister(instr->result()).is(v0));
 
-  LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
 
-  if (instr->target()->IsConstantOperand()) {
-    LConstantOperand* target = LConstantOperand::cast(instr->target());
-    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-    __ Call(code, RelocInfo::CODE_TARGET);
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Jump(target);
+    }
   } else {
-    DCHECK(instr->target()->IsRegister());
-    Register target = ToRegister(instr->target());
-    generator.BeforeCall(__ CallSize(target));
-    __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ Call(target);
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      __ Call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      __ Addu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Call(target);
+    }
+    generator.AfterCall();
   }
-  generator.AfterCall();
 }
 
 
@@ -4492,10 +4527,9 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStringCharCodeAt(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4547,10 +4581,11 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
    public:
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharFromCode* instr_;
   };
@@ -4625,14 +4660,15 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
    public:
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_,
                                        instr_->value(),
                                        instr_->temp1(),
                                        instr_->temp2(),
                                        SIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagI* instr_;
   };
@@ -4653,14 +4689,15 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
    public:
     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_,
                                        instr_->value(),
                                        instr_->temp1(),
                                        instr_->temp2(),
                                        UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagU* instr_;
   };
@@ -4747,10 +4784,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
    public:
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredNumberTagD(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagD* instr_;
   };
@@ -4973,10 +5009,9 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
    public:
     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredTaggedToI(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LTaggedToI* instr_;
   };
@@ -5182,11 +5217,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
         : LDeferredCode(codegen), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -5305,10 +5341,9 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
    public:
     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredAllocate(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LAllocate* instr_;
   };
@@ -5718,10 +5753,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
    public:
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStackCheck(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStackCheck* instr_;
   };
@@ -5869,10 +5903,11 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
           object_(object),
           index_(index) {
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LLoadFieldByIndex* instr_;
     Register result_;
index 7d5ffad..77dea5b 100644 (file)
@@ -1103,9 +1103,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
       UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
   LOperand* name_register =
       UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+    vector =
+        UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+  }
+
   // Not marked as call. It can't deoptimize, and it never returns.
   return new (zone()) LTailCallThroughMegamorphicCache(
-      context, receiver_register, name_register);
+      context, receiver_register, name_register, slot, vector);
 }
 
 
@@ -1401,8 +1409,14 @@ LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
   DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
-  LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor);
-  return AssignEnvironment(DefineAsRegister(div));
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
 
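The three flags gate exactly the inputs for which flooring division can
deoptimize. A runnable sketch of the semantics (assuming 32-bit ints, as
on MIPS):

    #include <cstdio>

    // Flooring (round-toward-negative-infinity) division; C++ '/' truncates
    // toward zero, so a negative remainder needs one extra adjustment.
    int FlooringDiv(int dividend, int divisor) {
      int q = dividend / divisor;
      int r = dividend % divisor;
      if (r != 0 && ((r < 0) != (divisor < 0))) --q;
      return q;
    }

    int main() {
      printf("%d\n", FlooringDiv(-7, 2));  // -4 (truncation would give -3)
      // Inputs this sketch must never see, one per flag in the hunk:
      //   kCanBeDivByZero:     divisor == 0
      //   kCanOverflow:        INT_MIN / -1 (signed overflow)
      //   kBailoutOnMinusZero: 0 / negative, where the JS result is -0
    }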
 
@@ -2060,7 +2074,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object =
       UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
   LLoadGlobalGeneric* result =
@@ -2119,7 +2133,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
@@ -2186,7 +2200,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
index 36e5b57..ecffef7 100644 (file)
@@ -163,17 +163,13 @@ class LCodeGen;
   V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const FINAL OVERRIDE {                      \
-    return LInstruction::k##type;                                           \
-  }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;   \
-  virtual const char* Mnemonic() const FINAL OVERRIDE {               \
-    return mnemonic;                                                        \
-  }                                                                         \
-  static L##type* cast(LInstruction* instr) {                               \
-    DCHECK(instr->Is##type());                                              \
-    return reinterpret_cast<L##type*>(instr);                               \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const FINAL { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) FINAL;              \
+  const char* Mnemonic() const FINAL { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
   }
 
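Dropping 'virtual ... OVERRIDE' in favour of bare FINAL is safe because of
a C++11 rule: without an explicit 'virtual', 'final' only compiles on a
function that actually overrides an inherited virtual. A sketch with
illustrative names:

    struct LInstructionBase {
      virtual const char* Mnemonic() const = 0;
      virtual ~LInstructionBase() = default;
    };

    struct LAddI : LInstructionBase {
      // Implicitly virtual because it overrides Mnemonic(); 'final' would
      // be ill-formed on a non-overriding function, so it already gives
      // the compile-time check that OVERRIDE used to provide.
      const char* Mnemonic() const final { return "add-i"; }
    };

    int main() { LAddI a; return a.Mnemonic()[0] == 'a' ? 0 : 1; }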
 
@@ -288,11 +284,9 @@ class LTemplateResultInstruction : public LInstruction {
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const FINAL OVERRIDE {
-    return R != 0 && result() != NULL;
-  }
+  bool HasResult() const FINAL { return R != 0 && result() != NULL; }
   void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() const { return results_[0]; }
+  LOperand* result() const OVERRIDE { return results_[0]; }
 
  protected:
   EmbeddedContainer<LOperand*, R> results_;
@@ -310,11 +304,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
 
  private:
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return I; }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
+  int TempCount() FINAL { return T; }
+  LOperand* TempAt(int i) FINAL { return temps_[i]; }
 };
 
 
@@ -329,8 +323,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const FINAL OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsGap() const FINAL { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
     DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
@@ -370,7 +364,7 @@ class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -382,10 +376,10 @@ class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
-  virtual bool IsControl() const OVERRIDE { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
 
@@ -428,7 +422,7 @@ class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
 
 class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const OVERRIDE { return true; }
+  bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
@@ -439,12 +433,10 @@ class LLabel FINAL : public LGap {
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -462,9 +454,7 @@ class LLabel FINAL : public LGap {
 
 class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
@@ -483,19 +473,23 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
 
 
 class LTailCallThroughMegamorphicCache FINAL
-    : public LTemplateInstruction<0, 3, 0> {
+    : public LTemplateInstruction<0, 5, 0> {
  public:
-  explicit LTailCallThroughMegamorphicCache(LOperand* context,
-                                            LOperand* receiver,
-                                            LOperand* name) {
+  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+                                   LOperand* name, LOperand* slot,
+                                   LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = receiver;
     inputs_[2] = name;
+    inputs_[3] = slot;
+    inputs_[4] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
   LOperand* name() { return inputs_[2]; }
+  LOperand* slot() { return inputs_[3]; }
+  LOperand* vector() { return inputs_[4]; }
 
   DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
                                "tail-call-through-megamorphic-cache")
@@ -505,9 +499,7 @@ class LTailCallThroughMegamorphicCache FINAL
 
 class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };
 
@@ -517,7 +509,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const FINAL OVERRIDE { return true; }
+  bool IsControl() const FINAL { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -606,7 +598,7 @@ class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* length() { return inputs_[1]; }
   LOperand* index() { return inputs_[2]; }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -846,7 +838,7 @@ class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1034,7 +1026,7 @@ class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1051,7 +1043,7 @@ class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1066,7 +1058,7 @@ class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1084,7 +1076,7 @@ class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1106,7 +1098,7 @@ class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
 
   Token::Value op() const { return hydrogen()->token(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1122,7 +1114,7 @@ class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1152,7 +1144,7 @@ class LHasCachedArrayIndexAndBranch FINAL
                                "has-cached-array-index-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1170,7 +1162,7 @@ class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1367,7 +1359,7 @@ class LBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1512,11 +1504,9 @@ class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticD;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -1540,9 +1530,9 @@ class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* right() { return inputs_[2]; }
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const FINAL  { return LInstruction::kArithmeticT; }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const FINAL { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -1651,7 +1641,7 @@ class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
 
@@ -1732,7 +1722,7 @@ class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1751,7 +1741,7 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1790,7 +1780,7 @@ class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
   LOperand* function() { return inputs_[0]; }
   LOperand* code_object() { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
   DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@@ -1807,7 +1797,7 @@ class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
@@ -1851,7 +1841,7 @@ class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1871,11 +1861,12 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
 
   const CallInterfaceDescriptor descriptor() { return descriptor_; }
 
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
@@ -1883,11 +1874,11 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return inputs_.length(); }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+  int TempCount() FINAL { return 0; }
+  LOperand* TempAt(int i) FINAL { return NULL; }
 };
 
 
@@ -1904,7 +1895,7 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1940,7 +1931,7 @@ class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1959,7 +1950,7 @@ class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1976,7 +1967,7 @@ class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -2170,7 +2161,7 @@ class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Representation representation() const {
     return hydrogen()->field_representation();
@@ -2193,7 +2184,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
@@ -2225,7 +2216,7 @@ class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
@@ -2251,7 +2242,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
@@ -2275,7 +2266,7 @@ class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2571,7 +2562,7 @@ class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -2592,9 +2583,7 @@ class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry() {}
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
@@ -2797,7 +2786,7 @@ class LChunkBuilder FINAL : public LChunkBuilderBase {
 
   // An input operand in register, stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
index deb3ff6..90c3499 100644 (file)
@@ -591,11 +591,12 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   }
 
   bind(&done);
-  // Check that the value is a normal property.
+  // Check that the value is a field property.
   // reg2: elements + (index * kPointerSize).
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   lw(reg1, FieldMemOperand(reg2, kDetailsOffset));
+  DCHECK_EQ(FIELD, 0);
   And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
   Branch(miss, ne, at, Operand(zero_reg));
 
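The new DCHECK documents why the And + Branch pair suffices: FIELD is
encoded as type 0, so "type == FIELD" reduces to "masked detail bits are
zero". A sketch (the non-FIELD enum values below are illustrative, and a
3-bit type field is assumed):

    #include <cassert>

    enum PropertyType { FIELD = 0, CONSTANT = 1, CALLBACKS = 3 };
    const unsigned kTypeMask = 0x7;  // stands in for TypeField::kMask

    bool IsFieldProperty(unsigned details) {
      return (details & kTypeMask) == 0;
    }

    int main() {
      static_assert(FIELD == 0, "masked-zero test relies on this");
      assert(IsFieldProperty(FIELD | 0x8));  // high bits: other details
      assert(!IsFieldProperty(CONSTANT));
    }
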
@@ -1548,6 +1549,12 @@ void MacroAssembler::BranchF(Label* target,
 }
 
 
+void MacroAssembler::Move(FPURegister dst, float imm) {
+  li(at, Operand(bit_cast<int32_t>(imm)));
+  mtc1(at, dst);
+}
+
+
 void MacroAssembler::Move(FPURegister dst, double imm) {
   static const DoubleRepresentation minus_zero(-0.0);
   static const DoubleRepresentation zero(0.0);
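
The new float overload materializes the immediate through a GPR, which is
why bit_cast is needed. It also explains the literal changes elsewhere in
this commit ('1.' and static_cast<double>(-V8_INFINITY)): an int literal
would now be ambiguous between the float and double overloads, and an
INFINITY-style macro could silently pick the float one. A sketch of the
bit reinterpretation itself:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int32_t BitCastFloat(float f) {
      int32_t bits;
      static_assert(sizeof bits == sizeof f, "requires 32-bit float");
      std::memcpy(&bits, &f, sizeof bits);  // defined, unlike pointer puns
      return bits;
    }

    int main() {
      printf("0x%08x\n", static_cast<unsigned>(BitCastFloat(1.0f)));
      // 0x3f800000: the pattern 'li' loads and 'mtc1' moves to the FPU
      printf("0x%08x\n", static_cast<unsigned>(BitCastFloat(-0.0f)));
      // 0x80000000: sign bit only
    }
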
@@ -3991,17 +3998,17 @@ void MacroAssembler::CheckMap(Register obj,
 }
 
 
-void MacroAssembler::DispatchMap(Register obj,
-                                 Register scratch,
-                                 Handle<Map> map,
-                                 Handle<Code> success,
-                                 SmiCheckType smi_check_type) {
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+                                     Register scratch2, Handle<WeakCell> cell,
+                                     Handle<Code> success,
+                                     SmiCheckType smi_check_type) {
   Label fail;
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, &fail);
   }
-  lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
-  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
+  lw(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
+  GetWeakValue(scratch2, cell);
+  Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
   bind(&fail);
 }
 
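DispatchMap embedded a strong Handle<Map>; DispatchWeakMap compares against
the contents of a WeakCell instead, so the generated code no longer keeps
the map alive. The shape of the check in plain C++ (toy types; the real
cell holds a tagged value that becomes a cleared sentinel once the map
dies):

    struct Map {};
    struct WeakCell { Map* value; };   // null models a cleared cell
    struct HeapObject { Map* map; };

    bool DispatchOnWeakMap(const HeapObject* obj, const WeakCell* cell) {
      // A cleared cell never matches a live map, so the stub falls through
      // to 'fail' instead of pinning the map in memory.
      return cell->value != nullptr && obj->map == cell->value;
    }

    int main() {
      Map m;
      HeapObject o{&m};
      WeakCell live{&m}, cleared{nullptr};
      bool ok = DispatchOnWeakMap(&o, &live) &&
                !DispatchOnWeakMap(&o, &cleared);
      return ok ? 0 : 1;
    }
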
@@ -4020,6 +4027,19 @@ void MacroAssembler::CheckMap(Register obj,
 }
 
 
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
+  li(value, Operand(cell));
+  lw(value, FieldMemOperand(value, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+                                   Label* miss) {
+  GetWeakValue(value, cell);
+  JumpIfSmi(value, miss);
+}
+
+
 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) {
   if (IsMipsSoftFloatABI) {
     if (kArchEndian == kLittle) {
@@ -5785,18 +5805,6 @@ void MacroAssembler::CheckPageFlag(
 }
 
 
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
-                                        Register scratch,
-                                        Label* if_deprecated) {
-  if (map->CanBeDeprecated()) {
-    li(scratch, Operand(map));
-    lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
-    And(scratch, scratch, Operand(Map::Deprecated::kMask));
-    Branch(if_deprecated, ne, scratch, Operand(zero_reg));
-  }
-}
-
-
 void MacroAssembler::JumpIfBlack(Register object,
                                  Register scratch0,
                                  Register scratch1,
index d500eaa..0bc1e15 100644 (file)
@@ -250,8 +250,10 @@ class MacroAssembler: public Assembler {
     Mthc1(src_high, dst);
   }
 
-  // Conditional move.
+  void Move(FPURegister dst, float imm);
   void Move(FPURegister dst, double imm);
+
+  // Conditional move.
   void Movz(Register rd, Register rs, Register rt);
   void Movn(Register rd, Register rs, Register rt);
   void Movt(Register rd, Register rs, uint16_t cc = 0);
@@ -313,10 +315,6 @@ class MacroAssembler: public Assembler {
                      Condition cc,
                      Label* condition_met);
 
-  void CheckMapDeprecated(Handle<Map> map,
-                          Register scratch,
-                          Label* if_deprecated);
-
   // Check if object is in new space.  Jumps if the object is not in new space.
   // The register scratch can be object itself, but it will be clobbered.
   void JumpIfNotInNewSpace(Register object,
@@ -1082,15 +1080,19 @@ class MacroAssembler: public Assembler {
                 Label* fail,
                 SmiCheckType smi_check_type);
 
-  // Check if the map of an object is equal to a specified map and branch to a
-  // specified target if equal. Skip the smi check if not required (object is
-  // known to be a heap object)
-  void DispatchMap(Register obj,
-                   Register scratch,
-                   Handle<Map> map,
-                   Handle<Code> success,
-                   SmiCheckType smi_check_type);
+  // Check if the map of an object is equal to the map held by a specified
+  // weak cell and branch to a specified target if equal. Skip the smi check
+  // if not required (object is known to be a heap object).
+  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+                       Handle<WeakCell> cell, Handle<Code> success,
+                       SmiCheckType smi_check_type);
+
+  // Get value of the weak cell.
+  void GetWeakValue(Register value, Handle<WeakCell> cell);
 
+  // Load the value of the weak cell into the value register. Branch to the
+  // given miss label if the weak cell was cleared.
+  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
 
   // Load and check the instance type of an object for being a string.
   // Loads the type into the second argument register.
index 5d51e63..b8f5821 100644 (file)
@@ -2192,6 +2192,14 @@ void Assembler::ext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
 }
 
 
+void Assembler::dext_(Register rt, Register rs, uint16_t pos, uint16_t size) {
+  // Should be called via MacroAssembler::Ext.
+  // Dext instr has 'rt' field as dest, and two uint5: msb, lsb.
+  DCHECK(kArchVariant == kMips64r2 || kArchVariant == kMips64r6);
+  GenInstrRegister(SPECIAL3, rs, rt, size - 1, pos, DEXT);
+}
+
+
 void Assembler::pref(int32_t hint, const MemOperand& rs) {
   DCHECK(is_uint5(hint) && is_uint16(rs.offset_));
   Instr instr = PREF | (rs.rm().code() << kRsShift) | (hint << kRtShift)
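
Reference semantics of the DEXT (doubleword extract bit field) instruction
the new emitter encodes: take 'size' bits of rs starting at bit 'pos' and
zero-extend. Per the uint5 fields of the base encoding, pos and size - 1
must each fit in 5 bits. A sketch:

    #include <cassert>
    #include <cstdint>

    uint64_t Dext(uint64_t rs, unsigned pos, unsigned size) {
      assert(size >= 1 && pos + size <= 64);       // ISA constraint, roughly
      uint64_t mask = (uint64_t{1} << size) - 1;   // size <= 32 for base DEXT
      return (rs >> pos) & mask;
    }

    int main() {
      assert(Dext(0xFF00u, 8, 8) == 0xFF);      // pulls out one byte
      assert(Dext(~uint64_t{0}, 4, 4) == 0xF);  // any aligned nibble
    }
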
index b296d51..5ca2f3a 100644 (file)
@@ -886,6 +886,7 @@ class Assembler : public AssemblerBase {
   void clz(Register rd, Register rs);
   void ins_(Register rt, Register rs, uint16_t pos, uint16_t size);
   void ext_(Register rt, Register rs, uint16_t pos, uint16_t size);
+  void dext_(Register rt, Register rs, uint16_t pos, uint16_t size);
 
   // --------Coprocessor-instructions----------------
 
index f4bd386..c95ff30 100644 (file)
@@ -44,11 +44,9 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
     DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
   }
 
-  // JumpToExternalReference expects s0 to contain the number of arguments
+  // JumpToExternalReference expects a0 to contain the number of arguments
   // including the receiver and the extra arguments.
-  __ Daddu(s0, a0, num_extra_args + 1);
-  __ dsll(s1, s0, kPointerSizeLog2);
-  __ Dsubu(s1, s1, kPointerSize);
+  __ Daddu(a0, a0, num_extra_args + 1);
   __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
 }
 
@@ -384,24 +382,22 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         MemOperand bit_field3 = FieldMemOperand(a2, Map::kBitField3Offset);
         // Check if slack tracking is enabled.
         __ lwu(a4, bit_field3);
-        __ DecodeField<Map::ConstructionCount>(a6, a4);
-        __ Branch(&allocate,
-                  eq,
-                  a6,
-                  Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking)));
+        __ DecodeField<Map::Counter>(a6, a4);
+        __ Branch(&allocate, lt, a6,
+                  Operand(static_cast<int64_t>(Map::kSlackTrackingCounterEnd)));
         // Decrease generous allocation count.
-        __ Dsubu(a4, a4, Operand(1 << Map::ConstructionCount::kShift));
-        __ Branch(USE_DELAY_SLOT,
-            &allocate, ne, a6, Operand(JSFunction::kFinishSlackTracking));
+        __ Dsubu(a4, a4, Operand(1 << Map::Counter::kShift));
+        __ Branch(USE_DELAY_SLOT, &allocate, ne, a6,
+                  Operand(Map::kSlackTrackingCounterEnd));
         __ sw(a4, bit_field3);  // In delay slot.
 
         __ Push(a1, a2, a1);  // a1 = Constructor.
         __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
 
         __ Pop(a1, a2);
-        // Slack tracking counter is kNoSlackTracking after runtime call.
-        DCHECK(JSFunction::kNoSlackTracking == 0);
-        __ mov(a6, zero_reg);
+        // Slack tracking counter is Map::kSlackTrackingCounterEnd after runtime
+        // call.
+        __ li(a6, Map::kSlackTrackingCounterEnd);
 
         __ bind(&allocate);
       }
@@ -448,10 +444,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         Label no_inobject_slack_tracking;
 
         // Check if slack tracking is enabled.
-        __ Branch(&no_inobject_slack_tracking,
-                  eq,
-                  a6,
-                  Operand(static_cast<int64_t>(JSFunction::kNoSlackTracking)));
+        __ Branch(&no_inobject_slack_tracking, lt, a6,
+                  Operand(static_cast<int64_t>(Map::kSlackTrackingCounterEnd)));
 
         // Allocate object with a slack.
         __ lwu(a0, FieldMemOperand(a2, Map::kInstanceSizesOffset));
index ef812c7..6bbd1a3 100644 (file)
@@ -268,66 +268,6 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
 }
 
 
-void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
-    Isolate* isolate) {
-  WriteInt32ToHeapNumberStub stub1(isolate, a1, v0, a2, a3);
-  WriteInt32ToHeapNumberStub stub2(isolate, a2, v0, a3, a0);
-  stub1.GetCode();
-  stub2.GetCode();
-}
-
-
-// See comment for class, this does NOT work for int32's that are in Smi range.
-void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
-  Label max_negative_int;
-  // the_int_ has the answer which is a signed int32 but not a Smi.
-  // We test for the special value that has a different exponent.
-  STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
-  // Test sign, and save for later conditionals.
-  __ And(sign(), the_int(), Operand(0x80000000u));
-  __ Branch(&max_negative_int, eq, the_int(), Operand(0x80000000u));
-
-  // Set up the correct exponent in scratch_.  All non-Smi int32s have the same.
-  // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased).
-  uint32_t non_smi_exponent =
-      (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
-  __ li(scratch(), Operand(non_smi_exponent));
-  // Set the sign bit in scratch_ if the value was negative.
-  __ or_(scratch(), scratch(), sign());
-  // Subtract from 0 if the value was negative.
-  __ subu(at, zero_reg, the_int());
-  __ Movn(the_int(), at, sign());
-  // We should be masking the implicit first digit of the mantissa away here,
-  // but it just ends up combining harmlessly with the last digit of the
-  // exponent that happens to be 1.  The sign bit is 0 so we shift 10 to get
-  // the most significant 1 to hit the last bit of the 12 bit sign and exponent.
-  DCHECK(((1 << HeapNumber::kExponentShift) & non_smi_exponent) != 0);
-  const int shift_distance = HeapNumber::kNonMantissaBitsInTopWord - 2;
-  __ srl(at, the_int(), shift_distance);
-  __ or_(scratch(), scratch(), at);
-  __ sw(scratch(), FieldMemOperand(the_heap_number(),
-                                   HeapNumber::kExponentOffset));
-  __ sll(scratch(), the_int(), 32 - shift_distance);
-  __ Ret(USE_DELAY_SLOT);
-  __ sw(scratch(), FieldMemOperand(the_heap_number(),
-                                   HeapNumber::kMantissaOffset));
-
-  __ bind(&max_negative_int);
-  // The max negative int32 is stored as a positive number in the mantissa of
-  // a double because it uses a sign bit instead of using two's complement.
-  // The actual mantissa bits stored are all 0 because the implicit most
-  // significant 1 bit is not stored.
-  non_smi_exponent += 1 << HeapNumber::kExponentShift;
-  __ li(scratch(), Operand(HeapNumber::kSignMask | non_smi_exponent));
-  __ sw(scratch(),
-        FieldMemOperand(the_heap_number(), HeapNumber::kExponentOffset));
-  __ mov(scratch(), zero_reg);
-  __ Ret(USE_DELAY_SLOT);
-  __ sw(scratch(),
-        FieldMemOperand(the_heap_number(), HeapNumber::kMantissaOffset));
-}
-
-
 // Handle the case where the lhs and rhs are the same object.
 // Equality is almost reflexive (everything but NaN), so this is a test
 // for "identity and not NaN".
@@ -911,7 +851,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
       // double_scratch can be overwritten in the delay slot.
       // Calculates square root of base.  Check for the special case of
       // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
-      __ Move(double_scratch, -V8_INFINITY);
+      __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
       __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
       __ neg_d(double_result, double_scratch);
 
@@ -931,13 +871,13 @@ void MathPowStub::Generate(MacroAssembler* masm) {
       // double_scratch can be overwritten in the delay slot.
       // Calculates square root of base.  Check for the special case of
       // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
-      __ Move(double_scratch, -V8_INFINITY);
+      __ Move(double_scratch, static_cast<double>(-V8_INFINITY));
       __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, double_base, double_scratch);
       __ Move(double_result, kDoubleRegZero);
 
       // Add +0 to convert -0 to +0.
       __ add_d(double_scratch, double_base, kDoubleRegZero);
-      __ Move(double_result, 1);
+      __ Move(double_result, 1.);
       __ sqrt_d(double_scratch, double_scratch);
       __ div_d(double_result, double_result, double_scratch);
       __ jmp(&done);
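
The -V8_INFINITY guards implement the ES5 15.8.2.13 corner cases, which a
plain sqrt would get wrong. On IEEE-754 hosts, libm reproduces the required
results, so they can be checked directly (illustrative only):

    #include <cmath>
    #include <cstdio>

    int main() {
      double base = -INFINITY;
      printf("%f\n", std::sqrt(base));       // nan: why the check is needed
      printf("%f\n", std::pow(base, 0.5));   // inf, per the spec
      printf("%f\n", std::pow(base, -0.5));  // 0.000000, per the spec
    }
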
@@ -1053,7 +993,6 @@ bool CEntryStub::NeedsImmovableCode() {
 
 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
   CEntryStub::GenerateAheadOfTime(isolate);
-  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
   StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
@@ -1094,22 +1033,18 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
 
 void CEntryStub::Generate(MacroAssembler* masm) {
   // Called from JavaScript; parameters are on stack as if calling JS function
-  // s0: number of arguments including receiver
-  // s1: size of arguments excluding receiver
-  // s2: pointer to builtin function
+  // a0: number of arguments including receiver
+  // a1: pointer to builtin function
   // fp: frame pointer    (restored after C call)
   // sp: stack pointer    (restored as callee's sp after C call)
   // cp: current context  (C callee-saved)
 
   ProfileEntryHookStub::MaybeCallEntryHook(masm);
 
-  // NOTE: s0-s2 hold the arguments of this function instead of a0-a2.
-  // The reason for this is that these arguments would need to be saved anyway
-  // so it's faster to set them up directly.
-  // See MacroAssembler::PrepareCEntryArgs and PrepareCEntryFunction.
-
   // Compute the argv pointer in a callee-saved register.
+  __ dsll(s1, a0, kPointerSizeLog2);
   __ Daddu(s1, sp, s1);
+  __ Dsubu(s1, s1, kPointerSize);
 
   // Enter the exit frame that transitions from JavaScript to C++.
   FrameScope scope(masm, StackFrame::MANUAL);
@@ -1121,7 +1056,8 @@ void CEntryStub::Generate(MacroAssembler* masm) {
 
   // Prepare arguments for C routine.
   // a0 = argc
-  __ mov(a0, s0);
+  __ mov(s0, a0);
+  __ mov(s2, a1);
   // a1 = argv (set in the delay slot after find_ra below).
 
   // We are calling compiled C/C++ code. a0 and a1 hold our two arguments. We
@@ -1411,10 +1347,16 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
 
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register index = LoadDescriptor::NameRegister();
-  Register scratch = a3;
+  Register scratch = a4;
   Register result = v0;
   DCHECK(!scratch.is(receiver) && !scratch.is(index));
+  DCHECK(!FLAG_vector_ics ||
+         (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+          result.is(VectorLoadICDescriptor::SlotRegister())));
 
+  // StringCharAtGenerator doesn't use the result register until it has
+  // passed the different miss possibilities. If it did, we would have a
+  // conflict when FLAG_vector_ics is true.
   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -1444,8 +1386,6 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
 void InstanceofStub::Generate(MacroAssembler* masm) {
   // Call site inlining and patching implies arguments in registers.
   DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
-  // ReturnTrueFalse is only implemented for inlined call sites.
-  DCHECK(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
 
   // Fixed register usage throughout the stub:
   const Register object = a0;  // Object (lhs).
@@ -1470,7 +1410,7 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
 
   // If there is a call site cache don't look in the global cache, but do the
   // real lookup and update the call site cache.
-  if (!HasCallSiteInlineCheck()) {
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
     Label miss;
     __ LoadRoot(at, Heap::kInstanceofCacheFunctionRootIndex);
     __ Branch(&miss, ne, function, Operand(at));
@@ -1529,6 +1469,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   if (!HasCallSiteInlineCheck()) {
     __ mov(v0, zero_reg);
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ LoadRoot(v0, Heap::kTrueValueRootIndex);
+    }
   } else {
     // Patch the call site to return true.
     __ LoadRoot(v0, Heap::kTrueValueRootIndex);
@@ -1547,6 +1490,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   if (!HasCallSiteInlineCheck()) {
     __ li(v0, Operand(Smi::FromInt(1)));
     __ StoreRoot(v0, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+    }
   } else {
     // Patch the call site to return false.
     __ LoadRoot(v0, Heap::kFalseValueRootIndex);
@@ -1572,19 +1518,31 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
   // Null is not instance of anything.
   __ Branch(&object_not_null, ne, object,
             Operand(isolate()->factory()->null_value()));
-  __ li(v0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+  } else {
+    __ li(v0, Operand(Smi::FromInt(1)));
+  }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   __ bind(&object_not_null);
   // Smi values are not instances of anything.
   __ JumpIfNotSmi(object, &object_not_null_or_smi);
-  __ li(v0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+  } else {
+    __ li(v0, Operand(Smi::FromInt(1)));
+  }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   __ bind(&object_not_null_or_smi);
   // String values are not instances of anything.
   __ IsObjectJSStringType(object, scratch, &slow);
-  __ li(v0, Operand(Smi::FromInt(1)));
+  if (ReturnTrueFalseObject()) {
+    __ LoadRoot(v0, Heap::kFalseValueRootIndex);
+  } else {
+    __ li(v0, Operand(Smi::FromInt(1)));
+  }
   __ DropAndRet(HasArgsInRegisters() ? 0 : 2);
 
   // Slow-case.  Tail call builtin.
@@ -1612,8 +1570,14 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a3,
-                                                          a4, &miss);
+  // Ensure that the vector and slot registers won't be clobbered before
+  // calling the miss handler.
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(a4, a5, VectorLoadICDescriptor::VectorRegister(),
+                     VectorLoadICDescriptor::SlotRegister()));
+
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, a4,
+                                                          a5, &miss);
   __ bind(&miss);
   PropertyAccessCompiler::TailCallBuiltin(
       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@@ -2892,6 +2856,10 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
 void CallICStub::Generate(MacroAssembler* masm) {
   // a1 - function
   // a3 - slot id (Smi)
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -2930,38 +2898,71 @@ void CallICStub::Generate(MacroAssembler* masm) {
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
   __ Branch(&slow_start, eq, a4, Operand(at));
-  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
-  __ Branch(&miss, eq, a4, Operand(at));
-
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(a4);
-    __ GetObjectType(a4, a5, a5);
-    __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
-    __ dsrl(a4, a3, 32 - kPointerSizeLog2);
-    __ Daddu(a4, a2, Operand(a4));
-    __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
-    __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-    FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ ld(a4, FieldMemOperand(a2, with_types_offset));
-    __ Dsubu(a4, a4, Operand(Smi::FromInt(1)));
-    __ sd(a4, FieldMemOperand(a2, with_types_offset));
-    const int generic_offset =
-    FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ ld(a4, FieldMemOperand(a2, generic_offset));
-    __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
-    __ Branch(USE_DELAY_SLOT, &slow_start);
-    __ sd(a4, FieldMemOperand(a2, generic_offset));  // In delay slot.
+
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ Branch(&miss);
   }
 
-  // We are here because tracing is on or we are going monomorphic.
+  __ LoadRoot(at, Heap::kuninitialized_symbolRootIndex);
+  __ Branch(&uninitialized, eq, a4, Operand(at));
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(a4);
+  __ GetObjectType(a4, a5, a5);
+  __ Branch(&miss, ne, a5, Operand(JS_FUNCTION_TYPE));
+  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+  __ Daddu(a4, a2, Operand(a4));
+  __ LoadRoot(at, Heap::kmegamorphic_symbolRootIndex);
+  __ sd(at, FieldMemOperand(a4, FixedArray::kHeaderSize));
+  // We have to update statistics for runtime profiling.
+  __ ld(a4, FieldMemOperand(a2, with_types_offset));
+  __ Dsubu(a4, a4, Operand(Smi::FromInt(1)));
+  __ sd(a4, FieldMemOperand(a2, with_types_offset));
+  __ ld(a4, FieldMemOperand(a2, generic_offset));
+  __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
+  __ Branch(USE_DELAY_SLOT, &slow_start);
+  __ sd(a4, FieldMemOperand(a2, generic_offset));  // In delay slot.
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(a1, &miss);
+
+  // Goto miss case if we do not have a function.
+  __ GetObjectType(a1, a4, a4);
+  __ Branch(&miss, ne, a4, Operand(JS_FUNCTION_TYPE));
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, a4);
+  __ Branch(&miss, eq, a1, Operand(a4));
+
+  // Update stats.
+  __ ld(a4, FieldMemOperand(a2, with_types_offset));
+  __ Daddu(a4, a4, Operand(Smi::FromInt(1)));
+  __ sd(a4, FieldMemOperand(a2, with_types_offset));
+
+  // Store the function.
+  __ dsrl(a4, a3, 32 - kPointerSizeLog2);
+  __ Daddu(a4, a2, Operand(a4));
+  __ Daddu(a4, a4, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ sd(a1, MemOperand(a4, 0));
+
+  // Update the write barrier.
+  __ mov(a5, a1);
+  __ RecordWrite(a2, a4, a5, kRAHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Branch(&have_js_function);
+
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
 
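The rewritten tail of CallICStub is a small state machine over the feedback
slot. A sketch of the transitions it implements (symbolic states; the real
slot stores sentinel symbols or a JSFunction):

    enum class Feedback { kUninitialized, kMonomorphic, kMegamorphic };

    Feedback OnCall(Feedback state, bool same_target, bool is_js_function,
                    bool is_array_function) {
      switch (state) {
        case Feedback::kUninitialized:
          // New: record the first JSFunction seen and go monomorphic.
          // Array() and non-functions still go through the miss handler.
          return (is_js_function && !is_array_function)
                     ? Feedback::kMonomorphic
                     : state;
        case Feedback::kMonomorphic:
          // A different target generalizes the site; the stub also updates
          // the with_types / generic counters when this happens.
          return same_target ? state : Feedback::kMegamorphic;
        case Feedback::kMegamorphic:
          return state;  // terminal
      }
      return state;
    }

    int main() {
      return OnCall(Feedback::kUninitialized, false, true, false) ==
                     Feedback::kMonomorphic
                 ? 0
                 : 1;
    }
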
@@ -3378,20 +3379,43 @@ void SubStringStub::Generate(MacroAssembler* masm) {
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in a0.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(a0, &check_heap_number);
+  Label not_smi;
+  __ JumpIfNotSmi(a0, &not_smi);
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
+  __ bind(&not_smi);
 
-  __ bind(&check_heap_number);
+  Label not_heap_number;
   __ ld(a1, FieldMemOperand(a0, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&call_builtin, ne, a1, Operand(at));
+  __ lbu(a1, FieldMemOperand(a1, Map::kInstanceTypeOffset));
+  // a0: object
+  // a1: instance type.
+  __ Branch(&not_heap_number, ne, a1, Operand(HEAP_NUMBER_TYPE));
   __ Ret(USE_DELAY_SLOT);
   __ mov(v0, a0);
+  __ bind(&not_heap_number);
+
+  Label not_string, slow_string;
+  __ Branch(&not_string, hs, a1, Operand(FIRST_NONSTRING_TYPE));
+  // Check if string has a cached array index.
+  __ ld(a2, FieldMemOperand(a0, String::kHashFieldOffset));
+  __ And(at, a2, Operand(String::kContainsCachedArrayIndexMask));
+  __ Branch(&slow_string, ne, at, Operand(zero_reg));
+  __ IndexFromHash(a2, a0);
+  __ Ret(USE_DELAY_SLOT);
+  __ mov(v0, a0);
+  __ bind(&slow_string);
+  __ push(a0);  // Push argument.
+  __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+  __ bind(&not_string);
+
+  Label not_oddball;
+  __ Branch(&not_oddball, ne, a1, Operand(ODDBALL_TYPE));
+  __ Ret(USE_DELAY_SLOT);
+  __ ld(v0, FieldMemOperand(a0, Oddball::kToNumberOffset));
+  __ bind(&not_oddball);
 
-  __ bind(&call_builtin);
-  __ push(a0);
+  __ push(a0);  // Push argument.
   __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
 
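The rewritten ToNumber stub adds fast paths in a fixed order: Smi, heap
number, string (with an extra shortcut for a cached array index, omitted
below), then oddball, before falling back to the builtin. The dispatch
order as plain C++ over a toy tagged value:

    #include <cstdio>
    #include <cstdlib>

    struct Value {
      enum Kind { kSmi, kHeapNumber, kString, kOddball, kOther } kind;
      double number;       // smi / heap number / oddball to_number payload
      const char* string;  // string payload
    };

    double ToNumber(const Value& v) {
      switch (v.kind) {
        case Value::kSmi:
        case Value::kHeapNumber: return v.number;  // already numbers
        case Value::kOddball:    return v.number;  // cached to_number field
        case Value::kString:     return strtod(v.string, nullptr);  // runtime
        default:                 return 0;  // Builtins::TO_NUMBER handles it
      }
    }

    int main() {
      Value v = {Value::kString, 0, "42"};
      printf("%g\n", ToNumber(v));  // 42
    }
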
index 6c324bb..edc1bd2 100644 (file)
@@ -73,56 +73,6 @@ class RestoreRegistersStateStub: public PlatformCodeStub {
   DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
 };
 
-// This stub can convert a signed int32 to a heap number (double).  It does
-// not work for int32s that are in Smi range!  No GC occurs during this stub
-// so you don't have to set up the frame.
-class WriteInt32ToHeapNumberStub : public PlatformCodeStub {
- public:
-  WriteInt32ToHeapNumberStub(Isolate* isolate, Register the_int,
-                             Register the_heap_number, Register scratch,
-                             Register scratch2)
-      : PlatformCodeStub(isolate) {
-    minor_key_ = IntRegisterBits::encode(the_int.code()) |
-                 HeapNumberRegisterBits::encode(the_heap_number.code()) |
-                 ScratchRegisterBits::encode(scratch.code()) |
-                 SignRegisterBits::encode(scratch2.code());
-    DCHECK(IntRegisterBits::is_valid(the_int.code()));
-    DCHECK(HeapNumberRegisterBits::is_valid(the_heap_number.code()));
-    DCHECK(ScratchRegisterBits::is_valid(scratch.code()));
-    DCHECK(SignRegisterBits::is_valid(scratch2.code()));
-  }
-
-  static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
-
- private:
-  void Generate(MacroAssembler* masm);
-
-  Register the_int() const {
-    return Register::from_code(IntRegisterBits::decode(minor_key_));
-  }
-
-  Register the_heap_number() const {
-    return Register::from_code(HeapNumberRegisterBits::decode(minor_key_));
-  }
-
-  Register scratch() const {
-    return Register::from_code(ScratchRegisterBits::decode(minor_key_));
-  }
-
-  Register sign() const {
-    return Register::from_code(SignRegisterBits::decode(minor_key_));
-  }
-
-  // Minor key encoding in 16 bits.
-  class IntRegisterBits: public BitField<int, 0, 4> {};
-  class HeapNumberRegisterBits: public BitField<int, 4, 4> {};
-  class ScratchRegisterBits: public BitField<int, 8, 4> {};
-  class SignRegisterBits: public BitField<int, 12, 4> {};
-
-  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
-  DEFINE_CODE_STUB(WriteInt32ToHeapNumber, PlatformCodeStub);
-};
-
 
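The deleted stub's 16-bit minor key is a compact example of V8's BitField
encoding: four 4-bit register codes packed side by side. A stand-alone
sketch mirroring it:

    #include <cassert>
    #include <cstdint>

    template <class T, int kShift, int kSize>
    struct BitField {
      static uint32_t mask() { return ((1u << kSize) - 1) << kShift; }
      static uint32_t encode(T v) {
        return static_cast<uint32_t>(v) << kShift;
      }
      static T decode(uint32_t key) {
        return static_cast<T>((key & mask()) >> kShift);
      }
    };

    using IntRegisterBits        = BitField<int, 0, 4>;
    using HeapNumberRegisterBits = BitField<int, 4, 4>;
    using ScratchRegisterBits    = BitField<int, 8, 4>;
    using SignRegisterBits       = BitField<int, 12, 4>;

    int main() {
      uint32_t key = IntRegisterBits::encode(5) | SignRegisterBits::encode(9);
      assert(IntRegisterBits::decode(key) == 5);
      assert(SignRegisterBits::decode(key) == 9);
    }
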
 class RecordWriteStub: public PlatformCodeStub {
  public:
@@ -152,7 +102,7 @@ class RecordWriteStub: public PlatformCodeStub {
     INCREMENTAL_COMPACTION
   };
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
   static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
     const unsigned offset = masm->instr_at(pos) & kImm16Mask;
@@ -279,9 +229,9 @@ class RecordWriteStub: public PlatformCodeStub {
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+  inline Major MajorKey() const FINAL { return RecordWrite; }
 
-  virtual void Generate(MacroAssembler* masm) OVERRIDE;
+  void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -289,7 +239,7 @@ class RecordWriteStub: public PlatformCodeStub {
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  void Activate(Code* code) {
+  void Activate(Code* code) OVERRIDE {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
@@ -337,7 +287,7 @@ class DirectCEntryStub: public PlatformCodeStub {
   void GenerateCall(MacroAssembler* masm, Register target);
 
  private:
-  bool NeedsImmovableCode() { return true; }
+  bool NeedsImmovableCode() OVERRIDE { return true; }
 
   DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
   DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
@@ -369,7 +319,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
                                      Register r0,
                                      Register r1);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
  private:
   static const int kInlinedProbes = 4;
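
The recurring signature edit in this header is mechanical: OVERRIDE and FINAL are V8's macros for the C++11 override and final keywords, and once a method carries one of them the leading virtual is redundant. Dropping OVERRIDE next to FINAL loses nothing either, since without virtual a FINAL method only compiles if it actually overrides a base declaration. A minimal sketch with the plain keywords:

    struct CodeStubSketch {
      virtual bool SometimesSetsUpAFrame() { return true; }
    };

    struct DerivedSketch : CodeStubSketch {
      // was: virtual bool SometimesSetsUpAFrame() OVERRIDE { ... }
      bool SometimesSetsUpAFrame() override { return false; }
    };
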
index cffac91..b292506 100644
@@ -1033,7 +1033,7 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
   // Mov 1 in double_scratch2 as math_exp_constants_array[8] == 1.
   DCHECK(*reinterpret_cast<double*>
          (ExternalReference::math_exp_constants(8).address()) == 1);
-  __ Move(double_scratch2, 1);
+  __ Move(double_scratch2, 1.);
   __ add_d(result, result, double_scratch2);
   __ dsrl(temp1, temp2, 11);
   __ Ext(temp2, temp2, 0, 11);
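
The added decimal point is an overload-resolution fix, not a value change: later in this patch the macro assembler gains a Move(FPURegister, float) overload alongside the existing double one, so an integer literal would become ambiguous. The same reasoning appears to explain the static_cast<double>(-V8_INFINITY) change further down. A self-contained illustration:

    #include <iostream>

    void Move(float)  { std::cout << "float overload\n"; }
    void Move(double) { std::cout << "double overload\n"; }

    int main() {
      // Move(1);  // error: int converts equally well to float and double
      Move(1.);    // double literal, so the intended overload is chosen
    }
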
index dfd6243..dd34c57 100644
@@ -287,6 +287,7 @@ Instruction::Type Instruction::InstructionType() const {
       switch (FunctionFieldRaw()) {
         case INS:
         case EXT:
+        case DEXT:
           return kRegisterType;
         default:
           return kUnsupported;
index 521869b..d12148a 100644
@@ -574,52 +574,49 @@ enum SecondaryField {
 // because 'NegateCondition' function flips LSB to negate condition.
 enum Condition {
   // Any value < 0 is considered no_condition.
-  kNoCondition  = -1,
-
-  overflow      =  0,
-  no_overflow   =  1,
-  Uless         =  2,
-  Ugreater_equal=  3,
-  equal         =  4,
-  not_equal     =  5,
-  Uless_equal   =  6,
-  Ugreater      =  7,
-  negative      =  8,
-  positive      =  9,
-  parity_even   = 10,
-  parity_odd    = 11,
-  less          = 12,
+  kNoCondition = -1,
+  overflow = 0,
+  no_overflow = 1,
+  Uless = 2,
+  Ugreater_equal = 3,
+  equal = 4,
+  not_equal = 5,
+  Uless_equal = 6,
+  Ugreater = 7,
+  negative = 8,
+  positive = 9,
+  parity_even = 10,
+  parity_odd = 11,
+  less = 12,
   greater_equal = 13,
-  less_equal    = 14,
-  greater       = 15,
-  ueq           = 16,  // Unordered or Equal.
-  nue           = 17,  // Not (Unordered or Equal).
-
-  cc_always     = 18,
+  less_equal = 14,
+  greater = 15,
+  ueq = 16,  // Unordered or Equal.
+  nue = 17,  // Not (Unordered or Equal).
+  cc_always = 18,
 
   // Aliases.
-  carry         = Uless,
-  not_carry     = Ugreater_equal,
-  zero          = equal,
-  eq            = equal,
-  not_zero      = not_equal,
-  ne            = not_equal,
-  nz            = not_equal,
-  sign          = negative,
-  not_sign      = positive,
-  mi            = negative,
-  pl            = positive,
-  hi            = Ugreater,
-  ls            = Uless_equal,
-  ge            = greater_equal,
-  lt            = less,
-  gt            = greater,
-  le            = less_equal,
-  hs            = Ugreater_equal,
-  lo            = Uless,
-  al            = cc_always,
-
-  cc_default    = kNoCondition
+  carry = Uless,
+  not_carry = Ugreater_equal,
+  zero = equal,
+  eq = equal,
+  not_zero = not_equal,
+  ne = not_equal,
+  nz = not_equal,
+  sign = negative,
+  not_sign = positive,
+  mi = negative,
+  pl = positive,
+  hi = Ugreater,
+  ls = Uless_equal,
+  ge = greater_equal,
+  lt = less,
+  gt = greater,
+  le = less_equal,
+  hs = Ugreater_equal,
+  lo = Uless,
+  al = cc_always,
+  cc_default = kNoCondition
 };
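
The even/odd pairing the comment above the enum refers to: each condition sits next to its negation, so NegateCondition can be a single XOR of the least significant bit. A compilable sketch of that invariant:

    enum Condition {
      overflow = 0, no_overflow = 1,
      equal = 4,    not_equal = 5,
      cc_always = 18  // the one code with no negation
    };

    constexpr Condition NegateCondition(Condition cc) {
      return static_cast<Condition>(cc ^ 1);  // callers must exclude cc_always
    }

    static_assert(NegateCondition(equal) == not_equal, "LSB flip negates");
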
 
 
index 2550b76..d7a7f05 100644
@@ -19,6 +19,12 @@ int Deoptimizer::patch_size() {
 }
 
 
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+  // Empty because there is no need for relocation information for the code
+  // patching in Deoptimizer::PatchCodeForDeoptimization below.
+}
+
+
 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
   Address code_start_address = code->instruction_start();
   // Invalidate the relocation information, as it will become invalid by the
@@ -101,9 +107,8 @@ void Deoptimizer::SetPlatformCompiledStubRegisters(
   ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
   intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
   int params = descriptor->GetHandlerParameterCount();
-  output_frame->SetRegister(s0.code(), params);
-  output_frame->SetRegister(s1.code(), (params - 1) * kPointerSize);
-  output_frame->SetRegister(s2.code(), handler);
+  output_frame->SetRegister(a0.code(), params);
+  output_frame->SetRegister(a1.code(), handler);
 }
 
 
index d47950f..3f0642d 100644
@@ -963,6 +963,10 @@ int Decoder::DecodeTypeRegister(Instruction* instr) {
           Format(instr, "ext     'rt, 'rs, 'sa, 'ss1");
           break;
         }
+        case DEXT: {
+          Format(instr, "dext    'rt, 'rs, 'sa, 'ss1");
+          break;
+        }
         default:
           UNREACHABLE();
       }
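
DEXT is the doubleword companion of EXT: it extracts a size-bit field starting at bit pos of a 64-bit register. The new decoder and disassembler cases only classify and print it; as a plain-C++ model of the operation itself (the Dext macro added later in this patch checks pos < 32 and pos + size < 33, matching the base DEXT encoding's range):

    #include <cstdint>

    // dext rt, rs, pos, size  =>  rt = (rs >> pos) & ((1 << size) - 1)
    uint64_t Dext(uint64_t rs, unsigned pos, unsigned size) {
      return (rs >> pos) & ((uint64_t{1} << size) - 1);  // size <= 32 here
    }
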
index 06c3bb4..9d4ed09 100644
@@ -201,10 +201,10 @@ void FullCodeGenerator::Generate() {
     Comment cmnt(masm_, "[ Allocate context");
     // Argument to NewContext is the function, which is still in a1.
     bool need_write_barrier = true;
-    if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+    if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
       __ push(a1);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewScriptContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -924,7 +924,7 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   EmitDebugCheckDeclarationContext(variable);
 
   // Load instance object.
-  __ LoadContext(a1, scope_->ContextChainLength(scope_->GlobalScope()));
+  __ LoadContext(a1, scope_->ContextChainLength(scope_->ScriptScope()));
   __ ld(a1, ContextOperand(a1, variable->interface()->Index()));
   __ ld(a1, ContextOperand(a1, Context::EXTENSION_INDEX));
 
@@ -1097,6 +1097,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 
   // Get the object to enumerate over. If the object is null or undefined, skip
   // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  SetExpressionPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
   __ mov(a0, result_register());  // Result as param to InvokeBuiltin below.
   __ LoadRoot(at, Heap::kUndefinedValueRootIndex);
@@ -1196,6 +1197,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // Generate code for doing the condition check.
   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
+  SetExpressionPosition(stmt->each());
+
   // Load the current count to a0, load the length to a1.
   __ ld(a0, MemOperand(sp, 0 * kPointerSize));
   __ ld(a1, MemOperand(sp, 1 * kPointerSize));
@@ -1265,48 +1268,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 }
 
 
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
-  Comment cmnt(masm_, "[ ForOfStatement");
-  SetStatementPosition(stmt);
-
-  Iteration loop_statement(this, stmt);
-  increment_loop_depth();
-
-  // var iterator = iterable[Symbol.iterator]();
-  VisitForEffect(stmt->assign_iterator());
-
-  // Loop entry.
-  __ bind(loop_statement.continue_label());
-
-  // result = iterator.next()
-  VisitForEffect(stmt->next_result());
-
-  // if (result.done) break;
-  Label result_not_done;
-  VisitForControl(stmt->result_done(),
-                  loop_statement.break_label(),
-                  &result_not_done,
-                  &result_not_done);
-  __ bind(&result_not_done);
-
-  // each = result.value
-  VisitForEffect(stmt->assign_each());
-
-  // Generate code for the body of the loop.
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
-  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
-  __ jmp(loop_statement.continue_label());
-
-  // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ bind(loop_statement.break_label());
-  decrement_loop_depth();
-}
-
-
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
   // Use the fast case closure allocation code that allocates in new
@@ -1364,6 +1325,19 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
 }
 
 
+void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
+                                                  int offset) {
+  if (NeedsHomeObject(initializer)) {
+    __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+    __ li(StoreDescriptor::NameRegister(),
+          Operand(isolate()->factory()->home_object_symbol()));
+    __ ld(StoreDescriptor::ValueRegister(),
+          MemOperand(sp, offset * kPointerSize));
+    CallStoreIC();
+  }
+}
+
+
 void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
@@ -1722,6 +1696,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
             __ ld(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+            if (NeedsHomeObject(value)) {
+              __ Move(StoreDescriptor::ReceiverRegister(), v0);
+              __ li(StoreDescriptor::NameRegister(),
+                    Operand(isolate()->factory()->home_object_symbol()));
+              __ ld(StoreDescriptor::ValueRegister(), MemOperand(sp));
+              CallStoreIC();
+            }
           } else {
             VisitForEffect(value);
           }
@@ -1733,6 +1715,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
+          EmitSetHomeObjectIfNeeded(value, 2);
           __ li(a0, Operand(Smi::FromInt(SLOPPY)));  // PropertyAttributes.
           __ push(a0);
           __ CallRuntime(Runtime::kSetProperty, 4);
@@ -1769,7 +1752,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
     __ push(a0);
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
+    EmitSetHomeObjectIfNeeded(it->second->getter, 2);
     EmitAccessor(it->second->setter);
+    EmitSetHomeObjectIfNeeded(it->second->setter, 3);
     __ li(a0, Operand(Smi::FromInt(NONE)));
     __ push(a0);
     __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
@@ -2195,14 +2180,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   VisitForAccumulatorValue(value);
   __ pop(a1);
 
-  // Check generator state.
-  Label wrong_state, closed_state, done;
-  __ ld(a3, FieldMemOperand(a1, JSGeneratorObject::kContinuationOffset));
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
-  __ Branch(&closed_state, eq, a3, Operand(zero_reg));
-  __ Branch(&wrong_state, lt, a3, Operand(zero_reg));
-
   // Load suspended function and context.
   __ ld(cp, FieldMemOperand(a1, JSGeneratorObject::kContextOffset));
   __ ld(a4, FieldMemOperand(a1, JSGeneratorObject::kFunctionOffset));
@@ -2227,7 +2204,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
 
   // Enter a new JavaScript frame, and initialize its slots as they were when
   // the generator was suspended.
-  Label resume_frame;
+  Label resume_frame, done;
   __ bind(&push_frame);
   __ Call(&resume_frame);
   __ jmp(&done);
@@ -2276,26 +2253,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   // Not reached: the runtime call returns elsewhere.
   __ stop("not-reached");
 
-  // Reach here when generator is closed.
-  __ bind(&closed_state);
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    // Return completed iterator result when generator is closed.
-    __ LoadRoot(a2, Heap::kUndefinedValueRootIndex);
-    __ push(a2);
-    // Pop value from top-of-stack slot; box result into result register.
-    EmitCreateIteratorResult(true);
-  } else {
-    // Throw the provided value.
-    __ push(a0);
-    __ CallRuntime(Runtime::kThrow, 1);
-  }
-  __ jmp(&done);
-
-  // Throw error if we attempt to operate on a running generator.
-  __ bind(&wrong_state);
-  __ push(a1);
-  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
   __ bind(&done);
   context()->Plug(result_register());
 }
@@ -2508,6 +2465,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
     __ push(scratch);
     VisitForStackValue(key);
     VisitForStackValue(value);
+    EmitSetHomeObjectIfNeeded(value, 2);
 
     switch (property->kind()) {
       case ObjectLiteral::Property::CONSTANT:
@@ -2680,7 +2638,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
     // Perform the assignment.
     __ bind(&assign);
     EmitStoreToStackLocalOrContextSlot(var, location);
-
   } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
@@ -2705,8 +2662,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
+  } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+    __ CallRuntime(Runtime::kThrowConstAssignError, 0);
   }
-  // Non-initializing assignments to consts are ignored.
 }
 
 
@@ -5086,7 +5044,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope() ||
+  if (declaration_scope->is_script_scope() ||
       declaration_scope->is_module_scope()) {
     // Contexts nested in the native context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
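
The EmitSetHomeObjectIfNeeded calls sprinkled through this file share one stack contract, modeled here in standalone C++ (Obj and the vector-as-stack are stand-ins): the freshly pushed closure is on top of the stack, the object that should become its [[HomeObject]] sits offset slots down, and the store IC writes the property. NeedsHomeObject gates it to closures that actually reference super, which is what [[HomeObject]] lookups serve.

    #include <map>
    #include <string>
    #include <vector>

    struct Obj { std::map<std::string, Obj*> props; };

    void SetHomeObjectIfNeeded(std::vector<Obj*>& stack, int offset,
                               bool needs_home_object) {
      if (!needs_home_object) return;  // closure makes no 'super' references
      Obj* closure = stack.back();                    // MemOperand(sp)
      Obj* home = stack[stack.size() - 1 - offset];   // sp + offset slots
      closure->props["[[HomeObject]]"] = home;        // what CallStoreIC does
    }
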
index 88f6b18..a817a28 100644
@@ -26,9 +26,9 @@ class SafepointGenerator FINAL  : public CallWrapper {
         deopt_mode_(mode) { }
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const OVERRIDE {}
+  void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const OVERRIDE {
+  void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
@@ -2665,10 +2665,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
 
    private:
@@ -2828,6 +2828,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
       __ Daddu(sp, sp, Operand(sp_delta));
     }
   } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
     Register reg = ToRegister(instr->parameter_count());
     // The argument count parameter is a smi
     __ SmiUntag(reg);
@@ -2858,13 +2859,17 @@ template <class T>
 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   DCHECK(FLAG_vector_ics);
   Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = VectorLoadICDescriptor::SlotRegister();
   DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(a0));
+
+  AllowDeferredHandleDereference vector_structure_check;
   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   __ li(vector_register, vector);
   // No need to allocate this register.
-  DCHECK(VectorLoadICDescriptor::SlotRegister().is(a0));
-  int index = vector->GetIndex(instr->hydrogen()->slot());
-  __ li(VectorLoadICDescriptor::SlotRegister(), Operand(Smi::FromInt(index)));
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ li(slot_register, Operand(Smi::FromInt(index)));
 }
 
 
@@ -3705,10 +3710,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
    public:
     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LMathAbs* instr_;
   };
@@ -3868,7 +3874,7 @@ void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
   // Math.pow(-Infinity, 0.5) == Infinity
   // Math.sqrt(-Infinity) == NaN
   Label done;
-  __ Move(temp, -V8_INFINITY);
+  __ Move(temp, static_cast<double>(-V8_INFINITY));
   __ BranchF(USE_DELAY_SLOT, &done, NULL, eq, temp, input);
   // Set up Infinity in the delay slot.
   // result is overwritten if the branch is not taken.
@@ -3977,44 +3983,73 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
   DCHECK(receiver.is(a1));
   DCHECK(name.is(a2));
 
-  Register scratch = a3;
-  Register extra = a4;
-  Register extra2 = a5;
-  Register extra3 = a6;
+  Register scratch = a4;
+  Register extra = a5;
+  Register extra2 = a6;
+  Register extra3 = t1;
+#ifdef DEBUG
+  Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+  Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(slot, vector, scratch, extra, extra2, extra3));
+#endif
 
   // Important for the tail-call.
   bool must_teardown_frame = NeedsEagerFrame();
 
-  // The probe will tail call to a handler if found.
-  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
-                                         must_teardown_frame, receiver, name,
-                                         scratch, extra, extra2, extra3);
+  if (!instr->hydrogen()->is_just_miss()) {
+    DCHECK(!instr->hydrogen()->is_keyed_load());
+
+    // The probe will tail call to a handler if found.
+    isolate()->stub_cache()->GenerateProbe(
+        masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+        receiver, name, scratch, extra, extra2, extra3);
+  }
 
   // Tail call to miss if we ended up here.
   if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
-  LoadIC::GenerateMiss(masm());
+  if (instr->hydrogen()->is_keyed_load()) {
+    KeyedLoadIC::GenerateMiss(masm());
+  } else {
+    LoadIC::GenerateMiss(masm());
+  }
 }
 
 
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   DCHECK(ToRegister(instr->result()).is(v0));
 
-  LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ LeaveFrame(StackFrame::INTERNAL);
 
-  if (instr->target()->IsConstantOperand()) {
-    LConstantOperand* target = LConstantOperand::cast(instr->target());
-    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-    __ Call(code, RelocInfo::CODE_TARGET);
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ Jump(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Jump(target);
+    }
   } else {
-    DCHECK(instr->target()->IsRegister());
-    Register target = ToRegister(instr->target());
-    generator.BeforeCall(__ CallSize(target));
-    __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
-    __ Call(target);
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      __ Call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      __ Daddu(target, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+      __ Call(target);
+    }
+    generator.AfterCall();
   }
-  generator.AfterCall();
 }
 
 
@@ -4556,10 +4591,9 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStringCharCodeAt(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4611,10 +4645,11 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
    public:
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharFromCode* instr_;
   };
@@ -4689,14 +4724,15 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
    public:
     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_,
                                        instr_->value(),
                                        instr_->temp1(),
                                        instr_->temp2(),
                                        UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagU* instr_;
   };
@@ -4779,10 +4815,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
    public:
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredNumberTagD(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagD* instr_;
   };
@@ -5005,10 +5040,9 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
    public:
     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredTaggedToI(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LTaggedToI* instr_;
   };
@@ -5213,11 +5247,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
         : LDeferredCode(codegen), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -5336,10 +5371,9 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
    public:
     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredAllocate(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LAllocate* instr_;
   };
@@ -5751,10 +5785,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
    public:
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStackCheck(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStackCheck* instr_;
   };
@@ -5902,10 +5935,11 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
           object_(object),
           index_(index) {
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LLoadFieldByIndex* instr_;
     Register result_;
index 664fdd0..a12764b 100644
@@ -1103,9 +1103,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
       UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
   LOperand* name_register =
       UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+    vector =
+        UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+  }
+
   // Not marked as call. It can't deoptimize, and it never returns.
   return new (zone()) LTailCallThroughMegamorphicCache(
-      context, receiver_register, name_register);
+      context, receiver_register, name_register, slot, vector);
 }
 
 
@@ -1402,8 +1410,14 @@ LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
   DCHECK(instr->right()->representation().Equals(instr->representation()));
   LOperand* dividend = UseRegister(instr->left());
   LOperand* divisor = UseRegister(instr->right());
-  LFlooringDivI* div = new(zone()) LFlooringDivI(dividend, divisor);
-  return AssignEnvironment(DefineAsRegister(div));
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
 }
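
The rewritten lowering is representative of several in this file: a deoptimization environment is now requested only when the instruction can actually bail out, instead of unconditionally. The added condition, restated as a standalone predicate:

    bool NeedsEnvironment(bool can_be_div_by_zero, bool bailout_on_minus_zero,
                          bool can_overflow) {
      return can_be_div_by_zero || bailout_on_minus_zero || can_overflow;
    }
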
 
 
@@ -2058,7 +2072,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object =
       UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
   LLoadGlobalGeneric* result =
@@ -2117,7 +2131,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
@@ -2185,7 +2199,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
index c6257a4..b89a9c4 100644
@@ -162,17 +162,13 @@ class LCodeGen;
   V(UnknownOSRValue)                         \
   V(WrapReceiver)
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const FINAL OVERRIDE {                      \
-    return LInstruction::k##type;                                           \
-  }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;   \
-  virtual const char* Mnemonic() const FINAL OVERRIDE {               \
-    return mnemonic;                                                        \
-  }                                                                         \
-  static L##type* cast(LInstruction* instr) {                               \
-    DCHECK(instr->Is##type());                                              \
-    return reinterpret_cast<L##type*>(instr);                               \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const FINAL { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) FINAL;              \
+  const char* Mnemonic() const FINAL { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
   }
 
 
@@ -287,11 +283,9 @@ class LTemplateResultInstruction : public LInstruction {
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const FINAL OVERRIDE {
-    return R != 0 && result() != NULL;
-  }
+  bool HasResult() const FINAL { return R != 0 && result() != NULL; }
   void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() const { return results_[0]; }
+  LOperand* result() const OVERRIDE { return results_[0]; }
 
  protected:
   EmbeddedContainer<LOperand*, R> results_;
@@ -309,11 +303,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
 
  private:
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return I; }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
+  int TempCount() FINAL { return T; }
+  LOperand* TempAt(int i) FINAL { return temps_[i]; }
 };
 
 
@@ -328,8 +322,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const FINAL OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsGap() const FINAL { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
     DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
@@ -369,7 +363,7 @@ class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -381,10 +375,10 @@ class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
-  virtual bool IsControl() const OVERRIDE { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
 
@@ -427,7 +421,7 @@ class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
 
 class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const OVERRIDE { return true; }
+  bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
@@ -438,12 +432,10 @@ class LLabel FINAL : public LGap {
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -461,9 +453,7 @@ class LLabel FINAL : public LGap {
 
 class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
@@ -482,19 +472,23 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
 
 
 class LTailCallThroughMegamorphicCache FINAL
-    : public LTemplateInstruction<0, 3, 0> {
+    : public LTemplateInstruction<0, 5, 0> {
  public:
-  explicit LTailCallThroughMegamorphicCache(LOperand* context,
-                                            LOperand* receiver,
-                                            LOperand* name) {
+  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+                                   LOperand* name, LOperand* slot,
+                                   LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = receiver;
     inputs_[2] = name;
+    inputs_[3] = slot;
+    inputs_[4] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
   LOperand* name() { return inputs_[2]; }
+  LOperand* slot() { return inputs_[3]; }
+  LOperand* vector() { return inputs_[4]; }
 
   DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
                                "tail-call-through-megamorphic-cache")
@@ -504,9 +498,7 @@ class LTailCallThroughMegamorphicCache FINAL
 
 class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };
 
@@ -516,7 +508,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const FINAL OVERRIDE { return true; }
+  bool IsControl() const FINAL { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -605,7 +597,7 @@ class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* length() { return inputs_[1]; }
   LOperand* index() { return inputs_[2]; }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -845,7 +837,7 @@ class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1033,7 +1025,7 @@ class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1050,7 +1042,7 @@ class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1065,7 +1057,7 @@ class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1083,7 +1075,7 @@ class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1105,7 +1097,7 @@ class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
 
   Token::Value op() const { return hydrogen()->token(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1121,7 +1113,7 @@ class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1151,7 +1143,7 @@ class LHasCachedArrayIndexAndBranch FINAL
                                "has-cached-array-index-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1169,7 +1161,7 @@ class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1366,7 +1358,7 @@ class LBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1511,11 +1503,9 @@ class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticD;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -1539,9 +1529,9 @@ class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* right() { return inputs_[2]; }
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const FINAL  { return LInstruction::kArithmeticT; }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const FINAL { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -1650,7 +1640,7 @@ class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
 
@@ -1731,7 +1721,7 @@ class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1750,7 +1740,7 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1789,7 +1779,7 @@ class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
   LOperand* function() { return inputs_[0]; }
   LOperand* code_object() { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
   DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@@ -1806,7 +1796,7 @@ class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
@@ -1850,7 +1840,7 @@ class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1870,11 +1860,12 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
 
   const CallInterfaceDescriptor descriptor() { return descriptor_; }
 
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
-  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
@@ -1882,11 +1873,11 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return inputs_.length(); }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+  int TempCount() FINAL { return 0; }
+  LOperand* TempAt(int i) FINAL { return NULL; }
 };
 
 
@@ -1904,7 +1895,7 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1940,7 +1931,7 @@ class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1959,7 +1950,7 @@ class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1976,7 +1967,7 @@ class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -2154,7 +2145,7 @@ class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Representation representation() const {
     return hydrogen()->field_representation();
@@ -2177,7 +2168,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
@@ -2209,7 +2200,7 @@ class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
@@ -2235,7 +2226,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
@@ -2259,7 +2250,7 @@ class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2555,7 +2546,7 @@ class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -2576,9 +2567,7 @@ class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry() {}
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
@@ -2781,7 +2770,7 @@ class LChunkBuilder FINAL : public LChunkBuilderBase {
 
   // An input operand in register, stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
index 466906a..2de1c2a 100644
@@ -22,7 +22,8 @@ namespace internal {
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
     : Assembler(arg_isolate, buffer, size),
       generating_stub_(false),
-      has_frame_(false) {
+      has_frame_(false),
+      has_double_zero_reg_set_(false) {
   if (isolate() != NULL) {
     code_object_ = Handle<Object>(isolate()->heap()->undefined_value(),
                                   isolate());
@@ -595,11 +596,12 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   }
 
   bind(&done);
-  // Check that the value is a normal property.
+  // Check that the value is a field property.
   // reg2: elements + (index * kPointerSize).
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
   ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
+  DCHECK_EQ(FIELD, 0);
   And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
   Branch(miss, ne, at, Operand(zero_reg));
 
@@ -711,6 +713,28 @@ void MacroAssembler::Mulh(Register rd, Register rs, const Operand& rt) {
 }
 
 
+void MacroAssembler::Mulhu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      multu(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      muhu(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      multu(rs, at);
+      mfhi(rd);
+    } else {
+      muhu(rd, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     if (kArchVariant == kMips64r6) {
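
The new three-operand arithmetic helpers in this file (Mulhu above; Div, Mod, Modu, Divu, Ddivu, Dmodu below) all follow the same shape: pre-r6 MIPS64 encodings leave results in the HI/LO register pair and need an explicit mfhi/mflo, while r6 forms write the destination directly. A compilable stand-in, with the assembler calls replaced by prints of what would be emitted:

    #include <cstdio>

    using Register = const char*;
    void div(Register rs, Register rt) { std::printf("div  %s, %s\n", rs, rt); }
    void div(Register rd, Register rs, Register rt) {
      std::printf("div  %s, %s, %s\n", rd, rs, rt);
    }
    void mflo(Register rd) { std::printf("mflo %s\n", rd); }

    void EmitDiv(Register res, Register rs, Register rt, bool is_mips64r6) {
      if (!is_mips64r6) {
        div(rs, rt);       // quotient -> LO, remainder -> HI
        mflo(res);         // move the quotient out of LO
      } else {
        div(res, rs, rt);  // r6 encodes the destination in the instruction
      }
    }
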
@@ -815,6 +839,72 @@ void MacroAssembler::Div(Register rs, const Operand& rt) {
 }
 
 
+void MacroAssembler::Div(Register res, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      div(rs, rt.rm());
+      mflo(res);
+    } else {
+      div(res, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      div(rs, at);
+      mflo(res);
+    } else {
+      div(res, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Mod(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      div(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      mod(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      div(rs, at);
+      mfhi(rd);
+    } else {
+      mod(rd, rs, at);
+    }
+  }
+}
+
+
+void MacroAssembler::Modu(Register rd, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      divu(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      modu(rd, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      divu(rs, at);
+      mfhi(rd);
+    } else {
+      modu(rd, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     ddiv(rs, rt.rm());
@@ -864,6 +954,28 @@ void MacroAssembler::Divu(Register rs, const Operand& rt) {
 }
 
 
+void MacroAssembler::Divu(Register res, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      divu(rs, rt.rm());
+      mflo(res);
+    } else {
+      divu(res, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      divu(rs, at);
+      mflo(res);
+    } else {
+      divu(res, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     ddivu(rs, rt.rm());
@@ -876,6 +988,28 @@ void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
 }
 
 
+void MacroAssembler::Ddivu(Register res, Register rs, const Operand& rt) {
+  if (rt.is_reg()) {
+    if (kArchVariant != kMips64r6) {
+      ddivu(rs, rt.rm());
+      mflo(res);
+    } else {
+      ddivu(res, rs, rt.rm());
+    }
+  } else {
+    // li handles the relocation.
+    DCHECK(!rs.is(at));
+    li(at, rt);
+    if (kArchVariant != kMips64r6) {
+      ddivu(rs, at);
+      mflo(res);
+    } else {
+      ddivu(res, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
   if (kArchVariant != kMips64r6) {
     if (rt.is_reg()) {
@@ -901,6 +1035,31 @@ void MacroAssembler::Dmod(Register rd, Register rs, const Operand& rt) {
 }
 
 
+void MacroAssembler::Dmodu(Register rd, Register rs, const Operand& rt) {
+  if (kArchVariant != kMips64r6) {
+    if (rt.is_reg()) {
+      ddivu(rs, rt.rm());
+      mfhi(rd);
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      ddivu(rs, at);
+      mfhi(rd);
+    }
+  } else {
+    if (rt.is_reg()) {
+      dmodu(rd, rs, rt.rm());
+    } else {
+      // li handles the relocation.
+      DCHECK(!rs.is(at));
+      li(at, rt);
+      dmodu(rd, rs, at);
+    }
+  }
+}
+
+
 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
   if (rt.is_reg()) {
     and_(rd, rs, rt.rm());
@@ -1003,27 +1162,10 @@ void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
 
 
 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
-  if (kArchVariant == kMips64r2) {
-    if (rt.is_reg()) {
-      rotrv(rd, rs, rt.rm());
-    } else {
-      rotr(rd, rs, rt.imm64_);
-    }
+  if (rt.is_reg()) {
+    rotrv(rd, rs, rt.rm());
   } else {
-    if (rt.is_reg()) {
-      subu(at, zero_reg, rt.rm());
-      sllv(at, rs, at);
-      srlv(rd, rs, rt.rm());
-      or_(rd, rd, at);
-    } else {
-      if (rt.imm64_ == 0) {
-        srl(rd, rs, 0);
-      } else {
-        srl(at, rs, rt.imm64_);
-        sll(rd, rs, (0x20 - rt.imm64_) & 0x1f);
-        or_(rd, rd, at);
-      }
-    }
+    rotr(rd, rs, rt.imm64_);
   }
 }
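
The rewritten Ror simply assumes rotate instructions exist: rotr/rotrv are available on every MIPS64 revision this port targets (presumably r2 and newer), so the old synthesized fallback of negate, shift, and or is dead code. For reference, what the rotate computes in plain C++:

    #include <cstdint>

    uint32_t RotateRight32(uint32_t x, unsigned n) {
      n &= 31;  // the rotate amount uses only the low five bits
      return (x >> n) | (x << ((32 - n) & 31));
    }
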
 
@@ -1281,6 +1423,14 @@ void MacroAssembler::Ext(Register rt,
 }
 
 
+void MacroAssembler::Dext(Register rt, Register rs, uint16_t pos,
+                          uint16_t size) {
+  DCHECK(pos < 32);
+  DCHECK(pos + size < 33);
+  dext_(rt, rs, pos, size);
+}
+
+
 void MacroAssembler::Ins(Register rt,
                          Register rs,
                          uint16_t pos,
@@ -1567,15 +1717,20 @@ void MacroAssembler::BranchF(Label* target,
 }
 
 
+void MacroAssembler::Move(FPURegister dst, float imm) {
+  li(at, Operand(bit_cast<int32_t>(imm)));
+  mtc1(at, dst);
+}
+
+
 void MacroAssembler::Move(FPURegister dst, double imm) {
   static const DoubleRepresentation minus_zero(-0.0);
   static const DoubleRepresentation zero(0.0);
   DoubleRepresentation value_rep(imm);
   // Handle special values first.
-  bool force_load = dst.is(kDoubleRegZero);
-  if (value_rep == zero && !force_load) {
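+  // kDoubleRegZero can only be reused once it has been loaded with 0.0,
+  // which is recorded in has_double_zero_reg_set_ below.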
+  if (value_rep == zero && has_double_zero_reg_set_) {
     mov_d(dst, kDoubleRegZero);
-  } else if (value_rep == minus_zero && !force_load) {
+  } else if (value_rep == minus_zero && has_double_zero_reg_set_) {
     neg_d(dst, kDoubleRegZero);
   } else {
     uint32_t lo, hi;
@@ -1596,6 +1751,7 @@ void MacroAssembler::Move(FPURegister dst, double imm) {
     } else {
       mthc1(zero_reg, dst);
     }
+    if (dst.is(kDoubleRegZero)) has_double_zero_reg_set_ = true;
   }
 }
 
@@ -2000,18 +2156,26 @@ void MacroAssembler::BranchShort(int16_t offset, Condition cond, Register rs,
         b(offset);
         break;
       case eq:
-        // We don't want any other register but scratch clobbered.
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        beq(rs, r2, offset);
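+        // Comparing against a zero immediate is common; use zero_reg directly
+        // instead of materializing the constant in scratch.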
+        if (rt.imm64_ == 0) {
+          beq(rs, zero_reg, offset);
+        } else {
+          // We don't want any other register but scratch clobbered.
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          beq(rs, r2, offset);
+        }
         break;
       case ne:
-        // We don't want any other register but scratch clobbered.
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        bne(rs, r2, offset);
+        if (rt.imm64_ == 0) {
+          bne(rs, zero_reg, offset);
+        } else {
+          // We don't want any other register but scratch clobbered.
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          bne(rs, r2, offset);
+        }
         break;
       // Signed comparison.
       case greater:
@@ -2253,18 +2417,28 @@ void MacroAssembler::BranchShort(Label* L, Condition cond, Register rs,
         b(offset);
         break;
       case eq:
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        offset = shifted_branch_offset(L, false);
-        beq(rs, r2, offset);
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          beq(rs, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          offset = shifted_branch_offset(L, false);
+          beq(rs, r2, offset);
+        }
         break;
       case ne:
-        DCHECK(!scratch.is(rs));
-        r2 = scratch;
-        li(r2, rt);
-        offset = shifted_branch_offset(L, false);
-        bne(rs, r2, offset);
+        if (rt.imm64_ == 0) {
+          offset = shifted_branch_offset(L, false);
+          bne(rs, zero_reg, offset);
+        } else {
+          DCHECK(!scratch.is(rs));
+          r2 = scratch;
+          li(r2, rt);
+          offset = shifted_branch_offset(L, false);
+          bne(rs, r2, offset);
+        }
         break;
       // Signed comparison.
       case greater:
@@ -3791,17 +3965,17 @@ void MacroAssembler::CheckMap(Register obj,
 }
 
 
-void MacroAssembler::DispatchMap(Register obj,
-                                 Register scratch,
-                                 Handle<Map> map,
-                                 Handle<Code> success,
-                                 SmiCheckType smi_check_type) {
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+                                     Register scratch2, Handle<WeakCell> cell,
+                                     Handle<Code> success,
+                                     SmiCheckType smi_check_type) {
   Label fail;
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, &fail);
   }
-  ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
-  Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
+  ld(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
+  GetWeakValue(scratch2, cell);
+  Jump(success, RelocInfo::CODE_TARGET, eq, scratch1, Operand(scratch2));
   bind(&fail);
 }
 
@@ -3820,6 +3994,19 @@ void MacroAssembler::CheckMap(Register obj,
 }
 
 
+void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
+  li(value, Operand(cell));
+  ld(value, FieldMemOperand(value, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+                                   Label* miss) {
+  GetWeakValue(value, cell);
+  JumpIfSmi(value, miss);
+}
+
+
 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
   if (IsMipsSoftFloatABI) {
     Move(dst, v0, v1);
@@ -4367,6 +4554,33 @@ void MacroAssembler::SmiToDoubleFPURegister(Register smi,
 }
 
 
+void MacroAssembler::AdduAndCheckForOverflow(Register dst, Register left,
+                                             const Operand& right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  if (right.is_reg()) {
+    AdduAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+  } else {
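+    // Signed overflow occurred iff the result's sign differs from the sign
+    // of both operands, i.e. (dst ^ left) & (dst ^ right) has its sign bit
+    // set.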
+    if (dst.is(left)) {
+      mov(scratch, left);                    // Preserve left.
+      daddiu(dst, left, right.immediate());  // Left is overwritten.
+      xor_(scratch, dst, scratch);           // Original left.
+      // Load right since xori takes uint16 as immediate.
+      daddiu(t9, zero_reg, right.immediate());
+      xor_(overflow_dst, dst, t9);
+      and_(overflow_dst, overflow_dst, scratch);
+    } else {
+      daddiu(dst, left, right.immediate());
+      xor_(overflow_dst, dst, left);
+      // Load right since xori takes uint16 as immediate.
+      daddiu(t9, zero_reg, right.immediate());
+      xor_(scratch, dst, t9);
+      and_(overflow_dst, scratch, overflow_dst);
+    }
+  }
+}
+
+
 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
                                              Register left,
                                              Register right,
@@ -4409,6 +4623,33 @@ void MacroAssembler::AdduAndCheckForOverflow(Register dst,
 }
 
 
+void MacroAssembler::SubuAndCheckForOverflow(Register dst, Register left,
+                                             const Operand& right,
+                                             Register overflow_dst,
+                                             Register scratch) {
+  if (right.is_reg()) {
+    SubuAndCheckForOverflow(dst, left, right.rm(), overflow_dst, scratch);
+  } else {
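+    // Signed overflow occurred iff the operands have different signs and the
+    // result's sign differs from left's, i.e. (dst ^ left) & (left ^ right)
+    // has its sign bit set.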
+    if (dst.is(left)) {
+      mov(scratch, left);                       // Preserve left.
+      daddiu(dst, left, -(right.immediate()));  // Left is overwritten.
+      xor_(overflow_dst, dst, scratch);         // scratch is original left.
+      // Load right since xori takes uint16 as immediate.
+      daddiu(t9, zero_reg, right.immediate());
+      xor_(scratch, scratch, t9);  // scratch is original left.
+      and_(overflow_dst, scratch, overflow_dst);
+    } else {
+      daddiu(dst, left, -(right.immediate()));
+      xor_(overflow_dst, dst, left);
+      // Load right since xori takes uint16 as immediate.
+      daddiu(t9, zero_reg, right.immediate());
+      xor_(scratch, left, t9);
+      and_(overflow_dst, scratch, overflow_dst);
+    }
+  }
+}
+
+
 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
                                              Register left,
                                              Register right,
@@ -5609,18 +5850,6 @@ void MacroAssembler::CheckPageFlag(
 }
 
 
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
-                                        Register scratch,
-                                        Label* if_deprecated) {
-  if (map->CanBeDeprecated()) {
-    li(scratch, Operand(map));
-    ld(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
-    And(scratch, scratch, Operand(Map::Deprecated::kMask));
-    Branch(if_deprecated, ne, scratch, Operand(zero_reg));
-  }
-}
-
-
 void MacroAssembler::JumpIfBlack(Register object,
                                  Register scratch0,
                                  Register scratch1,
index 4ef3e53..a9e8772 100644 (file)
@@ -271,8 +271,10 @@ class MacroAssembler: public Assembler {
     mthc1(src_high, dst);
   }
 
-  // Conditional move.
+  void Move(FPURegister dst, float imm);
   void Move(FPURegister dst, double imm);
+
+  // Conditional move.
   void Movz(Register rd, Register rs, Register rt);
   void Movn(Register rd, Register rs, Register rt);
   void Movt(Register rd, Register rs, uint16_t cc = 0);
@@ -334,10 +336,6 @@ class MacroAssembler: public Assembler {
                      Condition cc,
                      Label* condition_met);
 
-  void CheckMapDeprecated(Handle<Map> map,
-                          Register scratch,
-                          Label* if_deprecated);
-
   // Check if object is in new space.  Jumps if the object is not in new space.
   // The register scratch can be object itself, but it will be clobbered.
   void JumpIfNotInNewSpace(Register object,
@@ -599,12 +597,19 @@ class MacroAssembler: public Assembler {
 
   DEFINE_INSTRUCTION(Addu);
   DEFINE_INSTRUCTION(Daddu);
+  DEFINE_INSTRUCTION(Div);
+  DEFINE_INSTRUCTION(Divu);
+  DEFINE_INSTRUCTION(Ddivu);
+  DEFINE_INSTRUCTION(Mod);
+  DEFINE_INSTRUCTION(Modu);
   DEFINE_INSTRUCTION(Ddiv);
   DEFINE_INSTRUCTION(Subu);
   DEFINE_INSTRUCTION(Dsubu);
   DEFINE_INSTRUCTION(Dmod);
+  DEFINE_INSTRUCTION(Dmodu);
   DEFINE_INSTRUCTION(Mul);
   DEFINE_INSTRUCTION(Mulh);
+  DEFINE_INSTRUCTION(Mulhu);
   DEFINE_INSTRUCTION(Dmul);
   DEFINE_INSTRUCTION(Dmulh);
   DEFINE_INSTRUCTION2(Mult);
@@ -758,6 +763,7 @@ class MacroAssembler: public Assembler {
   // MIPS64 R2 instruction macro.
   void Ins(Register rt, Register rs, uint16_t pos, uint16_t size);
   void Ext(Register rt, Register rs, uint16_t pos, uint16_t size);
+  void Dext(Register rt, Register rs, uint16_t pos, uint16_t size);
 
   // ---------------------------------------------------------------------------
   // FPU macros. These do not handle special cases like NaN or +- inf.
@@ -1104,15 +1110,19 @@ class MacroAssembler: public Assembler {
                 Label* fail,
                 SmiCheckType smi_check_type);
 
-  // Check if the map of an object is equal to a specified map and branch to a
-  // specified target if equal. Skip the smi check if not required (object is
-  // known to be a heap object)
-  void DispatchMap(Register obj,
-                   Register scratch,
-                   Handle<Map> map,
-                   Handle<Code> success,
-                   SmiCheckType smi_check_type);
+  // Check if the map of an object matches the map held by the given weak
+  // cell and branch to a specified target if equal. Skip the smi check if
+  // not required (object is known to be a heap object).
+  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+                       Handle<WeakCell> cell, Handle<Code> success,
+                       SmiCheckType smi_check_type);
+
+  // Get value of the weak cell.
+  void GetWeakValue(Register value, Handle<WeakCell> cell);
 
+  // Load the value of the weak cell in the value register. Branch to the
+  // given miss label if the weak cell was cleared.
+  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
 
   // Load and check the instance type of an object for being a string.
   // Loads the type into the second argument register.
@@ -1168,12 +1178,20 @@ class MacroAssembler: public Assembler {
                                Register overflow_dst,
                                Register scratch = at);
 
+  void AdduAndCheckForOverflow(Register dst, Register left,
+                               const Operand& right, Register overflow_dst,
+                               Register scratch = at);
+
   void SubuAndCheckForOverflow(Register dst,
                                Register left,
                                Register right,
                                Register overflow_dst,
                                Register scratch = at);
 
+  void SubuAndCheckForOverflow(Register dst, Register left,
+                               const Operand& right, Register overflow_dst,
+                               Register scratch = at);
+
   void BranchOnOverflow(Label* label,
                         Register overflow_check,
                         BranchDelaySlot bd = PROTECT) {
@@ -1198,13 +1216,10 @@ class MacroAssembler: public Assembler {
   // Runtime calls.
 
   // See comments at the beginning of CEntryStub::Generate.
-  inline void PrepareCEntryArgs(int num_args) {
-    li(s0, num_args);
-    li(s1, (num_args - 1) * kPointerSize);
-  }
+  inline void PrepareCEntryArgs(int num_args) { li(a0, num_args); }
 
   inline void PrepareCEntryFunction(const ExternalReference& ref) {
-    li(s2, Operand(ref));
+    li(a1, Operand(ref));
   }
 
 #define COND_ARGS Condition cond = al, Register rs = zero_reg, \
@@ -1720,6 +1735,7 @@ const Operand& rt = Operand(zero_reg), BranchDelaySlot bd = PROTECT
 
   bool generating_stub_;
   bool has_frame_;
+  bool has_double_zero_reg_set_;
   // This handle will be patched with the code object on installation.
   Handle<Object> code_object_;
 
index 0039665..9899d47 100644 (file)
@@ -2080,15 +2080,15 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
         case MFLO:
           *alu_out = get_register(LO);
           break;
-        case MULT:  // MULT == D_MUL_MUH.
-          // TODO(plind) - Unify MULT/DMULT with single set of 64-bit HI/Lo
-          // regs.
-          // TODO(plind) - make the 32-bit MULT ops conform to spec regarding
-          //   checking of 32-bit input values, and un-define operations of HW.
-          *i64hilo = rs * rt;
+        case MULT: {  // MULT == D_MUL_MUH.
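+          // MULT operates on the low 32 bits of its operands; sign-extend
+          // them so the 64-bit product written to HI/LO is exact.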
+          int32_t rs_lo = static_cast<int32_t>(rs);
+          int32_t rt_lo = static_cast<int32_t>(rt);
+          *i64hilo = static_cast<int64_t>(rs_lo) * static_cast<int64_t>(rt_lo);
           break;
+        }
         case MULTU:
-          *u64hilo = static_cast<uint64_t>(rs_u) * static_cast<uint64_t>(rt_u);
+          *u64hilo = static_cast<uint64_t>(rs_u & 0xffffffff) *
+                     static_cast<uint64_t>(rt_u & 0xffffffff);
           break;
         case DMULT:  // DMULT == D_MUL_MUH.
           if (kArchVariant != kMips64r6) {
@@ -2230,7 +2230,7 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
           // Interpret sa field as 5-bit lsb of insert.
           uint16_t lsb = sa;
           uint16_t size = msb - lsb + 1;
-          uint32_t mask = (1 << size) - 1;
+          uint64_t mask = (1ULL << size) - 1;
           *alu_out = (rt_u & ~(mask << lsb)) | ((rs_u & mask) << lsb);
           break;
         }
@@ -2240,8 +2240,18 @@ void Simulator::ConfigureTypeRegister(Instruction* instr,
           // Interpret sa field as 5-bit lsb of extract.
           uint16_t lsb = sa;
           uint16_t size = msb + 1;
-          uint32_t mask = (1 << size) - 1;
-          *alu_out = (rs_u & (mask << lsb)) >> lsb;
+          uint64_t mask = (1ULL << size) - 1;
+          *alu_out = static_cast<int32_t>((rs_u & (mask << lsb)) >> lsb);
+          break;
+        }
+        case DEXT: {  // Mips64r2 instruction.
+          // Interpret rd field as 5-bit msb of extract.
+          uint16_t msb = rd_reg;
+          // Interpret sa field as 5-bit lsb of extract.
+          uint16_t lsb = sa;
+          uint16_t size = msb + 1;
+          uint64_t mask = (1ULL << size) - 1;
+          *alu_out = static_cast<int64_t>((rs_u & (mask << lsb)) >> lsb);
           break;
         }
         default:
@@ -2783,7 +2793,8 @@ void Simulator::DecodeTypeRegister(Instruction* instr) {
           TraceRegWr(alu_out);
           break;
         case EXT:
-          // Ext instr leaves result in Rt, rather than Rd.
+        case DEXT:
+          // Dext/Ext instr leaves result in Rt, rather than Rd.
           set_register(rt_reg, alu_out);
           TraceRegWr(alu_out);
           break;
@@ -2815,9 +2826,9 @@ void Simulator::DecodeTypeImmediate(Instruction* instr) {
   int64_t  ft     = get_fpu_register(ft_reg);
 
   // Zero extended immediate.
-  uint32_t  oe_imm16 = 0xffff & imm16;
+  uint64_t oe_imm16 = 0xffff & imm16;
   // Sign extended immediate.
-  int32_t   se_imm16 = imm16;
+  int64_t se_imm16 = imm16;
 
   // Get current pc.
   int64_t current_pc = get_pc();
index da031d3..c325e58 100644 (file)
@@ -1,6 +1,7 @@
 // Copyright 2006-2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
+"use strict";
 
 // Handle id counters.
 var next_handle_ = 0;
@@ -44,7 +45,7 @@ function MakeMirror(value, opt_transient) {
 
   // Look for non transient mirrors in the mirror cache.
   if (!opt_transient && mirror_cache_enabled_) {
-    for (id in mirror_cache_) {
+    for (var id in mirror_cache_) {
       mirror = mirror_cache_[id];
       if (mirror.value() === value) {
         return mirror;
@@ -179,9 +180,8 @@ PropertyKind.Indexed = 2;
 
 // A copy of the PropertyType enum from property-details.h
 var PropertyType = {};
-PropertyType.Normal                  = 0;
-PropertyType.Field                   = 1;
-PropertyType.Constant                = 2;
+PropertyType.Field                   = 0;
+PropertyType.Constant                = 1;
 PropertyType.Callbacks               = 3;
 
 
@@ -193,13 +193,16 @@ PropertyAttribute.DontEnum   = DONT_ENUM;
 PropertyAttribute.DontDelete = DONT_DELETE;
 
 
-// A copy of the scope types from runtime.cc.
+// A copy of the scope types from runtime-debug.cc.
+// NOTE: these constants should be backward-compatible, so
+// add new ones to the end of this list.
 var ScopeType = { Global: 0,
                   Local: 1,
                   With: 2,
                   Closure: 3,
                   Catch: 4,
-                  Block: 5 };
+                  Block: 5,
+                  Script: 6 };
 
 
 // Mirror hierarchy:
@@ -919,13 +922,37 @@ ObjectMirror.GetInternalProperties = function(value) {
       result.push(new InternalPropertyMirror("[[BoundArgs]]", boundArgs));
     }
     return result;
-  } else if (ObjectIsPromise(value)) {
-    var result = [];
-    result.push(new InternalPropertyMirror("[[PromiseStatus]]",
-                                           PromiseGetStatus_(value)));
-    result.push(new InternalPropertyMirror("[[PromiseValue]]",
-                                           PromiseGetValue_(value)));
+  } else if (IS_MAP_ITERATOR(value) || IS_SET_ITERATOR(value)) {
+    var details = IS_MAP_ITERATOR(value) ? %MapIteratorDetails(value)
+                                         : %SetIteratorDetails(value);
+    var kind;
+    switch (details[2]) {
+      case 1: kind = "keys"; break;
+      case 2: kind = "values"; break;
+      case 3: kind = "entries"; break;
+    }
+    var result = [
+      new InternalPropertyMirror("[[IteratorHasMore]]", details[0]),
+      new InternalPropertyMirror("[[IteratorIndex]]", details[1])
+    ];
+    if (kind) {
+      result.push(new InternalPropertyMirror("[[IteratorKind]]", kind));
+    }
     return result;
+  } else if (IS_GENERATOR(value)) {
+    return [
+      new InternalPropertyMirror("[[GeneratorStatus]]",
+                                 GeneratorGetStatus_(value)),
+      new InternalPropertyMirror("[[GeneratorFunction]]",
+                                 %GeneratorGetFunction(value)),
+      new InternalPropertyMirror("[[GeneratorReceiver]]",
+                                 %GeneratorGetReceiver(value))
+    ];
+  } else if (ObjectIsPromise(value)) {
+    return [
+      new InternalPropertyMirror("[[PromiseStatus]]", PromiseGetStatus_(value)),
+      new InternalPropertyMirror("[[PromiseValue]]", PromiseGetValue_(value))
+    ];
   }
   return [];
 }
@@ -1269,11 +1296,11 @@ ErrorMirror.prototype.toText = function() {
   // Use the same text representation as in messages.js.
   var text;
   try {
-    str = %_CallFunction(this.value_, builtins.ErrorToString);
+    text = %_CallFunction(this.value_, builtins.ErrorToString);
   } catch (e) {
-    str = '#<Error>';
+    text = '#<Error>';
   }
-  return str;
+  return text;
 };
 
 
@@ -1322,13 +1349,14 @@ inherits(MapMirror, ObjectMirror);
  * Returns an array of key/value pairs of a map.
  * This will keep keys alive for WeakMaps.
  *
+ * @param {number=} opt_limit Max elements to return.
  * @returns {Array.<Object>} Array of key/value pairs of a map.
  */
-MapMirror.prototype.entries = function() {
+MapMirror.prototype.entries = function(opt_limit) {
   var result = [];
 
   if (IS_WEAKMAP(this.value_)) {
-    var entries = %GetWeakMapEntries(this.value_);
+    var entries = %GetWeakMapEntries(this.value_, opt_limit || 0);
     for (var i = 0; i < entries.length; i += 2) {
       result.push({
         key: entries[i],
@@ -1340,7 +1368,8 @@ MapMirror.prototype.entries = function() {
 
   var iter = %_CallFunction(this.value_, builtins.MapEntries);
   var next;
-  while (!(next = iter.next()).done) {
+  while ((!opt_limit || result.length < opt_limit) &&
+         !(next = iter.next()).done) {
     result.push({
       key: next.value[0],
       value: next.value[1]
@@ -1356,10 +1385,11 @@ function SetMirror(value) {
 inherits(SetMirror, ObjectMirror);
 
 
-function IteratorGetValues_(iter, next_function) {
+function IteratorGetValues_(iter, next_function, opt_limit) {
   var result = [];
   var next;
-  while (!(next = %_CallFunction(iter, next_function)).done) {
+  while ((!opt_limit || result.length < opt_limit) &&
+         !(next = %_CallFunction(iter, next_function)).done) {
     result.push(next.value);
   }
   return result;
@@ -1370,15 +1400,16 @@ function IteratorGetValues_(iter, next_function) {
  * Returns an array of elements of a set.
  * This will keep elements alive for WeakSets.
  *
+ * @param {number=} opt_limit Max elements to return.
  * @returns {Array.<Object>} Array of elements of a set.
  */
-SetMirror.prototype.values = function() {
+SetMirror.prototype.values = function(opt_limit) {
   if (IS_WEAKSET(this.value_)) {
-    return %GetWeakSetValues(this.value_);
+    return %GetWeakSetValues(this.value_, opt_limit || 0);
   }
 
   var iter = %_CallFunction(this.value_, builtins.SetValues);
-  return IteratorGetValues_(iter, builtins.SetIteratorNextJS);
+  return IteratorGetValues_(iter, builtins.SetIteratorNextJS, opt_limit);
 };
 
 
@@ -1392,15 +1423,18 @@ inherits(IteratorMirror, ObjectMirror);
  * Returns a preview of elements of an iterator.
  * Does not change the backing iterator state.
  *
+ * @param {number=} opt_limit Max elements to return.
  * @returns {Array.<Object>} Array of elements of an iterator.
  */
-IteratorMirror.prototype.preview = function() {
+IteratorMirror.prototype.preview = function(opt_limit) {
   if (IS_MAP_ITERATOR(this.value_)) {
     return IteratorGetValues_(%MapIteratorClone(this.value_),
-                              builtins.MapIteratorNextJS);
+                              builtins.MapIteratorNextJS,
+                              opt_limit);
   } else if (IS_SET_ITERATOR(this.value_)) {
     return IteratorGetValues_(%SetIteratorClone(this.value_),
-                              builtins.SetIteratorNextJS);
+                              builtins.SetIteratorNextJS,
+                              opt_limit);
   }
 };
 
@@ -1417,11 +1451,16 @@ function GeneratorMirror(value) {
 inherits(GeneratorMirror, ObjectMirror);
 
 
-GeneratorMirror.prototype.status = function() {
-  var continuation = %GeneratorGetContinuation(this.value_);
+function GeneratorGetStatus_(value) {
+  var continuation = %GeneratorGetContinuation(value);
   if (continuation < 0) return "running";
   if (continuation == 0) return "closed";
   return "suspended";
+}
+
+
+GeneratorMirror.prototype.status = function() {
+  return GeneratorGetStatus_(this.value_);
 };
 
 
@@ -2286,11 +2325,12 @@ ScopeMirror.prototype.scopeType = function() {
 
 
 ScopeMirror.prototype.scopeObject = function() {
-  // For local and closure scopes create a transient mirror as these objects are
-  // created on the fly materializing the local or closure scopes and
-  // therefore will not preserve identity.
+  // For local, closure and script scopes create a transient mirror, as
+  // these objects are created on the fly to materialize the local, closure
+  // or script scope and therefore will not preserve identity.
   var transient = this.scopeType() == ScopeType.Local ||
-                  this.scopeType() == ScopeType.Closure;
+                  this.scopeType() == ScopeType.Closure ||
+                  this.scopeType() == ScopeType.Script;
   return MakeMirror(this.details_.object(), transient);
 };
 
@@ -2875,10 +2915,9 @@ function serializeLocationFields (location, content) {
  *    "ref":<number>}
  *
  * If the attribute for the property is PropertyAttribute.None it is not added.
- * If the propertyType for the property is PropertyType.Normal it is not added.
  * Here are a couple of examples.
  *
- *   {"name":"hello","ref":1}
+ *   {"name":"hello","propertyType":0,"ref":1}
  *   {"name":"length","attributes":7,"propertyType":3,"ref":2}
  *
  * @param {PropertyMirror} propertyMirror The property to serialize.
@@ -2895,9 +2934,7 @@ JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
     if (propertyMirror.attributes() != PropertyAttribute.None) {
       result.attributes = propertyMirror.attributes();
     }
-    if (propertyMirror.propertyType() != PropertyType.Normal) {
-      result.propertyType = propertyMirror.propertyType();
-    }
+    result.propertyType = propertyMirror.propertyType();
     result.ref = propertyValue.handle();
   }
   return result;
index 740c30c..988e7da 100644 (file)
@@ -3,11 +3,8 @@
 // found in the LICENSE file.
 
 #include <errno.h>
-#include <stdio.h>
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-#include <bzlib.h>
-#endif
 #include <signal.h>
+#include <stdio.h>
 
 #include "src/v8.h"
 
 
 using namespace v8;
 
-
-class Compressor {
- public:
-  virtual ~Compressor() {}
-  virtual bool Compress(i::Vector<i::byte> input) = 0;
-  virtual i::Vector<i::byte>* output() = 0;
-};
-
-
 class SnapshotWriter {
  public:
   explicit SnapshotWriter(const char* snapshot_file)
-      : fp_(GetFileDescriptorOrDie(snapshot_file))
-      , raw_file_(NULL)
-      , raw_context_file_(NULL)
-      , startup_blob_file_(NULL)
-      , compressor_(NULL) {
-  }
+      : fp_(GetFileDescriptorOrDie(snapshot_file)),
+        startup_blob_file_(NULL) {}
 
   ~SnapshotWriter() {
     fclose(fp_);
-    if (raw_file_) fclose(raw_file_);
-    if (raw_context_file_) fclose(raw_context_file_);
     if (startup_blob_file_) fclose(startup_blob_file_);
   }
 
-  void SetCompressor(Compressor* compressor) {
-    compressor_ = compressor;
-  }
-
-  void SetRawFiles(const char* raw_file, const char* raw_context_file) {
-    raw_file_ = GetFileDescriptorOrDie(raw_file);
-    raw_context_file_ = GetFileDescriptorOrDie(raw_context_file);
-  }
-
   void SetStartupBlobFile(const char* startup_blob_file) {
     if (startup_blob_file != NULL)
       startup_blob_file_ = GetFileDescriptorOrDie(startup_blob_file);
   }
 
-  void WriteSnapshot(const i::List<i::byte>& snapshot_data,
-                     const i::Serializer& serializer,
-                     const i::List<i::byte>& context_snapshot_data,
-                     const i::Serializer& context_serializer) const {
-    WriteSnapshotFile(snapshot_data, serializer,
-                      context_snapshot_data, context_serializer);
-    MaybeWriteStartupBlob(snapshot_data, serializer,
-                          context_snapshot_data, context_serializer);
+  void WriteSnapshot(v8::StartupData blob) const {
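+    // Write the blob both as C++ source and, if requested via
+    // SetStartupBlobFile, as a raw startup blob file.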
+    i::Vector<const i::byte> blob_vector(
+        reinterpret_cast<const i::byte*>(blob.data), blob.raw_size);
+    WriteSnapshotFile(blob_vector);
+    MaybeWriteStartupBlob(blob_vector);
   }
 
  private:
-  void MaybeWriteStartupBlob(const i::List<i::byte>& snapshot_data,
-                             const i::Serializer& serializer,
-                             const i::List<i::byte>& context_snapshot_data,
-                             const i::Serializer& context_serializer) const {
+  void MaybeWriteStartupBlob(const i::Vector<const i::byte>& blob) const {
     if (!startup_blob_file_) return;
 
-    i::SnapshotByteSink sink;
-
-    int spaces[] = {i::NEW_SPACE,           i::OLD_POINTER_SPACE,
-                    i::OLD_DATA_SPACE,      i::CODE_SPACE,
-                    i::MAP_SPACE,           i::CELL_SPACE,
-                    i::PROPERTY_CELL_SPACE, i::LO_SPACE};
-
-    i::byte* snapshot_bytes = snapshot_data.begin();
-    sink.PutBlob(snapshot_bytes, snapshot_data.length(), "snapshot");
-    for (size_t i = 0; i < arraysize(spaces); ++i) {
-      i::Vector<const uint32_t> chunks =
-          serializer.FinalAllocationChunks(spaces[i]);
-      // For the start-up snapshot, none of the reservations has more than
-      // one chunk (reservation for each space fits onto a single page).
-      CHECK_EQ(1, chunks.length());
-      sink.PutInt(chunks[0], "spaces");
-    }
-
-    i::byte* context_bytes = context_snapshot_data.begin();
-    sink.PutBlob(context_bytes, context_snapshot_data.length(), "context");
-    for (size_t i = 0; i < arraysize(spaces); ++i) {
-      i::Vector<const uint32_t> chunks =
-          context_serializer.FinalAllocationChunks(spaces[i]);
-      // For the context snapshot, none of the reservations has more than
-      // one chunk (reservation for each space fits onto a single page).
-      CHECK_EQ(1, chunks.length());
-      sink.PutInt(chunks[0], "spaces");
-    }
-
-    const i::List<i::byte>& startup_blob = sink.data();
-    size_t written = fwrite(startup_blob.begin(), 1, startup_blob.length(),
-                            startup_blob_file_);
-    if (written != static_cast<size_t>(startup_blob.length())) {
+    size_t written = fwrite(blob.begin(), 1, blob.length(), startup_blob_file_);
+    if (written != static_cast<size_t>(blob.length())) {
       i::PrintF("Writing snapshot file failed.. Aborting.\n");
       exit(1);
     }
   }
 
-  void WriteSnapshotFile(const i::List<i::byte>& snapshot_data,
-                         const i::Serializer& serializer,
-                         const i::List<i::byte>& context_snapshot_data,
-                         const i::Serializer& context_serializer) const {
+  void WriteSnapshotFile(const i::Vector<const i::byte>& blob) const {
     WriteFilePrefix();
-    WriteData("", snapshot_data, raw_file_);
-    WriteData("context_", context_snapshot_data, raw_context_file_);
-    WriteMeta("context_", context_serializer);
-    WriteMeta("", serializer);
+    WriteData(blob);
     WriteFileSuffix();
   }
 
@@ -140,94 +70,29 @@ class SnapshotWriter {
   }
 
   void WriteFileSuffix() const {
+    fprintf(fp_, "const v8::StartupData Snapshot::SnapshotBlob() {\n");
+    fprintf(fp_, "  v8::StartupData blob;\n");
+    fprintf(fp_, "  blob.data = reinterpret_cast<const char*>(blob_data);\n");
+    fprintf(fp_, "  blob.raw_size = blob_size;\n");
+    fprintf(fp_, "  return blob;\n");
+    fprintf(fp_, "}\n\n");
     fprintf(fp_, "}  // namespace internal\n");
     fprintf(fp_, "}  // namespace v8\n");
   }
 
-  void WriteData(const char* prefix, const i::List<i::byte>& source_data,
-                 FILE* raw_file) const {
-    const i::List<i::byte>* data_to_be_written = NULL;
-    i::List<i::byte> compressed_data;
-    if (!compressor_) {
-      data_to_be_written = &source_data;
-    } else if (compressor_->Compress(source_data.ToVector())) {
-      compressed_data.AddAll(*compressor_->output());
-      data_to_be_written = &compressed_data;
-    } else {
-      i::PrintF("Compression failed. Aborting.\n");
-      exit(1);
-    }
-
-    DCHECK(data_to_be_written);
-    MaybeWriteRawFile(data_to_be_written, raw_file);
-    WriteData(prefix, source_data, data_to_be_written);
-  }
-
-  void MaybeWriteRawFile(const i::List<i::byte>* data, FILE* raw_file) const {
-    if (!data || !raw_file)
-      return;
-
-    // Sanity check, whether i::List iterators truly return pointers to an
-    // internal array.
-    DCHECK(data->end() - data->begin() == data->length());
-
-    size_t written = fwrite(data->begin(), 1, data->length(), raw_file);
-    if (written != (size_t)data->length()) {
-      i::PrintF("Writing raw file failed.. Aborting.\n");
-      exit(1);
-    }
-  }
-
-  void WriteData(const char* prefix, const i::List<i::byte>& source_data,
-                 const i::List<i::byte>* data_to_be_written) const {
-    fprintf(fp_, "const byte Snapshot::%sdata_[] = {\n", prefix);
-    WriteSnapshotData(data_to_be_written);
+  void WriteData(const i::Vector<const i::byte>& blob) const {
+    fprintf(fp_, "static const byte blob_data[] = {\n");
+    WriteSnapshotData(blob);
     fprintf(fp_, "};\n");
-    fprintf(fp_, "const int Snapshot::%ssize_ = %d;\n", prefix,
-            data_to_be_written->length());
-
-    if (data_to_be_written == &source_data) {
-      fprintf(fp_, "const byte* Snapshot::%sraw_data_ = Snapshot::%sdata_;\n",
-              prefix, prefix);
-      fprintf(fp_, "const int Snapshot::%sraw_size_ = Snapshot::%ssize_;\n",
-              prefix, prefix);
-    } else {
-      fprintf(fp_, "const byte* Snapshot::%sraw_data_ = NULL;\n", prefix);
-      fprintf(fp_, "const int Snapshot::%sraw_size_ = %d;\n",
-              prefix, source_data.length());
-    }
-    fprintf(fp_, "\n");
-  }
-
-  void WriteMeta(const char* prefix, const i::Serializer& ser) const {
-    WriteSizeVar(ser, prefix, "new", i::NEW_SPACE);
-    WriteSizeVar(ser, prefix, "pointer", i::OLD_POINTER_SPACE);
-    WriteSizeVar(ser, prefix, "data", i::OLD_DATA_SPACE);
-    WriteSizeVar(ser, prefix, "code", i::CODE_SPACE);
-    WriteSizeVar(ser, prefix, "map", i::MAP_SPACE);
-    WriteSizeVar(ser, prefix, "cell", i::CELL_SPACE);
-    WriteSizeVar(ser, prefix, "property_cell", i::PROPERTY_CELL_SPACE);
-    WriteSizeVar(ser, prefix, "lo", i::LO_SPACE);
+    fprintf(fp_, "static const int blob_size = %d;\n", blob.length());
     fprintf(fp_, "\n");
   }
 
-  void WriteSizeVar(const i::Serializer& ser, const char* prefix,
-                    const char* name, int space) const {
-    i::Vector<const uint32_t> chunks = ser.FinalAllocationChunks(space);
-    // For the start-up snapshot, none of the reservations has more than
-    // one chunk (total reservation fits into a single page).
-    CHECK_EQ(1, chunks.length());
-    fprintf(fp_, "const int Snapshot::%s%s_space_used_ = %d;\n", prefix, name,
-            chunks[0]);
-  }
-
-  void WriteSnapshotData(const i::List<i::byte>* data) const {
-    for (int i = 0; i < data->length(); i++) {
-      if ((i & 0x1f) == 0x1f)
-        fprintf(fp_, "\n");
-      if (i > 0)
-        fprintf(fp_, ",");
-      fprintf(fp_, "%u", static_cast<unsigned char>(data->at(i)));
+  void WriteSnapshotData(const i::Vector<const i::byte>& blob) const {
+    for (int i = 0; i < blob.length(); i++) {
+      if ((i & 0x1f) == 0x1f) fprintf(fp_, "\n");
+      if (i > 0) fprintf(fp_, ",");
+      fprintf(fp_, "%u", static_cast<unsigned char>(blob.at(i)));
     }
     fprintf(fp_, "\n");
   }
@@ -242,85 +107,20 @@ class SnapshotWriter {
   }
 
   FILE* fp_;
-  FILE* raw_file_;
-  FILE* raw_context_file_;
   FILE* startup_blob_file_;
-  Compressor* compressor_;
-};
-
-
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-class BZip2Compressor : public Compressor {
- public:
-  BZip2Compressor() : output_(NULL) {}
-  virtual ~BZip2Compressor() {
-    delete output_;
-  }
-  virtual bool Compress(i::Vector<char> input) {
-    delete output_;
-    output_ = new i::ScopedVector<char>((input.length() * 101) / 100 + 1000);
-    unsigned int output_length_ = output_->length();
-    int result = BZ2_bzBuffToBuffCompress(output_->start(), &output_length_,
-                                          input.start(), input.length(),
-                                          9, 1, 0);
-    if (result == BZ_OK) {
-      output_->Truncate(output_length_);
-      return true;
-    } else {
-      fprintf(stderr, "bzlib error code: %d\n", result);
-      return false;
-    }
-  }
-  virtual i::Vector<char>* output() { return output_; }
-
- private:
-  i::ScopedVector<char>* output_;
 };
 
 
-class BZip2Decompressor : public StartupDataDecompressor {
- public:
-  virtual ~BZip2Decompressor() { }
-
- protected:
-  virtual int DecompressData(char* raw_data,
-                             int* raw_data_size,
-                             const char* compressed_data,
-                             int compressed_data_size) {
-    DCHECK_EQ(StartupData::kBZip2,
-              V8::GetCompressedStartupDataAlgorithm());
-    unsigned int decompressed_size = *raw_data_size;
-    int result =
-        BZ2_bzBuffToBuffDecompress(raw_data,
-                                   &decompressed_size,
-                                   const_cast<char*>(compressed_data),
-                                   compressed_data_size,
-                                   0, 1);
-    if (result == BZ_OK) {
-      *raw_data_size = decompressed_size;
-    }
-    return result;
-  }
-};
-#endif
-
-
-void DumpException(Handle<Message> message) {
-  String::Utf8Value message_string(message->Get());
-  String::Utf8Value message_line(message->GetSourceLine());
-  fprintf(stderr, "%s at line %d\n", *message_string, message->GetLineNumber());
-  fprintf(stderr, "%s\n", *message_line);
-  for (int i = 0; i <= message->GetEndColumn(); ++i) {
-    fprintf(stderr, "%c", i < message->GetStartColumn() ? ' ' : '^');
-  }
-  fprintf(stderr, "\n");
-}
-
-
 int main(int argc, char** argv) {
   // By default, log code create information in the snapshot.
   i::FLAG_log_code = true;
 
+  // Omit from the snapshot the natives for features that can be turned off
+  // at runtime.
+  i::FLAG_harmony_shipping = false;
+
+  i::FLAG_logfile_per_isolate = false;
+
   // Print the usage if an error occurs when parsing the command line
   // flags or if the help flag is set.
   int result = i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
@@ -336,117 +136,15 @@ int main(int argc, char** argv) {
   v8::V8::InitializePlatform(platform);
   v8::V8::Initialize();
 
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-  BZip2Decompressor natives_decompressor;
-  int bz2_result = natives_decompressor.Decompress();
-  if (bz2_result != BZ_OK) {
-    fprintf(stderr, "bzip error code: %d\n", bz2_result);
-    exit(1);
-  }
-#endif
-  i::FLAG_logfile_per_isolate = false;
-
-  Isolate::CreateParams params;
-  params.enable_serializer = true;
-  Isolate* isolate = v8::Isolate::New(params);
-  { Isolate::Scope isolate_scope(isolate);
-    i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(isolate);
-
-    Persistent<Context> context;
-    {
-      HandleScope handle_scope(isolate);
-      context.Reset(isolate, Context::New(isolate));
-    }
-
-    if (context.IsEmpty()) {
-      fprintf(stderr,
-              "\nException thrown while compiling natives - see above.\n\n");
-      exit(1);
-    }
-    if (i::FLAG_extra_code != NULL) {
-      // Capture 100 frames if anything happens.
-      V8::SetCaptureStackTraceForUncaughtExceptions(true, 100);
-      HandleScope scope(isolate);
-      v8::Context::Scope cscope(v8::Local<v8::Context>::New(isolate, context));
-      const char* name = i::FLAG_extra_code;
-      FILE* file = base::OS::FOpen(name, "rb");
-      if (file == NULL) {
-        fprintf(stderr, "Failed to open '%s': errno %d\n", name, errno);
-        exit(1);
-      }
-
-      fseek(file, 0, SEEK_END);
-      int size = ftell(file);
-      rewind(file);
-
-      char* chars = new char[size + 1];
-      chars[size] = '\0';
-      for (int i = 0; i < size;) {
-        int read = static_cast<int>(fread(&chars[i], 1, size - i, file));
-        if (read < 0) {
-          fprintf(stderr, "Failed to read '%s': errno %d\n", name, errno);
-          exit(1);
-        }
-        i += read;
-      }
-      fclose(file);
-      Local<String> source = String::NewFromUtf8(isolate, chars);
-      TryCatch try_catch;
-      Local<Script> script = Script::Compile(source);
-      if (try_catch.HasCaught()) {
-        fprintf(stderr, "Failure compiling '%s'\n", name);
-        DumpException(try_catch.Message());
-        exit(1);
-      }
-      script->Run();
-      if (try_catch.HasCaught()) {
-        fprintf(stderr, "Failure running '%s'\n", name);
-        DumpException(try_catch.Message());
-        exit(1);
-      }
-    }
-    // Make sure all builtin scripts are cached.
-    { HandleScope scope(isolate);
-      for (int i = 0; i < i::Natives::GetBuiltinsCount(); i++) {
-        internal_isolate->bootstrapper()->NativesSourceLookup(i);
-      }
-    }
-    // If we don't do this then we end up with a stray root pointing at the
-    // context even after we have disposed of the context.
-    internal_isolate->heap()->CollectAllAvailableGarbage("mksnapshot");
-    i::Object* raw_context = *v8::Utils::OpenPersistent(context);
-    context.Reset();
-
-    // This results in a somewhat smaller snapshot, probably because it gets
-    // rid of some things that are cached between garbage collections.
-    i::SnapshotByteSink snapshot_sink;
-    i::StartupSerializer ser(internal_isolate, &snapshot_sink);
-    ser.SerializeStrongReferences();
-
-    i::SnapshotByteSink context_sink;
-    i::PartialSerializer context_ser(internal_isolate, &ser, &context_sink);
-    context_ser.Serialize(&raw_context);
-    ser.SerializeWeakReferences();
-
-    context_ser.FinalizeAllocation();
-    ser.FinalizeAllocation();
-
-    {
-      SnapshotWriter writer(argv[1]);
-      if (i::FLAG_raw_file && i::FLAG_raw_context_file)
-        writer.SetRawFiles(i::FLAG_raw_file, i::FLAG_raw_context_file);
-      if (i::FLAG_startup_blob)
-        writer.SetStartupBlobFile(i::FLAG_startup_blob);
-  #ifdef COMPRESS_STARTUP_DATA_BZ2
-      BZip2Compressor bzip2;
-      writer.SetCompressor(&bzip2);
-  #endif
-      writer.WriteSnapshot(snapshot_sink.data(), ser, context_sink.data(),
-                           context_ser);
-    }
+  {
+    SnapshotWriter writer(argv[1]);
+    if (i::FLAG_startup_blob) writer.SetStartupBlobFile(i::FLAG_startup_blob);
+    StartupData blob = v8::V8::CreateSnapshotDataBlob();
+    CHECK(blob.data);
+    writer.WriteSnapshot(blob);
+    delete[] blob.data;
   }
 
-  isolate->Dispose();
   V8::Dispose();
   V8::ShutdownPlatform();
   delete platform;
index fc66149..e601808 100644 (file)
 #include "src/snapshot-source-sink.h"
 #include "src/vector.h"
 
+#ifndef V8_USE_EXTERNAL_STARTUP_DATA
+#error natives-external.cc is used only for the external snapshot build.
+#endif  // V8_USE_EXTERNAL_STARTUP_DATA
+
+
 namespace v8 {
 namespace internal {
 
@@ -23,20 +28,26 @@ namespace internal {
  */
 class NativesStore {
  public:
-  ~NativesStore() {}
+  ~NativesStore() {
+    for (int i = 0; i < native_names_.length(); i++) {
+      native_names_[i].Dispose();
+    }
+  }
 
-  int GetBuiltinsCount() { return native_names_.length(); }
+  int GetBuiltinsCount() { return native_ids_.length(); }
   int GetDebuggerCount() { return debugger_count_; }
-  Vector<const char> GetScriptName(int index) { return native_names_[index]; }
-  Vector<const char> GetRawScriptSource(int index) {
+
+  Vector<const char> GetScriptSource(int index) {
     return native_source_[index];
   }
 
-  int GetIndex(const char* name) {
-    for (int i = 0; i < native_names_.length(); ++i) {
-      int native_name_length = native_names_[i].length();
-      if ((static_cast<int>(strlen(name)) == native_name_length) &&
-          (strncmp(name, native_names_[i].start(), native_name_length) == 0)) {
+  Vector<const char> GetScriptName(int index) { return native_names_[index]; }
+
+  int GetIndex(const char* id) {
+    for (int i = 0; i < native_ids_.length(); ++i) {
+      int native_id_length = native_ids_[i].length();
+      if ((static_cast<int>(strlen(id)) == native_id_length) &&
+          (strncmp(id, native_ids_[i].start(), native_id_length) == 0)) {
         return i;
       }
     }
@@ -44,14 +55,9 @@ class NativesStore {
     return -1;
   }
 
-  int GetRawScriptsSize() {
-    DCHECK(false);  // Used for compression. Doesn't really make sense here.
-    return 0;
-  }
-
-  Vector<const byte> GetScriptsSource() {
-    DCHECK(false);  // Used for compression. Doesn't really make sense here.
-    return Vector<const byte>();
+  Vector<const char> GetScriptsSource() {
+    DCHECK(false);  // Not implemented.
+    return Vector<const char>();
   }
 
   static NativesStore* MakeFromScriptsSource(SnapshotByteSource* source) {
@@ -75,24 +81,38 @@ class NativesStore {
  private:
   NativesStore() : debugger_count_(0) {}
 
+  Vector<const char> NameFromId(const byte* id, int id_length) {
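+    // Synthesize a script name of the form "native <id>.js" from the raw id.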
+    const char native[] = "native ";
+    const char extension[] = ".js";
+    Vector<char> name(Vector<char>::New(id_length + sizeof(native) - 1 +
+                                        sizeof(extension) - 1));
+    memcpy(name.start(), native, sizeof(native) - 1);
+    memcpy(name.start() + sizeof(native) - 1, id, id_length);
+    memcpy(name.start() + sizeof(native) - 1 + id_length, extension,
+           sizeof(extension) - 1);
+    return Vector<const char>::cast(name);
+  }
+
   bool ReadNameAndContentPair(SnapshotByteSource* bytes) {
-    const byte* name;
-    int name_length;
+    const byte* id;
+    int id_length;
     const byte* source;
     int source_length;
-    bool success = bytes->GetBlob(&name, &name_length) &&
+    bool success = bytes->GetBlob(&id, &id_length) &&
                    bytes->GetBlob(&source, &source_length);
     if (success) {
-      Vector<const char> name_vector(
-          reinterpret_cast<const char*>(name), name_length);
+      Vector<const char> id_vector(reinterpret_cast<const char*>(id),
+                                   id_length);
       Vector<const char> source_vector(
           reinterpret_cast<const char*>(source), source_length);
-      native_names_.Add(name_vector);
+      native_ids_.Add(id_vector);
       native_source_.Add(source_vector);
+      native_names_.Add(NameFromId(id, id_length));
     }
     return success;
   }
 
+  List<Vector<const char> > native_ids_;
   List<Vector<const char> > native_names_;
   List<Vector<const char> > native_source_;
   int debugger_count_;
@@ -130,9 +150,7 @@ void SetNativesFromFile(StartupData* natives_blob) {
   DCHECK(natives_blob->data);
   DCHECK(natives_blob->raw_size > 0);
 
-  SnapshotByteSource bytes(
-      reinterpret_cast<const byte*>(natives_blob->data),
-      natives_blob->raw_size);
+  SnapshotByteSource bytes(natives_blob->data, natives_blob->raw_size);
   NativesHolder<CORE>::set(NativesStore::MakeFromScriptsSource(&bytes));
   NativesHolder<EXPERIMENTAL>::set(NativesStore::MakeFromScriptsSource(&bytes));
   DCHECK(!bytes.HasMore());
@@ -160,14 +178,9 @@ int NativesCollection<type>::GetIndex(const char* name) {
   return NativesHolder<type>::get()->GetIndex(name);
 }
 
-template<NativeType type>
-int NativesCollection<type>::GetRawScriptsSize() {
-  return NativesHolder<type>::get()->GetRawScriptsSize();
-}
-
-template<NativeType type>
-Vector<const char> NativesCollection<type>::GetRawScriptSource(int index) {
-  return NativesHolder<type>::get()->GetRawScriptSource(index);
+template <NativeType type>
+Vector<const char> NativesCollection<type>::GetScriptSource(int index) {
+  return NativesHolder<type>::get()->GetScriptSource(index);
 }
 
 template<NativeType type>
@@ -175,24 +188,16 @@ Vector<const char> NativesCollection<type>::GetScriptName(int index) {
   return NativesHolder<type>::get()->GetScriptName(index);
 }
 
-template<NativeType type>
-Vector<const byte> NativesCollection<type>::GetScriptsSource() {
+template <NativeType type>
+Vector<const char> NativesCollection<type>::GetScriptsSource() {
   return NativesHolder<type>::get()->GetScriptsSource();
 }
 
-template<NativeType type>
-void NativesCollection<type>::SetRawScriptsSource(
-    Vector<const char> raw_source) {
-  CHECK(false);  // Use SetNativesFromFile for this implementation.
-}
-
 
 // The compiler can't 'see' all uses of the static methods and hence
-// my chose to elide them. This we'll explicitly instantiate these.
+// may choose to elide them. Thus we'll explicitly instantiate these.
 template class NativesCollection<CORE>;
 template class NativesCollection<EXPERIMENTAL>;
-template class NativesCollection<D8>;
-template class NativesCollection<TEST>;
 
 }  // namespace v8::internal
 }  // namespace v8
index 6ddedf0..7ce7213 100644 (file)
@@ -12,10 +12,6 @@ namespace v8 { class StartupData; }  // Forward declaration.
 namespace v8 {
 namespace internal {
 
-typedef bool (*NativeSourceCallback)(Vector<const char> name,
-                                     Vector<const char> source,
-                                     int index);
-
 enum NativeType {
   CORE, EXPERIMENTAL, D8, TEST
 };
@@ -33,11 +29,9 @@ class NativesCollection {
   // non-debugger scripts have an index in the interval [GetDebuggerCount(),
   // GetNativesCount()).
   static int GetIndex(const char* name);
-  static int GetRawScriptsSize();
-  static Vector<const char> GetRawScriptSource(int index);
+  static Vector<const char> GetScriptSource(int index);
   static Vector<const char> GetScriptName(int index);
-  static Vector<const byte> GetScriptsSource();
-  static void SetRawScriptsSource(Vector<const char> raw_source);
+  static Vector<const char> GetScriptsSource();
 };
 
 typedef NativesCollection<CORE> Natives;
index ca649c7..e990559 100644 (file)
@@ -47,6 +47,10 @@ void HeapObject::HeapObjectVerify() {
     return;
   }
 
+  // TODO(yangguo): Use this check once crbug/436911 has been fixed.
+  //  DCHECK(!NeedsToEnsureDoubleAlignment() ||
+  //         IsAligned(OffsetFrom(address()), kDoubleAlignment));
+
   switch (instance_type) {
     case SYMBOL_TYPE:
       Symbol::cast(this)->SymbolVerify();
@@ -275,6 +279,10 @@ void JSObject::JSObjectVerify() {
       if (descriptors->GetDetails(i).type() == FIELD) {
         Representation r = descriptors->GetDetails(i).representation();
         FieldIndex index = FieldIndex::ForDescriptor(map(), i);
+        if (IsUnboxedDoubleField(index)) {
+          DCHECK(r.IsDouble());
+          continue;
+        }
         Object* value = RawFastPropertyAt(index);
         if (r.IsDouble()) DCHECK(value->IsMutableHeapNumber());
         if (value->IsUninitialized()) continue;
@@ -316,6 +324,8 @@ void Map::MapVerify() {
     SLOW_DCHECK(transitions()->IsSortedNoDuplicates());
     SLOW_DCHECK(transitions()->IsConsistentWithBackPointers(this));
   }
+  SLOW_DCHECK(!FLAG_unbox_double_fields ||
+              layout_descriptor()->IsConsistentWithMap(this));
 }
 
 
@@ -325,8 +335,7 @@ void Map::DictionaryMapVerify() {
   CHECK(instance_descriptors()->IsEmpty());
   CHECK_EQ(0, pre_allocated_property_fields());
   CHECK_EQ(0, unused_property_fields());
-  CHECK_EQ(StaticVisitorBase::GetVisitorId(instance_type(), instance_size()),
-      visitor_id());
+  CHECK_EQ(StaticVisitorBase::GetVisitorId(this), visitor_id());
 }
 
 
@@ -675,9 +684,8 @@ void Code::VerifyEmbeddedObjectsDependency() {
     if (IsWeakObject(obj)) {
       if (obj->IsMap()) {
         Map* map = Map::cast(obj);
-        DependentCode::DependencyGroup group = is_optimized_code() ?
-            DependentCode::kWeakCodeGroup : DependentCode::kWeakICGroup;
-        CHECK(map->dependent_code()->Contains(group, this));
+        CHECK(map->dependent_code()->Contains(DependentCode::kWeakCodeGroup,
+                                              this));
       } else if (obj->IsJSObject()) {
         Object* raw_table = GetIsolate()->heap()->weak_object_to_code_table();
         WeakHashTable* table = WeakHashTable::cast(raw_table);
@@ -922,6 +930,7 @@ void InterceptorInfo::InterceptorInfoVerify() {
   VerifyPointer(deleter());
   VerifyPointer(enumerator());
   VerifyPointer(data());
+  VerifySmiField(kFlagsOffset);
 }
 
 
@@ -1179,30 +1188,50 @@ bool DescriptorArray::IsSortedNoDuplicates(int valid_entries) {
 }
 
 
+bool LayoutDescriptor::IsConsistentWithMap(Map* map) {
+  if (FLAG_unbox_double_fields) {
+    DescriptorArray* descriptors = map->instance_descriptors();
+    int nof_descriptors = map->NumberOfOwnDescriptors();
+    for (int i = 0; i < nof_descriptors; i++) {
+      PropertyDetails details = descriptors->GetDetails(i);
+      if (details.type() != FIELD) continue;
+      FieldIndex field_index = FieldIndex::ForDescriptor(map, i);
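+      // Only in-object double fields are stored unboxed; everything else
+      // must be tagged.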
+      bool tagged_expected =
+          !field_index.is_inobject() || !details.representation().IsDouble();
+      for (int bit = 0; bit < details.field_width_in_words(); bit++) {
+        bool tagged_actual = IsTagged(details.field_index() + bit);
+        DCHECK_EQ(tagged_expected, tagged_actual);
+        if (tagged_actual != tagged_expected) return false;
+      }
+    }
+  }
+  return true;
+}
+
+
 bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
   DCHECK(valid_entries == -1);
   Name* prev_key = NULL;
-  bool prev_is_data_property = false;
+  PropertyKind prev_kind = DATA;
   PropertyAttributes prev_attributes = NONE;
   uint32_t prev_hash = 0;
   for (int i = 0; i < number_of_transitions(); i++) {
     Name* key = GetSortedKey(i);
     uint32_t hash = key->Hash();
-    bool is_data_property = false;
+    PropertyKind kind = DATA;
     PropertyAttributes attributes = NONE;
     if (!IsSpecialTransition(key)) {
       Map* target = GetTarget(i);
       PropertyDetails details = GetTargetDetails(key, target);
-      is_data_property = details.type() == FIELD || details.type() == CONSTANT;
+      kind = details.kind();
       attributes = details.attributes();
     } else {
       // Duplicate entries are not allowed for non-property transitions.
       CHECK_NE(prev_key, key);
     }
 
-    int cmp =
-        CompareKeys(prev_key, prev_hash, prev_is_data_property, prev_attributes,
-                    key, hash, is_data_property, attributes);
+    int cmp = CompareKeys(prev_key, prev_hash, prev_kind, prev_attributes, key,
+                          hash, kind, attributes);
     if (cmp >= 0) {
       Print();
       return false;
@@ -1210,7 +1239,7 @@ bool TransitionArray::IsSortedNoDuplicates(int valid_entries) {
     prev_key = key;
     prev_hash = hash;
     prev_attributes = attributes;
-    prev_is_data_property = is_data_property;
+    prev_kind = kind;
   }
   return true;
 }
index 0288bfb..485560f 100644 (file)
@@ -26,6 +26,7 @@
 #include "src/heap/spaces.h"
 #include "src/heap/store-buffer.h"
 #include "src/isolate.h"
+#include "src/layout-descriptor-inl.h"
 #include "src/lookup.h"
 #include "src/objects.h"
 #include "src/property.h"
@@ -56,6 +57,14 @@ PropertyDetails PropertyDetails::AsDeleted() const {
 }
 
 
+int PropertyDetails::field_width_in_words() const {
+  DCHECK(type() == FIELD);
+  if (!FLAG_unbox_double_fields) return 1;
+  if (kDoubleSize == kPointerSize) return 1;
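+  // On 32-bit targets an unboxed double field spans two pointer-sized words.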
+  return representation().IsDouble() ? kDoubleSize / kPointerSize : 1;
+}
+
+
 #define TYPE_CHECKER(type, instancetype)                                \
   bool Object::Is##type() const {                                       \
   return Object::IsHeapObject() &&                                      \
@@ -464,22 +473,29 @@ STATIC_ASSERT((kExternalStringTag | kTwoByteStringTag) ==
 
 STATIC_ASSERT(v8::String::TWO_BYTE_ENCODING == kTwoByteStringTag);
 
+
 uc32 FlatStringReader::Get(int index) {
-  DCHECK(0 <= index && index <= length_);
   if (is_one_byte_) {
-    return static_cast<const byte*>(start_)[index];
+    return Get<uint8_t>(index);
   } else {
-    return static_cast<const uc16*>(start_)[index];
+    return Get<uc16>(index);
   }
 }
 
 
-Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
-  return key->AsHandle(isolate);
+template <typename Char>
+Char FlatStringReader::Get(int index) {
+  DCHECK_EQ(is_one_byte_, sizeof(Char) == 1);
+  DCHECK(0 <= index && index <= length_);
+  if (sizeof(Char) == 1) {
+    return static_cast<Char>(static_cast<const uint8_t*>(start_)[index]);
+  } else {
+    return static_cast<Char>(static_cast<const uc16*>(start_)[index]);
+  }
 }
 
 
-Handle<Object> MapCacheShape::AsHandle(Isolate* isolate, HashTableKey* key) {
+Handle<Object> StringTableShape::AsHandle(Isolate* isolate, HashTableKey* key) {
   return key->AsHandle(isolate);
 }
 
@@ -501,7 +517,7 @@ class SequentialStringKey : public HashTableKey {
   explicit SequentialStringKey(Vector<const Char> string, uint32_t seed)
       : string_(string), hash_field_(0), seed_(seed) { }
 
-  virtual uint32_t Hash() OVERRIDE {
+  uint32_t Hash() OVERRIDE {
     hash_field_ = StringHasher::HashSequentialString<Char>(string_.start(),
                                                            string_.length(),
                                                            seed_);
@@ -512,7 +528,7 @@ class SequentialStringKey : public HashTableKey {
   }
 
 
-  virtual uint32_t HashForObject(Object* other) OVERRIDE {
+  uint32_t HashForObject(Object* other) OVERRIDE {
     return String::cast(other)->Hash();
   }
 
@@ -527,11 +543,11 @@ class OneByteStringKey : public SequentialStringKey<uint8_t> {
   OneByteStringKey(Vector<const uint8_t> str, uint32_t seed)
       : SequentialStringKey<uint8_t>(str, seed) { }
 
-  virtual bool IsMatch(Object* string) OVERRIDE {
+  bool IsMatch(Object* string) OVERRIDE {
     return String::cast(string)->IsOneByteEqualTo(string_);
   }
 
-  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
+  Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
 };
 
 
@@ -542,7 +558,7 @@ class SeqOneByteSubStringKey : public HashTableKey {
     DCHECK(string_->IsSeqOneByteString());
   }
 
-  virtual uint32_t Hash() OVERRIDE {
+  uint32_t Hash() OVERRIDE {
     DCHECK(length_ >= 0);
     DCHECK(from_ + length_ <= string_->length());
     const uint8_t* chars = string_->GetChars() + from_;
@@ -553,12 +569,12 @@ class SeqOneByteSubStringKey : public HashTableKey {
     return result;
   }
 
-  virtual uint32_t HashForObject(Object* other) OVERRIDE {
+  uint32_t HashForObject(Object* other) OVERRIDE {
     return String::cast(other)->Hash();
   }
 
-  virtual bool IsMatch(Object* string) OVERRIDE;
-  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
+  bool IsMatch(Object* string) OVERRIDE;
+  Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
 
  private:
   Handle<SeqOneByteString> string_;
@@ -573,11 +589,11 @@ class TwoByteStringKey : public SequentialStringKey<uc16> {
   explicit TwoByteStringKey(Vector<const uc16> str, uint32_t seed)
       : SequentialStringKey<uc16>(str, seed) { }
 
-  virtual bool IsMatch(Object* string) OVERRIDE {
+  bool IsMatch(Object* string) OVERRIDE {
     return String::cast(string)->IsTwoByteEqualTo(string_);
   }
 
-  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
+  Handle<Object> AsHandle(Isolate* isolate) OVERRIDE;
 };
 
 
@@ -587,11 +603,11 @@ class Utf8StringKey : public HashTableKey {
   explicit Utf8StringKey(Vector<const char> string, uint32_t seed)
       : string_(string), hash_field_(0), seed_(seed) { }
 
-  virtual bool IsMatch(Object* string) OVERRIDE {
+  bool IsMatch(Object* string) OVERRIDE {
     return String::cast(string)->IsUtf8EqualTo(string_);
   }
 
-  virtual uint32_t Hash() OVERRIDE {
+  uint32_t Hash() OVERRIDE {
     if (hash_field_ != 0) return hash_field_ >> String::kHashShift;
     hash_field_ = StringHasher::ComputeUtf8Hash(string_, seed_, &chars_);
     uint32_t result = hash_field_ >> String::kHashShift;
@@ -599,11 +615,11 @@ class Utf8StringKey : public HashTableKey {
     return result;
   }
 
-  virtual uint32_t HashForObject(Object* other) OVERRIDE {
+  uint32_t HashForObject(Object* other) OVERRIDE {
     return String::cast(other)->Hash();
   }
 
-  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
+  Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
     if (hash_field_ == 0) Hash();
     return isolate->factory()->NewInternalizedStringFromUtf8(
         string_, chars_, hash_field_);
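
The long run of HashTableKey hunks is one mechanical cleanup: once a member
is marked OVERRIDE (V8's macro for C++11 override), the virtual keyword is
redundant, because only a virtual function can be overridden. A compilable
illustration of why the shortened form is equivalent:

    #include <cstdio>

    struct Key {
      virtual ~Key() {}
      virtual unsigned Hash() = 0;
    };

    struct StringKey : Key {
      // 'override' already implies the function is virtual; writing
      // 'virtual unsigned Hash() override' adds nothing, and V8's OVERRIDE
      // macro expands to 'override' on C++11 toolchains.
      unsigned Hash() override { return 42; }
    };

    int main() {
      StringKey k;
      Key* base = &k;
      printf("%u\n", base->Hash());  // dynamic dispatch still works: prints 42
    }
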
@@ -691,6 +707,7 @@ TYPE_CHECKER(JSContextExtensionObject, JS_CONTEXT_EXTENSION_OBJECT_TYPE)
 TYPE_CHECKER(Map, MAP_TYPE)
 TYPE_CHECKER(FixedArray, FIXED_ARRAY_TYPE)
 TYPE_CHECKER(FixedDoubleArray, FIXED_DOUBLE_ARRAY_TYPE)
+TYPE_CHECKER(WeakFixedArray, FIXED_ARRAY_TYPE)
 TYPE_CHECKER(ConstantPoolArray, CONSTANT_POOL_ARRAY_TYPE)
 
 
@@ -704,6 +721,11 @@ bool Object::IsDescriptorArray() const {
 }
 
 
+bool Object::IsLayoutDescriptor() const {
+  return IsSmi() || IsFixedTypedArrayBase();
+}
+
+
 bool Object::IsTransitionArray() const {
   return IsFixedArray();
 }
@@ -756,7 +778,7 @@ bool Object::IsContext() const {
       map == heap->native_context_map() ||
       map == heap->block_context_map() ||
       map == heap->module_context_map() ||
-      map == heap->global_context_map());
+      map == heap->script_context_map());
 }
 
 
@@ -767,6 +789,14 @@ bool Object::IsNativeContext() const {
 }
 
 
+bool Object::IsScriptContextTable() const {
+  if (!Object::IsHeapObject()) return false;
+  Map* map = HeapObject::cast(this)->map();
+  Heap* heap = map->GetHeap();
+  return map == heap->script_context_table_map();
+}
+
+
 bool Object::IsScopeInfo() const {
   return Object::IsHeapObject() &&
       HeapObject::cast(this)->map() ==
@@ -1635,21 +1665,6 @@ inline bool AllocationSite::CanTrack(InstanceType type) {
 }
 
 
-inline DependentCode::DependencyGroup AllocationSite::ToDependencyGroup(
-    Reason reason) {
-  switch (reason) {
-    case TENURING:
-      return DependentCode::kAllocationSiteTenuringChangedGroup;
-      break;
-    case TRANSITIONS:
-      return DependentCode::kAllocationSiteTransitionChangedGroup;
-      break;
-  }
-  UNREACHABLE();
-  return DependentCode::kAllocationSiteTransitionChangedGroup;
-}
-
-
 inline void AllocationSite::set_memento_found_count(int count) {
   int value = pretenure_data()->value();
   // Verify that we can count more mementos than we can possibly find in one
@@ -1884,7 +1899,7 @@ Handle<Map> Map::FindTransitionToField(Handle<Map> map, Handle<Name> key) {
   DisallowHeapAllocation no_allocation;
   if (!map->HasTransitionArray()) return Handle<Map>::null();
   TransitionArray* transitions = map->transitions();
-  int transition = transitions->Search(FIELD, *key, NONE);
+  int transition = transitions->Search(DATA, *key, NONE);
   if (transition == TransitionArray::kNotFound) return Handle<Map>::null();
   PropertyDetails details = transitions->GetTargetDetails(transition);
   if (details.type() != FIELD) return Handle<Map>::null();
@@ -2062,10 +2077,24 @@ void JSObject::SetInternalField(int index, Smi* value) {
 }
 
 
+bool JSObject::IsUnboxedDoubleField(FieldIndex index) {
+  if (!FLAG_unbox_double_fields) return false;
+  return map()->IsUnboxedDoubleField(index);
+}
+
+
+bool Map::IsUnboxedDoubleField(FieldIndex index) {
+  if (!FLAG_unbox_double_fields) return false;
+  if (index.is_hidden_field() || !index.is_inobject()) return false;
+  return !layout_descriptor()->IsTagged(index.property_index());
+}
+
+
 // Access fast-case object properties at index. The use of these routines
 // is needed to correctly distinguish between properties stored in-object and
 // properties stored in the properties array.
 Object* JSObject::RawFastPropertyAt(FieldIndex index) {
+  DCHECK(!IsUnboxedDoubleField(index));
   if (index.is_inobject()) {
     return READ_FIELD(this, index.offset());
   } else {
@@ -2074,7 +2103,13 @@ Object* JSObject::RawFastPropertyAt(FieldIndex index) {
 }
 
 
-void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
+double JSObject::RawFastDoublePropertyAt(FieldIndex index) {
+  DCHECK(IsUnboxedDoubleField(index));
+  return READ_DOUBLE_FIELD(this, index.offset());
+}
+
+
+void JSObject::RawFastPropertyAtPut(FieldIndex index, Object* value) {
   if (index.is_inobject()) {
     int offset = index.offset();
     WRITE_FIELD(this, offset, value);
@@ -2085,6 +2120,21 @@ void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
 }
 
 
+void JSObject::RawFastDoublePropertyAtPut(FieldIndex index, double value) {
+  WRITE_DOUBLE_FIELD(this, index.offset(), value);
+}
+
+
+void JSObject::FastPropertyAtPut(FieldIndex index, Object* value) {
+  if (IsUnboxedDoubleField(index)) {
+    DCHECK(value->IsMutableHeapNumber());
+    RawFastDoublePropertyAtPut(index, HeapNumber::cast(value)->value());
+  } else {
+    RawFastPropertyAtPut(index, value);
+  }
+}
+
+
 int JSObject::GetInObjectPropertyOffset(int index) {
   return map()->GetInObjectPropertyOffset(index);
 }
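
The RawFastPropertyAt / RawFastDoublePropertyAt split (and the matching
*Put pair) exists because an unboxed double field stores raw IEEE-754 bits
in a slot where a tagged pointer would otherwise live, so readers must
first ask the map which view applies. A toy model of the two views on one
object body; the names and fixed layout here are illustrative only:

    #include <cstdio>
    #include <cstring>

    // Toy object with two in-object "fields": slot 0 holds a tagged
    // pointer, slot 1 holds a raw double written in place (the unboxed case).
    struct ToyObject {
      void* tagged_slot;
      double raw_double_slot;

      void* RawFastPropertyAt0() const { return tagged_slot; }

      double RawFastDoublePropertyAt1() const {
        // Like READ_DOUBLE_FIELD: reinterpret the field bytes as a double.
        double d;
        memcpy(&d, &raw_double_slot, sizeof(d));
        return d;
      }

      void RawFastDoublePropertyAtPut1(double value) {
        memcpy(&raw_double_slot, &value, sizeof(value));
      }
    };

    int main() {
      int heap_thing = 7;
      ToyObject o = {&heap_thing, 0.0};
      o.RawFastDoublePropertyAtPut1(1.5);
      printf("tagged=%p double=%g\n", o.RawFastPropertyAt0(),
             o.RawFastDoublePropertyAt1());
    }
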
@@ -2319,6 +2369,39 @@ void FixedDoubleArray::FillWithHoles(int from, int to) {
 }
 
 
+Object* WeakFixedArray::Get(int index) const {
+  Object* raw = FixedArray::cast(this)->get(index + kFirstIndex);
+  if (raw->IsSmi()) return raw;
+  return WeakCell::cast(raw)->value();
+}
+
+
+bool WeakFixedArray::IsEmptySlot(int index) const {
+  DCHECK(index < Length());
+  return Get(index)->IsSmi();
+}
+
+
+void WeakFixedArray::clear(int index) {
+  FixedArray::cast(this)->set(index + kFirstIndex, Smi::FromInt(0));
+}
+
+
+int WeakFixedArray::Length() const {
+  return FixedArray::cast(this)->length() - kFirstIndex;
+}
+
+
+int WeakFixedArray::last_used_index() const {
+  return Smi::cast(FixedArray::cast(this)->get(kLastUsedIndexIndex))->value();
+}
+
+
+void WeakFixedArray::set_last_used_index(int index) {
+  FixedArray::cast(this)->set(kLastUsedIndexIndex, Smi::FromInt(index));
+}
+
+
 void ConstantPoolArray::NumberOfEntries::increment(Type type) {
   DCHECK(type < NUMBER_OF_TYPES);
   element_counts_[type]++;
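
The WeakFixedArray accessors above imply its layout: it reuses a plain
FixedArray whose slot kLastUsedIndexIndex caches the last written position,
whose payload begins at kFirstIndex, and in which a cleared entry is the
Smi zero while live entries go through a WeakCell. A rough standalone model
of that layout, with ints standing in for tagged values:

    #include <cstdio>
    #include <vector>

    // Simplified stand-ins: a cleared slot is 0; a live slot is a non-zero
    // id standing in for a WeakCell's value.
    class ToyWeakFixedArray {
     public:
      static const int kLastUsedIndexIndex = 0;
      static const int kFirstIndex = 1;

      explicit ToyWeakFixedArray(int capacity)
          : storage_(kFirstIndex + capacity, 0) {}

      int Length() const {
        return static_cast<int>(storage_.size()) - kFirstIndex;
      }
      int Get(int index) const { return storage_[index + kFirstIndex]; }
      bool IsEmptySlot(int index) const { return Get(index) == 0; }
      void Clear(int index) { storage_[index + kFirstIndex] = 0; }

      void Set(int index, int value) {
        storage_[index + kFirstIndex] = value;
        storage_[kLastUsedIndexIndex] = index;  // remember last used slot
      }
      int last_used_index() const { return storage_[kLastUsedIndexIndex]; }

     private:
      std::vector<int> storage_;
    };

    int main() {
      ToyWeakFixedArray arr(3);
      arr.Set(1, 99);
      printf("len=%d empty(0)=%d get(1)=%d last=%d\n", arr.Length(),
             arr.IsEmptySlot(0), arr.Get(1), arr.last_used_index());
    }
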
@@ -2709,6 +2792,17 @@ WriteBarrierMode HeapObject::GetWriteBarrierMode(
 }
 
 
+bool HeapObject::NeedsToEnsureDoubleAlignment() {
+#ifndef V8_HOST_ARCH_64_BIT
+  return (IsFixedFloat64Array() || IsFixedDoubleArray() ||
+          IsConstantPoolArray()) &&
+         FixedArrayBase::cast(this)->length() != 0;
+#else
+  return false;
+#endif  // V8_HOST_ARCH_64_BIT
+}
+
+
 void FixedArray::set(int index,
                      Object* value,
                      WriteBarrierMode mode) {
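
NeedsToEnsureDoubleAlignment() can only answer true on 32-bit hosts: there
kPointerSize is 4 while raw-double payloads want 8-byte alignment, so the
allocator may need to place a one-word filler first; on 64-bit hosts every
allocation is already suitably aligned. A small sketch of the padding
computation (the filler convention is simplified here):

    #include <cstdint>
    #include <cstdio>

    // Returns how many filler bytes to insert before an allocation at
    // 'address' so a payload of raw doubles ends up 8-byte aligned.
    uintptr_t DoubleAlignmentPadding(uintptr_t address, int pointer_size) {
      if (pointer_size == 8) return 0;  // 64-bit: already aligned
      const uintptr_t kDoubleAlignment = 8;
      uintptr_t misalignment = address & (kDoubleAlignment - 1);
      return misalignment == 0 ? 0 : kDoubleAlignment - misalignment;
    }

    int main() {
      // On a 32-bit heap, an object starting at ...4 needs 4 filler bytes.
      printf("%u\n", (unsigned)DoubleAlignmentPadding(0x1004, 4));  // 4
      printf("%u\n", (unsigned)DoubleAlignmentPadding(0x1008, 4));  // 0
    }
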
@@ -2946,7 +3040,7 @@ void Map::LookupDescriptor(JSObject* holder,
 void Map::LookupTransition(JSObject* holder, Name* name,
                            PropertyAttributes attributes,
                            LookupResult* result) {
-  int transition_index = this->SearchTransition(FIELD, name, attributes);
+  int transition_index = this->SearchTransition(DATA, name, attributes);
   if (transition_index == TransitionArray::kNotFound) return result->NotFound();
   result->TransitionResult(holder, this->GetTransition(transition_index));
 }
@@ -3103,8 +3197,7 @@ void DescriptorArray::Set(int descriptor_number,
   NoIncrementalWriteBarrierSet(this,
                                ToValueIndex(descriptor_number),
                                *desc->GetValue());
-  NoIncrementalWriteBarrierSet(this,
-                               ToDetailsIndex(descriptor_number),
+  NoIncrementalWriteBarrierSet(this, ToDetailsIndex(descriptor_number),
                                desc->GetDetails().AsSmi());
 }
 
@@ -3279,8 +3372,8 @@ CAST_ACCESSOR(JSTypedArray)
 CAST_ACCESSOR(JSValue)
 CAST_ACCESSOR(JSWeakMap)
 CAST_ACCESSOR(JSWeakSet)
+CAST_ACCESSOR(LayoutDescriptor)
 CAST_ACCESSOR(Map)
-CAST_ACCESSOR(MapCache)
 CAST_ACCESSOR(Name)
 CAST_ACCESSOR(NameDictionary)
 CAST_ACCESSOR(NormalizedMapCache)
@@ -3305,6 +3398,7 @@ CAST_ACCESSOR(Struct)
 CAST_ACCESSOR(Symbol)
 CAST_ACCESSOR(UnseededNumberDictionary)
 CAST_ACCESSOR(WeakCell)
+CAST_ACCESSOR(WeakFixedArray)
 CAST_ACCESSOR(WeakHashTable)
 
 
@@ -3535,6 +3629,22 @@ ConsString* String::VisitFlat(Visitor* visitor,
 }
 
 
+template <>
+inline Vector<const uint8_t> String::GetCharVector() {
+  String::FlatContent flat = GetFlatContent();
+  DCHECK(flat.IsOneByte());
+  return flat.ToOneByteVector();
+}
+
+
+template <>
+inline Vector<const uc16> String::GetCharVector() {
+  String::FlatContent flat = GetFlatContent();
+  DCHECK(flat.IsTwoByte());
+  return flat.ToUC16Vector();
+}
+
+
 uint16_t SeqOneByteString::SeqOneByteStringGet(int index) {
   DCHECK(index >= 0 && index < length());
   return READ_BYTE_FIELD(this, kHeaderSize + index * kCharSize);
@@ -4321,6 +4431,14 @@ int Map::GetInObjectPropertyOffset(int index) {
 }
 
 
+Handle<Map> Map::CopyInstallDescriptorsForTesting(
+    Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
+    Handle<LayoutDescriptor> layout_descriptor) {
+  return CopyInstallDescriptors(map, new_descriptor, descriptors,
+                                layout_descriptor);
+}
+
+
 int HeapObject::SizeFromMap(Map* map) {
   int instance_size = map->instance_size();
   if (instance_size != kVariableSizeSentinel) return instance_size;
@@ -4546,34 +4664,12 @@ bool Map::is_migration_target() {
 }
 
 
-void Map::set_done_inobject_slack_tracking(bool value) {
-  set_bit_field3(DoneInobjectSlackTracking::update(bit_field3(), value));
+void Map::set_counter(int value) {
+  set_bit_field3(Counter::update(bit_field3(), value));
 }
 
 
-bool Map::done_inobject_slack_tracking() {
-  return DoneInobjectSlackTracking::decode(bit_field3());
-}
-
-
-void Map::set_construction_count(int value) {
-  set_bit_field3(ConstructionCount::update(bit_field3(), value));
-}
-
-
-int Map::construction_count() {
-  return ConstructionCount::decode(bit_field3());
-}
-
-
-void Map::freeze() {
-  set_bit_field3(IsFrozen::update(bit_field3(), true));
-}
-
-
-bool Map::is_frozen() {
-  return IsFrozen::decode(bit_field3());
-}
+int Map::counter() { return Counter::decode(bit_field3()); }
 
 
 void Map::mark_unstable() {
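
done_inobject_slack_tracking, construction_count and the frozen bit all
give way to a single counter() field decoded from bit_field3; as the
JSFunction change further down shows, slack tracking is now "in progress"
while the counter is still at or above Map::kSlackTrackingCounterEnd,
ticking down as instances are constructed. A hedged model of that scheme;
the threshold values are invented for illustration:

    #include <cstdio>

    // Illustrative thresholds; V8 derives the real ones from bit_field3.
    const int kSlackTrackingCounterStart = 7;
    const int kSlackTrackingCounterEnd = 1;

    struct ToyMap {
      int counter;
      bool IsInobjectSlackTrackingInProgress() const {
        return counter >= kSlackTrackingCounterEnd;
      }
    };

    int main() {
      ToyMap map = {kSlackTrackingCounterStart};
      // Each constructed instance ticks the counter down until tracking
      // finishes and unused in-object slots can be shrunk away.
      while (map.IsInobjectSlackTrackingInProgress()) {
        printf("constructing, counter=%d\n", map.counter);
        map.counter--;
      }
      printf("slack tracking finished\n");
    }
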
@@ -4825,6 +4921,21 @@ void Code::set_compiled_optimizable(bool value) {
 }
 
 
+bool Code::has_reloc_info_for_serialization() {
+  DCHECK_EQ(FUNCTION, kind());
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  return FullCodeFlagsHasRelocInfoForSerialization::decode(flags);
+}
+
+
+void Code::set_has_reloc_info_for_serialization(bool value) {
+  DCHECK_EQ(FUNCTION, kind());
+  byte flags = READ_BYTE_FIELD(this, kFullCodeFlags);
+  flags = FullCodeFlagsHasRelocInfoForSerialization::update(flags, value);
+  WRITE_BYTE_FIELD(this, kFullCodeFlags, flags);
+}
+
+
 int Code::allow_osr_at_loop_nesting_level() {
   DCHECK_EQ(FUNCTION, kind());
   int fields = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
@@ -4957,34 +5068,6 @@ void Code::set_marked_for_deoptimization(bool flag) {
 }
 
 
-bool Code::is_weak_stub() {
-  return CanBeWeakStub() && WeakStubField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::mark_as_weak_stub() {
-  DCHECK(CanBeWeakStub());
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
-  int updated = WeakStubField::update(previous, true);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
-bool Code::is_invalidated_weak_stub() {
-  return is_weak_stub() && InvalidatedWeakStubField::decode(
-      READ_UINT32_FIELD(this, kKindSpecificFlags1Offset));
-}
-
-
-void Code::mark_as_invalidated_weak_stub() {
-  DCHECK(is_inline_cache_stub());
-  int previous = READ_UINT32_FIELD(this, kKindSpecificFlags1Offset);
-  int updated = InvalidatedWeakStubField::update(previous, true);
-  WRITE_UINT32_FIELD(this, kKindSpecificFlags1Offset, updated);
-}
-
-
 bool Code::is_inline_cache_stub() {
   Kind kind = this->kind();
   switch (kind) {
@@ -5131,13 +5214,6 @@ class Code::FindAndReplacePattern {
 };
 
 
-bool Code::IsWeakObjectInIC(Object* object) {
-  return object->IsMap() && Map::cast(object)->CanTransition() &&
-         FLAG_collect_maps &&
-         FLAG_weak_embedded_maps_in_ic;
-}
-
-
 Object* Map::prototype() const {
   return READ_FIELD(this, kPrototypeOffset);
 }
@@ -5166,14 +5242,47 @@ static void EnsureHasTransitionArray(Handle<Map> map) {
 }
 
 
-void Map::InitializeDescriptors(DescriptorArray* descriptors) {
+LayoutDescriptor* Map::layout_descriptor_gc_safe() {
+  Object* layout_desc = READ_FIELD(this, kLayoutDecriptorOffset);
+  return LayoutDescriptor::cast_gc_safe(layout_desc);
+}
+
+
+bool Map::HasFastPointerLayout() const {
+  Object* layout_desc = READ_FIELD(this, kLayoutDecriptorOffset);
+  return LayoutDescriptor::IsFastPointerLayout(layout_desc);
+}
+
+
+void Map::UpdateDescriptors(DescriptorArray* descriptors,
+                            LayoutDescriptor* layout_desc) {
+  set_instance_descriptors(descriptors);
+  if (FLAG_unbox_double_fields) {
+    if (layout_descriptor()->IsSlowLayout()) {
+      set_layout_descriptor(layout_desc);
+    }
+    SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
+    DCHECK(visitor_id() == StaticVisitorBase::GetVisitorId(this));
+  }
+}
+
+
+void Map::InitializeDescriptors(DescriptorArray* descriptors,
+                                LayoutDescriptor* layout_desc) {
   int len = descriptors->number_of_descriptors();
   set_instance_descriptors(descriptors);
   SetNumberOfOwnDescriptors(len);
+
+  if (FLAG_unbox_double_fields) {
+    set_layout_descriptor(layout_desc);
+    SLOW_DCHECK(layout_descriptor()->IsConsistentWithMap(this));
+    set_visitor_id(StaticVisitorBase::GetVisitorId(this));
+  }
 }
 
 
 ACCESSORS(Map, instance_descriptors, DescriptorArray, kDescriptorsOffset)
+ACCESSORS(Map, layout_descriptor, LayoutDescriptor, kLayoutDecriptorOffset)
 
 
 void Map::set_bit_field3(uint32_t bits) {
@@ -5189,18 +5298,31 @@ uint32_t Map::bit_field3() {
 }
 
 
+LayoutDescriptor* Map::GetLayoutDescriptor() {
+  return FLAG_unbox_double_fields ? layout_descriptor()
+                                  : LayoutDescriptor::FastPointerLayout();
+}
+
+
 void Map::AppendDescriptor(Descriptor* desc) {
   DescriptorArray* descriptors = instance_descriptors();
   int number_of_own_descriptors = NumberOfOwnDescriptors();
   DCHECK(descriptors->number_of_descriptors() == number_of_own_descriptors);
   descriptors->Append(desc);
   SetNumberOfOwnDescriptors(number_of_own_descriptors + 1);
+
+// This function does not support appending double field descriptors and
+// should never try to (otherwise the layout descriptor must be updated too).
+#ifdef DEBUG
+  PropertyDetails details = desc->GetDetails();
+  CHECK(details.type() != FIELD || !details.representation().IsDouble());
+#endif
 }
 
 
 Object* Map::GetBackPointer() {
   Object* object = READ_FIELD(this, kTransitionsOrBackPointerOffset);
-  if (object->IsDescriptorArray()) {
+  if (object->IsTransitionArray()) {
     return TransitionArray::cast(object)->back_pointer_storage();
   } else {
     DCHECK(object->IsMap() || object->IsUndefined());
@@ -5247,10 +5369,10 @@ int Map::SearchSpecialTransition(Symbol* name) {
 }
 
 
-int Map::SearchTransition(PropertyType type, Name* name,
+int Map::SearchTransition(PropertyKind kind, Name* name,
                           PropertyAttributes attributes) {
   if (HasTransitionArray()) {
-    return transitions()->Search(type, name, attributes);
+    return transitions()->Search(kind, name, attributes);
   }
   return TransitionArray::kNotFound;
 }
@@ -5269,12 +5391,10 @@ void Map::SetPrototypeTransitions(
     Handle<Map> map, Handle<FixedArray> proto_transitions) {
   EnsureHasTransitionArray(map);
   int old_number_of_transitions = map->NumberOfProtoTransitions();
-#ifdef DEBUG
-  if (map->HasPrototypeTransitions()) {
+  if (Heap::ShouldZapGarbage() && map->HasPrototypeTransitions()) {
     DCHECK(map->GetPrototypeTransitions() != *proto_transitions);
     map->ZapPrototypeTransitions();
   }
-#endif
   map->transitions()->SetPrototypeTransitions(*proto_transitions);
   map->SetNumberOfProtoTransitions(old_number_of_transitions);
 }
@@ -5310,7 +5430,7 @@ void Map::set_transitions(TransitionArray* transition_array,
         } else {
           PropertyDetails details =
               TransitionArray::GetTargetDetails(key, target);
-          new_target_index = transition_array->Search(details.type(), key,
+          new_target_index = transition_array->Search(details.kind(), key,
                                                       details.attributes());
         }
         DCHECK_NE(TransitionArray::kNotFound, new_target_index);
@@ -5359,7 +5479,6 @@ ACCESSORS(JSFunction, next_function_link, Object, kNextFunctionLinkOffset)
 
 ACCESSORS(GlobalObject, builtins, JSBuiltinsObject, kBuiltinsOffset)
 ACCESSORS(GlobalObject, native_context, Context, kNativeContextOffset)
-ACCESSORS(GlobalObject, global_context, Context, kGlobalContextOffset)
 ACCESSORS(GlobalObject, global_proxy, JSObject, kGlobalProxyOffset)
 
 ACCESSORS(JSGlobalProxy, native_context, Object, kNativeContextOffset)
@@ -5395,6 +5514,9 @@ ACCESSORS(InterceptorInfo, query, Object, kQueryOffset)
 ACCESSORS(InterceptorInfo, deleter, Object, kDeleterOffset)
 ACCESSORS(InterceptorInfo, enumerator, Object, kEnumeratorOffset)
 ACCESSORS(InterceptorInfo, data, Object, kDataOffset)
+SMI_ACCESSORS(InterceptorInfo, flags, kFlagsOffset)
+BOOL_ACCESSORS(InterceptorInfo, flags, can_intercept_symbols,
+               kCanInterceptSymbolsBit)
 
 ACCESSORS(CallHandlerInfo, callback, Object, kCallbackOffset)
 ACCESSORS(CallHandlerInfo, data, Object, kDataOffset)
@@ -5492,6 +5614,9 @@ ACCESSORS(SharedFunctionInfo, optimized_code_map, Object,
 ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
 ACCESSORS(SharedFunctionInfo, feedback_vector, TypeFeedbackVector,
           kFeedbackVectorOffset)
+#if TRACE_MAPS
+SMI_ACCESSORS(SharedFunctionInfo, unique_id, kUniqueIdOffset)
+#endif
 ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
           kInstanceClassNameOffset)
 ACCESSORS(SharedFunctionInfo, function_data, Object, kFunctionDataOffset)
@@ -5517,9 +5642,7 @@ BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_expression,
 BOOL_ACCESSORS(SharedFunctionInfo, start_position_and_type, is_toplevel,
                kIsTopLevelBit)
 
-BOOL_ACCESSORS(SharedFunctionInfo,
-               compiler_hints,
-               allows_lazy_compilation,
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, allows_lazy_compilation,
                kAllowLazyCompilation)
 BOOL_ACCESSORS(SharedFunctionInfo,
                compiler_hints,
@@ -5669,6 +5792,10 @@ void SharedFunctionInfo::set_kind(FunctionKind kind) {
 }
 
 
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, uses_super_property,
+               kUsesSuperProperty)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, uses_super_constructor_call,
+               kUsesSuperConstructorCall)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, native, kNative)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, inline_builtin,
                kInlineBuiltin)
@@ -5684,9 +5811,12 @@ BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_arrow, kIsArrow)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_generator, kIsGenerator)
 BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_concise_method,
                kIsConciseMethod)
+BOOL_ACCESSORS(SharedFunctionInfo, compiler_hints, is_default_constructor,
+               kIsDefaultConstructor)
 
 ACCESSORS(CodeCache, default_cache, FixedArray, kDefaultCacheOffset)
 ACCESSORS(CodeCache, normal_type_cache, Object, kNormalTypeCacheOffset)
+ACCESSORS(CodeCache, weak_cell_cache, Object, kWeakCellCacheOffset)
 
 ACCESSORS(PolymorphicCodeCache, cache, Object, kCacheOffset)
 
@@ -5839,10 +5969,9 @@ void SharedFunctionInfo::set_opt_count(int opt_count) {
 }
 
 
-BailoutReason SharedFunctionInfo::DisableOptimizationReason() {
-  BailoutReason reason = static_cast<BailoutReason>(
+BailoutReason SharedFunctionInfo::disable_optimization_reason() {
+  return static_cast<BailoutReason>(
       DisabledOptimizationReasonBits::decode(opt_count_and_bailout_reason()));
-  return reason;
 }
 
 
@@ -5923,7 +6052,7 @@ bool JSFunction::IsInOptimizationQueue() {
 
 bool JSFunction::IsInobjectSlackTrackingInProgress() {
   return has_initial_map() &&
-      initial_map()->construction_count() != JSFunction::kNoSlackTracking;
+         initial_map()->counter() >= Map::kSlackTrackingCounterEnd;
 }
 
 
@@ -6399,7 +6528,7 @@ JSRegExp::Flags JSRegExp::GetFlags() {
 String* JSRegExp::Pattern() {
   DCHECK(this->data()->IsFixedArray());
   Object* data = this->data();
-  String* pattern= String::cast(FixedArray::cast(data)->get(kSourceIndex));
+  String* pattern = String::cast(FixedArray::cast(data)->get(kSourceIndex));
   return pattern;
 }
 
@@ -6419,7 +6548,7 @@ void JSRegExp::SetDataAt(int index, Object* value) {
 
 ElementsKind JSObject::GetElementsKind() {
   ElementsKind kind = map()->elements_kind();
-#if DEBUG
+#if VERIFY_HEAP && DEBUG
   FixedArrayBase* fixed_array =
       reinterpret_cast<FixedArrayBase*>(READ_FIELD(this, kElementsOffset));
 
@@ -6611,6 +6740,30 @@ uint32_t StringHasher::GetHashCore(uint32_t running_hash) {
 }
 
 
+uint32_t StringHasher::ComputeRunningHash(uint32_t running_hash,
+                                          const uc16* chars, int length) {
+  DCHECK_NOT_NULL(chars);
+  DCHECK(length >= 0);
+  for (int i = 0; i < length; ++i) {
+    running_hash = AddCharacterCore(running_hash, *chars++);
+  }
+  return running_hash;
+}
+
+
+uint32_t StringHasher::ComputeRunningHashOneByte(uint32_t running_hash,
+                                                 const char* chars,
+                                                 int length) {
+  DCHECK_NOT_NULL(chars);
+  DCHECK(length >= 0);
+  for (int i = 0; i < length; ++i) {
+    uint16_t c = static_cast<uint16_t>(*chars++);
+    running_hash = AddCharacterCore(running_hash, c);
+  }
+  return running_hash;
+}
+
+
 void StringHasher::AddCharacter(uint16_t c) {
   // Use the Jenkins one-at-a-time hash function to update the hash
   // for the given character.
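
ComputeRunningHash and ComputeRunningHashOneByte merely batch the existing
per-character step over a whole buffer. That step, named in the comment
above, is the classic Jenkins one-at-a-time update, with a finalization
pass applied once at the end; a standalone version of the well-known
algorithm:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Jenkins one-at-a-time: the per-character mixing step...
    uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c) {
      running_hash += c;
      running_hash += (running_hash << 10);
      running_hash ^= (running_hash >> 6);
      return running_hash;
    }

    // ...and the finalization applied once after all characters.
    uint32_t GetHashCore(uint32_t running_hash) {
      running_hash += (running_hash << 3);
      running_hash ^= (running_hash >> 11);
      running_hash += (running_hash << 15);
      return running_hash;
    }

    uint32_t HashOneByte(const char* chars, int length, uint32_t seed) {
      uint32_t running_hash = seed;
      for (int i = 0; i < length; ++i) {
        running_hash = AddCharacterCore(running_hash, (uint8_t)chars[i]);
      }
      return GetHashCore(running_hash);
    }

    int main() {
      const char* s = "hello";
      printf("%08x\n", HashOneByte(s, (int)strlen(s), 0));
    }
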
@@ -6676,14 +6829,8 @@ uint32_t IteratingStringHasher::Hash(String* string, uint32_t seed) {
   // Nothing to do.
   if (hasher.has_trivial_hash()) return hasher.GetHashField();
   ConsString* cons_string = String::VisitFlat(&hasher, string);
-  // The string was flat.
-  if (cons_string == NULL) return hasher.GetHashField();
-  // This is a ConsString, iterate across it.
-  ConsStringIterator iter(cons_string);
-  int offset;
-  while (NULL != (string = iter.Next(&offset))) {
-    String::VisitFlat(&hasher, string, offset);
-  }
+  if (cons_string == nullptr) return hasher.GetHashField();
+  hasher.VisitConsString(cons_string);
   return hasher.GetHashField();
 }
 
@@ -6887,7 +7034,10 @@ bool AccessorInfo::IsCompatibleReceiver(Object* receiver) {
 
 
 void ExecutableAccessorInfo::clear_setter() {
-  set_setter(GetIsolate()->heap()->undefined_value(), SKIP_WRITE_BARRIER);
+  auto foreign = GetIsolate()->factory()->NewForeign(
+      reinterpret_cast<v8::internal::Address>(
+          reinterpret_cast<intptr_t>(nullptr)));
+  set_setter(*foreign);
 }
 
 
@@ -7263,12 +7413,36 @@ void ExternalTwoByteString::ExternalTwoByteStringIterateBody() {
 }
 
 
+static inline void IterateBodyUsingLayoutDescriptor(HeapObject* object,
+                                                    int start_offset,
+                                                    int end_offset,
+                                                    ObjectVisitor* v) {
+  DCHECK(FLAG_unbox_double_fields);
+  DCHECK(IsAligned(start_offset, kPointerSize) &&
+         IsAligned(end_offset, kPointerSize));
+
+  LayoutDescriptorHelper helper(object->map());
+  DCHECK(!helper.all_fields_tagged());
+
+  for (int offset = start_offset; offset < end_offset; offset += kPointerSize) {
+    // Visit all tagged fields.
+    if (helper.IsTagged(offset)) {
+      v->VisitPointer(HeapObject::RawField(object, offset));
+    }
+  }
+}
+
+
 template<int start_offset, int end_offset, int size>
 void FixedBodyDescriptor<start_offset, end_offset, size>::IterateBody(
     HeapObject* obj,
     ObjectVisitor* v) {
+  if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
     v->VisitPointers(HeapObject::RawField(obj, start_offset),
                      HeapObject::RawField(obj, end_offset));
+  } else {
+    IterateBodyUsingLayoutDescriptor(obj, start_offset, end_offset, v);
+  }
 }
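
IterateBodyUsingLayoutDescriptor is the GC-facing half of field unboxing:
instead of treating every word in [start_offset, end_offset) as a pointer,
the visitor consults the map's layout descriptor and skips the raw double
words. A miniature version over a plain bitmask; the polarity (set bit =
untagged word) is an assumption of this sketch:

    #include <cstdint>
    #include <cstdio>

    // Visit only the tagged words of an object body. 'layout' has one bit
    // per pointer-sized word; a set bit marks an untagged (raw double) word.
    void IterateTaggedWords(void** body, int word_count, uint32_t layout,
                            void (*visit)(void** slot)) {
      for (int i = 0; i < word_count; ++i) {
        bool is_tagged = ((layout >> i) & 1) == 0;
        if (is_tagged) visit(&body[i]);  // raw double words are skipped
      }
    }

    static void PrintSlot(void** slot) {
      printf("pointer slot %p\n", (void*)slot);
    }

    int main() {
      void* body[4] = {0, 0, 0, 0};
      // Words 1 and 2 hold an unboxed double on a 32-bit layout: bits 1,2 set.
      uint32_t layout = (1u << 1) | (1u << 2);
      IterateTaggedWords(body, 4, layout, PrintSlot);
    }
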
 
 
@@ -7276,8 +7450,12 @@ template<int start_offset>
 void FlexibleBodyDescriptor<start_offset>::IterateBody(HeapObject* obj,
                                                        int object_size,
                                                        ObjectVisitor* v) {
-  v->VisitPointers(HeapObject::RawField(obj, start_offset),
-                   HeapObject::RawField(obj, object_size));
+  if (!FLAG_unbox_double_fields || obj->map()->HasFastPointerLayout()) {
+    v->VisitPointers(HeapObject::RawField(obj, start_offset),
+                     HeapObject::RawField(obj, object_size));
+  } else {
+    IterateBodyUsingLayoutDescriptor(obj, start_offset, object_size, v);
+  }
 }
 
 
index ba05b47..9805490 100644
@@ -233,18 +233,24 @@ void JSObject::PrintProperties(std::ostream& os) {  // NOLINT
       switch (descs->GetType(i)) {
         case FIELD: {
           FieldIndex index = FieldIndex::ForDescriptor(map(), i);
-          os << Brief(RawFastPropertyAt(index)) << " (field at offset "
-             << index.property_index() << ")\n";
+          if (IsUnboxedDoubleField(index)) {
+            os << "<unboxed double> " << RawFastDoublePropertyAt(index);
+          } else {
+            os << Brief(RawFastPropertyAt(index));
+          }
+          os << " (field at offset " << index.property_index() << ")\n";
+          break;
+        }
+        case ACCESSOR_FIELD: {
+          FieldIndex index = FieldIndex::ForDescriptor(map(), i);
+          os << " (accessor at offset " << index.property_index() << ")\n";
           break;
         }
         case CONSTANT:
           os << Brief(descs->GetConstant(i)) << " (constant)\n";
           break;
         case CALLBACKS:
-          os << Brief(descs->GetCallbacksObject(i)) << " (callback)\n";
-          break;
-        case NORMAL:  // only in slow mode
-          UNREACHABLE();
+          os << Brief(descs->GetCallbacksObject(i)) << " (callbacks)\n";
           break;
       }
     }
@@ -345,12 +351,6 @@ void JSObject::PrintElements(std::ostream& os) {  // NOLINT
 }
 
 
-void JSObject::PrintTransitions(std::ostream& os) {  // NOLINT
-  if (!map()->HasTransitionArray()) return;
-  map()->transitions()->PrintTransitions(os, false);
-}
-
-
 void JSObject::JSObjectPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "JSObject");
   // Don't call GetElementsKind, its validation code can cause the printer to
@@ -413,6 +413,7 @@ void Map::MapPrint(std::ostream& os) {  // NOLINT
   os << "\n - pre-allocated property fields: "
      << pre_allocated_property_fields() << "\n";
   os << " - unused property fields: " << unused_property_fields() << "\n";
+  if (is_deprecated()) os << " - deprecated_map\n";
   if (is_dictionary_map()) os << " - dictionary_map\n";
   if (is_prototype_map()) os << " - prototype_map\n";
   if (is_hidden_prototype()) os << " - hidden_prototype\n";
@@ -421,15 +422,14 @@ void Map::MapPrint(std::ostream& os) {  // NOLINT
   if (is_undetectable()) os << " - undetectable\n";
   if (has_instance_call_handler()) os << " - instance_call_handler\n";
   if (is_access_check_needed()) os << " - access_check_needed\n";
-  if (is_frozen()) {
-    os << " - frozen\n";
-  } else if (!is_extensible()) {
-    os << " - sealed\n";
-  }
+  if (!is_extensible()) os << " - non-extensible\n";
   os << " - back pointer: " << Brief(GetBackPointer());
   os << "\n - instance descriptors " << (owns_descriptors() ? "(own) " : "")
      << "#" << NumberOfOwnDescriptors() << ": "
      << Brief(instance_descriptors());
+  if (FLAG_unbox_double_fields) {
+    os << "\n - layout descriptor: " << Brief(layout_descriptor());
+  }
   if (HasTransitionArray()) {
     os << "\n - transitions: " << Brief(transitions());
   }
@@ -588,21 +588,6 @@ void Name::NamePrint(std::ostream& os) {  // NOLINT
 }
 
 
-// This method is only meant to be called from gdb for debugging purposes.
-// Since the string can also be in two-byte encoding, non-Latin1 characters
-// will be ignored in the output.
-char* String::ToAsciiArray() {
-  // Static so that subsequent calls free previously allocated space.
-  // This also means that previous results will be overwritten.
-  static char* buffer = NULL;
-  if (buffer != NULL) delete[] buffer;
-  buffer = new char[length()+1];
-  WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
-  buffer[length()] = 0;
-  return buffer;
-}
-
-
 static const char* const weekdays[] = {
   "???", "Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"
 };
@@ -843,6 +828,11 @@ void PropertyCell::PropertyCellPrint(std::ostream& os) {  // NOLINT
 
 void WeakCell::WeakCellPrint(std::ostream& os) {  // NOLINT
   HeapObject::PrintHeader(os, "WeakCell");
+  if (cleared()) {
+    os << "\n - cleared";
+  } else {
+    os << "\n - value: " << Brief(value());
+  }
 }
 
 
@@ -1058,6 +1048,98 @@ void BreakPointInfo::BreakPointInfoPrint(std::ostream& os) {  // NOLINT
 }
 
 
+static void PrintBitMask(std::ostream& os, uint32_t value) {  // NOLINT
+  for (int i = 0; i < 32; i++) {
+    if ((i & 7) == 0) os << " ";
+    os << (((value & 1) == 0) ? "_" : "x");
+    value >>= 1;
+  }
+}
+
+
+void LayoutDescriptor::Print() {
+  OFStream os(stdout);
+  this->Print(os);
+  os << std::flush;
+}
+
+
+void LayoutDescriptor::Print(std::ostream& os) {  // NOLINT
+  os << "Layout descriptor: ";
+  if (IsUninitialized()) {
+    os << "<uninitialized>";
+  } else if (IsFastPointerLayout()) {
+    os << "<all tagged>";
+  } else if (IsSmi()) {
+    os << "fast";
+    PrintBitMask(os, static_cast<uint32_t>(Smi::cast(this)->value()));
+  } else {
+    os << "slow";
+    int len = length();
+    for (int i = 0; i < len; i++) {
+      if (i > 0) os << " |";
+      PrintBitMask(os, get_scalar(i));
+    }
+  }
+  os << "\n";
+}
+
+
+#endif  // OBJECT_PRINT
+
+
+#if TRACE_MAPS
+
+
+void Name::NameShortPrint() {
+  if (this->IsString()) {
+    PrintF("%s", String::cast(this)->ToCString().get());
+  } else {
+    DCHECK(this->IsSymbol());
+    Symbol* s = Symbol::cast(this);
+    if (s->name()->IsUndefined()) {
+      PrintF("#<%s>", s->PrivateSymbolToName());
+    } else {
+      PrintF("<%s>", String::cast(s->name())->ToCString().get());
+    }
+  }
+}
+
+
+int Name::NameShortPrint(Vector<char> str) {
+  if (this->IsString()) {
+    return SNPrintF(str, "%s", String::cast(this)->ToCString().get());
+  } else {
+    DCHECK(this->IsSymbol());
+    Symbol* s = Symbol::cast(this);
+    if (s->name()->IsUndefined()) {
+      return SNPrintF(str, "#<%s>", s->PrivateSymbolToName());
+    } else {
+      return SNPrintF(str, "<%s>", String::cast(s->name())->ToCString().get());
+    }
+  }
+}
+
+
+#endif  // TRACE_MAPS
+
+
+#if defined(DEBUG) || defined(OBJECT_PRINT)
+// This method is only meant to be called from gdb for debugging purposes.
+// Since the string can also be in two-byte encoding, non-Latin1 characters
+// will be ignored in the output.
+char* String::ToAsciiArray() {
+  // Static so that subsequent calls free previously allocated space.
+  // This also means that previous results will be overwritten.
+  static char* buffer = NULL;
+  if (buffer != NULL) delete[] buffer;
+  buffer = new char[length() + 1];
+  WriteToFlat(this, reinterpret_cast<uint8_t*>(buffer), 0, length());
+  buffer[length()] = 0;
+  return buffer;
+}
+
+
 void DescriptorArray::Print() {
   OFStream os(stdout);
   this->PrintDescriptors(os);
@@ -1066,6 +1148,7 @@ void DescriptorArray::Print() {
 
 
 void DescriptorArray::PrintDescriptors(std::ostream& os) {  // NOLINT
+  HandleScope scope(GetIsolate());
   os << "Descriptor array " << number_of_descriptors() << "\n";
   for (int i = 0; i < number_of_descriptors(); i++) {
     Descriptor desc;
@@ -1092,9 +1175,17 @@ void TransitionArray::PrintTransitions(std::ostream& os,
     Name* key = GetKey(i);
     Map* target = GetTarget(i);
     os << "   ";
+#ifdef OBJECT_PRINT
     key->NamePrint(os);
+#else
+    key->ShortPrint(os);
+#endif
     os << ": ";
-    if (key == GetHeap()->frozen_symbol()) {
+    if (key == GetHeap()->nonextensible_symbol()) {
+      os << " (transition to non-extensible)";
+    } else if (key == GetHeap()->sealed_symbol()) {
+      os << " (transition to sealed)";
+    } else if (key == GetHeap()->frozen_symbol()) {
       os << " (transition to frozen)";
     } else if (key == GetHeap()->elements_transition_symbol()) {
       os << " (transition to " << ElementsKindToString(target->elements_kind())
@@ -1103,30 +1194,24 @@ void TransitionArray::PrintTransitions(std::ostream& os,
       os << " (transition to Object.observe)";
     } else {
       PropertyDetails details = GetTargetDetails(key, target);
-      switch (details.type()) {
-        case FIELD: {
-          os << " (transition to field)";
-          break;
-        }
-        case CONSTANT:
-          os << " (transition to constant " << Brief(GetTargetValue(i)) << ")";
-          break;
-        case CALLBACKS:
-          os << " (transition to callback " << Brief(GetTargetValue(i)) << ")";
-          break;
-        // Values below are never in the target descriptor array.
-        case NORMAL:
-          UNREACHABLE();
-          break;
+      os << " (transition to ";
+      if (details.location() == IN_DESCRIPTOR) {
+        os << "immutable ";
+      }
+      os << (details.kind() == DATA ? "data" : "accessor");
+      if (details.location() == IN_DESCRIPTOR) {
+        os << " " << Brief(GetTargetValue(i));
       }
-      os << ", attrs: " << details.attributes();
+      os << "), attrs: " << details.attributes();
     }
     os << " -> " << Brief(target) << "\n";
   }
 }
 
 
-#endif  // OBJECT_PRINT
-
-
+void JSObject::PrintTransitions(std::ostream& os) {  // NOLINT
+  if (!map()->HasTransitionArray()) return;
+  map()->transitions()->PrintTransitions(os, false);
+}
+#endif  // defined(DEBUG) || defined(OBJECT_PRINT)
 } }  // namespace v8::internal
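
The rewritten transition printer reflects the PropertyDetails refactor
threaded through this whole patch: the old type() enumeration (NORMAL,
FIELD, CONSTANT, CALLBACKS) is being re-expressed as a kind (DATA or
ACCESSOR) crossed with a location, where IN_DESCRIPTOR prints as
"immutable". A sketch of the mapping as this printer treats it; the
enumerator names follow the diff and the pairing is inferred from it:

    #include <cstdio>

    enum PropertyKind { DATA, ACCESSOR };
    enum PropertyLocation { IN_OBJECT, IN_DESCRIPTOR };

    // How the old PropertyType values line up with kind x location, as
    // implied by the printer above: FIELD -> data in object,
    // CONSTANT -> data in descriptor, CALLBACKS -> accessor in descriptor.
    void Describe(PropertyKind kind, PropertyLocation location) {
      printf("transition to %s%s\n",
             location == IN_DESCRIPTOR ? "immutable " : "",
             kind == DATA ? "data" : "accessor");
    }

    int main() {
      Describe(DATA, IN_OBJECT);          // old FIELD
      Describe(DATA, IN_DESCRIPTOR);      // old CONSTANT
      Describe(ACCESSOR, IN_DESCRIPTOR);  // old CALLBACKS
    }
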
index 258390c..414306f 100644
@@ -708,6 +708,13 @@ Handle<Object> JSObject::DeleteNormalizedProperty(Handle<JSObject> object,
         // the hole value.
         Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
         DCHECK(new_map->is_dictionary_map());
+#if TRACE_MAPS
+        if (FLAG_trace_maps) {
+          PrintF("[TraceMaps: GlobalDeleteNormalized from= %p to= %p ]\n",
+                 reinterpret_cast<void*>(object->map()),
+                 reinterpret_cast<void*>(*new_map));
+        }
+#endif
         JSObject::MigrateToMap(object, new_map);
       }
       Handle<PropertyCell> cell(PropertyCell::cast(dictionary->ValueAt(entry)));
@@ -976,6 +983,9 @@ void Object::ShortPrint(StringStream* accumulator) {
 }
 
 
+void Object::ShortPrint(std::ostream& os) { os << Brief(this); }
+
+
 std::ostream& operator<<(std::ostream& os, const Brief& v) {
   if (v.value->IsSmi()) {
     Smi::cast(v.value)->SmiPrint(os);
@@ -1570,6 +1580,14 @@ void HeapObject::HeapObjectShortPrint(std::ostream& os) {  // NOLINT
       os << accumulator.ToCString().get();
       break;
     }
+    case WEAK_CELL_TYPE: {
+      os << "WeakCell for ";
+      HeapStringAllocator allocator;
+      StringStream accumulator(&allocator);
+      WeakCell::cast(this)->value()->ShortPrint(&accumulator);
+      os << accumulator.ToCString().get();
+      break;
+    }
     default:
       os << "<Other heap object (" << map()->instance_type() << ")>";
       break;
@@ -1829,7 +1847,7 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
       // Assign an enumeration index to the property and update
       // SetNextEnumerationIndex.
       int index = dict->NextEnumerationIndex();
-      PropertyDetails details = PropertyDetails(attributes, NORMAL, index);
+      PropertyDetails details(attributes, FIELD, index);
       dict->SetNextEnumerationIndex(index + 1);
       dict->SetEntry(entry, name, cell, details);
       return;
@@ -1838,7 +1856,7 @@ void JSObject::AddSlowProperty(Handle<JSObject> object,
     PropertyCell::SetValueInferType(cell, value);
     value = cell;
   }
-  PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
+  PropertyDetails details(attributes, FIELD, 0);
   Handle<NameDictionary> result =
       NameDictionary::Add(dict, name, value, details);
   if (*dict != *result) object->set_properties(*result);
@@ -1946,7 +1964,8 @@ void JSObject::MigrateToMap(Handle<JSObject> object, Handle<Map> new_map) {
         // Clear out the old descriptor array to avoid problems with sharing
         // the descriptor array.
         old_map->InitializeDescriptors(
-            old_map->GetHeap()->empty_descriptor_array());
+            old_map->GetHeap()->empty_descriptor_array(),
+            LayoutDescriptor::FastPointerLayout());
         // Ensure that no transition was inserted for prototype migrations.
         DCHECK(!old_map->HasTransitionArray());
         DCHECK(new_map->GetBackPointer()->IsUndefined());
@@ -2005,10 +2024,14 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
 
     if (old_map->unused_property_fields() > 0) {
       if (details.representation().IsDouble()) {
-        Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE);
         FieldIndex index =
             FieldIndex::ForDescriptor(*new_map, new_map->LastAdded());
-        object->FastPropertyAtPut(index, *value);
+        if (new_map->IsUnboxedDoubleField(index)) {
+          object->RawFastDoublePropertyAtPut(index, 0);
+        } else {
+          Handle<Object> value = isolate->factory()->NewHeapNumber(0, MUTABLE);
+          object->RawFastPropertyAtPut(index, *value);
+        }
       }
       object->synchronized_set_map(*new_map);
       return;
@@ -2060,23 +2083,35 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
       DCHECK(details.representation().IsTagged());
       continue;
     }
+    Representation old_representation = old_details.representation();
+    Representation representation = details.representation();
     DCHECK(old_details.type() == CONSTANT ||
            old_details.type() == FIELD);
-    Object* raw_value = old_details.type() == CONSTANT
-        ? old_descriptors->GetValue(i)
-        : object->RawFastPropertyAt(FieldIndex::ForDescriptor(*old_map, i));
-    Handle<Object> value(raw_value, isolate);
-    if (!old_details.representation().IsDouble() &&
-        details.representation().IsDouble()) {
-      if (old_details.representation().IsNone()) {
-        value = handle(Smi::FromInt(0), isolate);
+    Handle<Object> value;
+    if (old_details.type() == CONSTANT) {
+      value = handle(old_descriptors->GetValue(i), isolate);
+      DCHECK(!old_representation.IsDouble() && !representation.IsDouble());
+    } else {
+      FieldIndex index = FieldIndex::ForDescriptor(*old_map, i);
+      if (object->IsUnboxedDoubleField(index)) {
+        double old = object->RawFastDoublePropertyAt(index);
+        value = isolate->factory()->NewHeapNumber(
+            old, representation.IsDouble() ? MUTABLE : IMMUTABLE);
+
+      } else {
+        value = handle(object->RawFastPropertyAt(index), isolate);
+        if (!old_representation.IsDouble() && representation.IsDouble()) {
+          if (old_representation.IsNone()) {
+            value = handle(Smi::FromInt(0), isolate);
+          }
+          value = Object::NewStorageFor(isolate, value, representation);
+        } else if (old_representation.IsDouble() &&
+                   !representation.IsDouble()) {
+          value = Object::WrapForRead(isolate, value, old_representation);
+        }
       }
-      value = Object::NewStorageFor(isolate, value, details.representation());
-    } else if (old_details.representation().IsDouble() &&
-               !details.representation().IsDouble()) {
-      value = Object::WrapForRead(isolate, value, old_details.representation());
     }
-    DCHECK(!(details.representation().IsDouble() && value->IsSmi()));
+    DCHECK(!(representation.IsDouble() && value->IsSmi()));
     int target_index = new_descriptors->GetFieldIndex(i) - inobject;
     if (target_index < 0) target_index += total_size;
     array->set(target_index, *value);
@@ -2104,7 +2139,16 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
   int limit = Min(inobject, number_of_fields);
   for (int i = 0; i < limit; i++) {
     FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
-    object->FastPropertyAtPut(index, array->get(external + i));
+    Object* value = array->get(external + i);
+    // Can't use JSObject::FastPropertyAtPut() because the proper map is not
+    // set yet.
+    if (new_map->IsUnboxedDoubleField(index)) {
+      DCHECK(value->IsMutableHeapNumber());
+      object->RawFastDoublePropertyAtPut(index,
+                                         HeapNumber::cast(value)->value());
+    } else {
+      object->RawFastPropertyAtPut(index, value);
+    }
   }
 
   Heap* heap = isolate->heap();
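
MigrateFastToFast now moves values across three storage shapes: an unboxed
double slot is read as raw bits and boxed into a mutable HeapNumber for
transport, a tagged value headed into a double slot gets fresh storage, and
the final store cannot go through FastPropertyAtPut because the object
still carries its old map, so the new map's layout decides. A reduced
sketch of that final store; ToySlot and BoxedNumber are illustrative
stand-ins:

    #include <cstdio>

    // Illustrative destination slot that is either tagged or unboxed-double.
    struct ToySlot {
      bool unboxed_double;
      union { void* tagged; double raw; } payload;
    };

    struct BoxedNumber { double value; };  // stand-in for a mutable HeapNumber

    // Mirrors the tail of MigrateFastToFast: the *new* layout (here the
    // slot's own flag) decides the representation, not the object's old map.
    void StoreMigratedValue(ToySlot* slot, void* value) {
      if (slot->unboxed_double) {
        // The transported value must be a boxed number; unwrap it in place.
        slot->payload.raw = static_cast<BoxedNumber*>(value)->value;
      } else {
        slot->payload.tagged = value;
      }
    }

    int main() {
      BoxedNumber num = {2.5};
      ToySlot slot = {true, {0}};
      StoreMigratedValue(&slot, &num);
      printf("%g\n", slot.payload.raw);  // 2.5
    }
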
@@ -2150,20 +2194,27 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
                                                   PropertyAttributes attributes,
                                                   const char* reason) {
   Isolate* isolate = map->GetIsolate();
-  Handle<Map> new_map = Copy(map);
+  Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
+  int number_of_own_descriptors = map->NumberOfOwnDescriptors();
+  Handle<DescriptorArray> descriptors =
+      DescriptorArray::CopyUpTo(old_descriptors, number_of_own_descriptors);
 
-  DescriptorArray* descriptors = new_map->instance_descriptors();
-  int length = descriptors->number_of_descriptors();
-  for (int i = 0; i < length; i++) {
+  for (int i = 0; i < number_of_own_descriptors; i++) {
     descriptors->SetRepresentation(i, Representation::Tagged());
     if (descriptors->GetDetails(i).type() == FIELD) {
       descriptors->SetValue(i, HeapType::Any());
     }
   }
 
+  Handle<LayoutDescriptor> new_layout_descriptor(
+      LayoutDescriptor::FastPointerLayout(), isolate);
+  Handle<Map> new_map = CopyReplaceDescriptors(
+      map, descriptors, new_layout_descriptor, OMIT_TRANSITION,
+      MaybeHandle<Name>(), reason, SPECIAL_TRANSITION);
+
   // Unless the instance is being migrated, ensure that modify_index is a field.
   PropertyDetails details = descriptors->GetDetails(modify_index);
-  if (store_mode == FORCE_FIELD &&
+  if (store_mode == FORCE_IN_OBJECT &&
       (details.type() != FIELD || details.attributes() != attributes)) {
     int field_index = details.type() == FIELD ? details.field_index()
                                               : new_map->NumberOfFields();
@@ -2185,12 +2236,12 @@ Handle<Map> Map::CopyGeneralizeAllRepresentations(Handle<Map> map,
     HeapType* field_type = (details.type() == FIELD)
         ? map->instance_descriptors()->GetFieldType(modify_index)
         : NULL;
-    map->PrintGeneralization(stdout, reason, modify_index,
-                        new_map->NumberOfOwnDescriptors(),
-                        new_map->NumberOfOwnDescriptors(),
-                        details.type() == CONSTANT && store_mode == FORCE_FIELD,
-                        details.representation(), Representation::Tagged(),
-                        field_type, HeapType::Any());
+    map->PrintGeneralization(
+        stdout, reason, modify_index, new_map->NumberOfOwnDescriptors(),
+        new_map->NumberOfOwnDescriptors(),
+        details.type() == CONSTANT && store_mode == FORCE_IN_OBJECT,
+        details.representation(), Representation::Tagged(), field_type,
+        HeapType::Any());
   }
   return new_map;
 }
@@ -2226,32 +2277,37 @@ void Map::DeprecateTransitionTree() {
 // Invalidates a transition target at |key|, and installs |new_descriptors| over
 // the current instance_descriptors to ensure proper sharing of descriptor
 // arrays.
-void Map::DeprecateTarget(PropertyType type, Name* key,
+// Returns true if the transition target at given key was deprecated.
+bool Map::DeprecateTarget(PropertyKind kind, Name* key,
                           PropertyAttributes attributes,
-                          DescriptorArray* new_descriptors) {
+                          DescriptorArray* new_descriptors,
+                          LayoutDescriptor* new_layout_descriptor) {
+  bool transition_target_deprecated = false;
   if (HasTransitionArray()) {
     TransitionArray* transitions = this->transitions();
-    int transition = transitions->Search(type, key, attributes);
+    int transition = transitions->Search(kind, key, attributes);
     if (transition != TransitionArray::kNotFound) {
       transitions->GetTarget(transition)->DeprecateTransitionTree();
+      transition_target_deprecated = true;
     }
   }
 
   // Don't overwrite the empty descriptor array.
-  if (NumberOfOwnDescriptors() == 0) return;
+  if (NumberOfOwnDescriptors() == 0) return transition_target_deprecated;
 
   DescriptorArray* to_replace = instance_descriptors();
   Map* current = this;
   GetHeap()->incremental_marking()->RecordWrites(to_replace);
   while (current->instance_descriptors() == to_replace) {
     current->SetEnumLength(kInvalidEnumCacheSentinel);
-    current->set_instance_descriptors(new_descriptors);
+    current->UpdateDescriptors(new_descriptors, new_layout_descriptor);
     Object* next = current->GetBackPointer();
     if (next->IsUndefined()) break;
     current = Map::cast(next);
   }
 
   set_owns_descriptors(false);
+  return transition_target_deprecated;
 }
 
 
@@ -2281,7 +2337,7 @@ Map* Map::FindLastMatchMap(int verbatim,
     PropertyDetails details = descriptors->GetDetails(i);
     TransitionArray* transitions = current->transitions();
     int transition =
-        transitions->Search(details.type(), name, details.attributes());
+        transitions->Search(details.kind(), name, details.attributes());
     if (transition == TransitionArray::kNotFound) break;
 
     Map* next = transitions->GetTarget(transition);
@@ -2320,6 +2376,7 @@ Map* Map::FindFieldOwner(int descriptor) {
 
 
 void Map::UpdateFieldType(int descriptor, Handle<Name> name,
+                          Representation new_representation,
                           Handle<HeapType> new_type) {
   DisallowHeapAllocation no_allocation;
   PropertyDetails details = instance_descriptors()->GetDetails(descriptor);
@@ -2327,13 +2384,18 @@ void Map::UpdateFieldType(int descriptor, Handle<Name> name,
   if (HasTransitionArray()) {
     TransitionArray* transitions = this->transitions();
     for (int i = 0; i < transitions->number_of_transitions(); ++i) {
-      transitions->GetTarget(i)->UpdateFieldType(descriptor, name, new_type);
+      transitions->GetTarget(i)
+          ->UpdateFieldType(descriptor, name, new_representation, new_type);
     }
   }
+  // It is allowed to change representation here only from None to something.
+  DCHECK(details.representation().Equals(new_representation) ||
+         details.representation().IsNone());
+
   // Skip if already updated the shared descriptor.
   if (instance_descriptors()->GetFieldType(descriptor) == *new_type) return;
   FieldDescriptor d(name, instance_descriptors()->GetFieldIndex(descriptor),
-                    new_type, details.attributes(), details.representation());
+                    new_type, details.attributes(), new_representation);
   instance_descriptors()->Replace(descriptor, &d);
 }
 
@@ -2359,15 +2421,20 @@ Handle<HeapType> Map::GeneralizeFieldType(Handle<HeapType> type1,
 
 
 // static
-void Map::GeneralizeFieldType(Handle<Map> map,
-                              int modify_index,
+void Map::GeneralizeFieldType(Handle<Map> map, int modify_index,
+                              Representation new_representation,
                               Handle<HeapType> new_field_type) {
   Isolate* isolate = map->GetIsolate();
 
   // Check if we actually need to generalize the field type at all.
-  Handle<HeapType> old_field_type(
-      map->instance_descriptors()->GetFieldType(modify_index), isolate);
-  if (new_field_type->NowIs(old_field_type)) {
+  Handle<DescriptorArray> old_descriptors(map->instance_descriptors(), isolate);
+  Representation old_representation =
+      old_descriptors->GetDetails(modify_index).representation();
+  Handle<HeapType> old_field_type(old_descriptors->GetFieldType(modify_index),
+                                  isolate);
+
+  if (old_representation.Equals(new_representation) &&
+      new_field_type->NowIs(old_field_type)) {
     DCHECK(Map::GeneralizeFieldType(old_field_type,
                                     new_field_type,
                                     isolate)->NowIs(old_field_type));
@@ -2386,7 +2453,8 @@ void Map::GeneralizeFieldType(Handle<Map> map,
 
   PropertyDetails details = descriptors->GetDetails(modify_index);
   Handle<Name> name(descriptors->GetKey(modify_index));
-  field_owner->UpdateFieldType(modify_index, name, new_field_type);
+  field_owner->UpdateFieldType(modify_index, name, new_representation,
+                               new_field_type);
   field_owner->dependent_code()->DeoptimizeDependentCodeGroup(
       isolate, DependentCode::kFieldTypeGroup);
 
@@ -2437,12 +2505,9 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
   // modification to the object, because the default uninitialized value for
   // representation None can be overwritten by both smi and tagged values.
   // Doubles, however, would require a box allocation.
-  if (old_representation.IsNone() &&
-      !new_representation.IsNone() &&
+  if (old_representation.IsNone() && !new_representation.IsNone() &&
       !new_representation.IsDouble()) {
     DCHECK(old_details.type() == FIELD);
-    DCHECK(old_descriptors->GetFieldType(modify_index)->NowIs(
-            HeapType::None()));
     if (FLAG_trace_generalization) {
       old_map->PrintGeneralization(
           stdout, "uninitialized field",
@@ -2451,33 +2516,38 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
           old_representation, new_representation,
           old_descriptors->GetFieldType(modify_index), *new_field_type);
     }
-    old_descriptors->SetRepresentation(modify_index, new_representation);
-    old_descriptors->SetValue(modify_index, *new_field_type);
+    Handle<Map> field_owner(old_map->FindFieldOwner(modify_index), isolate);
+
+    GeneralizeFieldType(field_owner, modify_index, new_representation,
+                        new_field_type);
+    DCHECK(old_descriptors->GetDetails(modify_index).representation().Equals(
+        new_representation));
+    DCHECK(old_descriptors->GetFieldType(modify_index)->NowIs(new_field_type));
     return old_map;
   }
 
   // Check the state of the root map.
   Handle<Map> root_map(old_map->FindRootMap(), isolate);
   if (!old_map->EquivalentToForTransition(*root_map)) {
-    return CopyGeneralizeAllRepresentations(
-        old_map, modify_index, store_mode, "not equivalent");
+    return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+                                            "GenAll_NotEquivalent");
   }
   int root_nof = root_map->NumberOfOwnDescriptors();
   if (modify_index < root_nof) {
     PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
-    if ((old_details.type() != FIELD && store_mode == FORCE_FIELD) ||
+    if ((old_details.type() != FIELD && store_mode == FORCE_IN_OBJECT) ||
         (old_details.type() == FIELD &&
          (!new_field_type->NowIs(old_descriptors->GetFieldType(modify_index)) ||
           !new_representation.fits_into(old_details.representation())))) {
-      return CopyGeneralizeAllRepresentations(
-          old_map, modify_index, store_mode, "root modification");
+      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+                                              "GenAll_RootModification");
     }
   }
 
   Handle<Map> target_map = root_map;
   for (int i = root_nof; i < old_nof; ++i) {
     PropertyDetails old_details = old_descriptors->GetDetails(i);
-    int j = target_map->SearchTransition(old_details.type(),
+    int j = target_map->SearchTransition(old_details.kind(),
                                          old_descriptors->GetKey(i),
                                          old_details.attributes());
     if (j == TransitionArray::kNotFound) break;
@@ -2493,8 +2563,8 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
     if ((tmp_type == CALLBACKS || old_type == CALLBACKS) &&
         (tmp_type != old_type ||
          tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i))) {
-      return CopyGeneralizeAllRepresentations(
-          old_map, modify_index, store_mode, "incompatible");
+      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+                                              "GenAll_Incompatible");
     }
     Representation old_representation = old_details.representation();
     Representation tmp_representation = tmp_details.representation();
@@ -2513,7 +2583,7 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
         old_field_type = GeneralizeFieldType(
             new_field_type, old_field_type, isolate);
       }
-      GeneralizeFieldType(tmp_map, i, old_field_type);
+      GeneralizeFieldType(tmp_map, i, tmp_representation, old_field_type);
     } else if (tmp_type == CONSTANT) {
       if (old_type != CONSTANT ||
           old_descriptors->GetConstant(i) != tmp_descriptors->GetConstant(i)) {
@@ -2531,7 +2601,7 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
       target_map->instance_descriptors(), isolate);
   int target_nof = target_map->NumberOfOwnDescriptors();
   if (target_nof == old_nof &&
-      (store_mode != FORCE_FIELD ||
+      (store_mode != FORCE_IN_OBJECT ||
        target_descriptors->GetDetails(modify_index).type() == FIELD)) {
     DCHECK(modify_index < target_nof);
     DCHECK(new_representation.fits_into(
@@ -2545,7 +2615,7 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
   // Find the last compatible target map in the transition tree.
   for (int i = target_nof; i < old_nof; ++i) {
     PropertyDetails old_details = old_descriptors->GetDetails(i);
-    int j = target_map->SearchTransition(old_details.type(),
+    int j = target_map->SearchTransition(old_details.kind(),
                                          old_descriptors->GetKey(i),
                                          old_details.attributes());
     if (j == TransitionArray::kNotFound) break;
@@ -2559,8 +2629,8 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
     if ((tmp_details.type() == CALLBACKS || old_details.type() == CALLBACKS) &&
         (tmp_details.type() != old_details.type() ||
          tmp_descriptors->GetValue(i) != old_descriptors->GetValue(i))) {
-      return CopyGeneralizeAllRepresentations(
-          old_map, modify_index, store_mode, "incompatible");
+      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+                                              "GenAll_Incompatible");
     }
     target_map = tmp_map;
   }
@@ -2584,7 +2654,9 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
   int current_offset = 0;
   for (int i = 0; i < root_nof; ++i) {
     PropertyDetails old_details = old_descriptors->GetDetails(i);
-    if (old_details.type() == FIELD) current_offset++;
+    if (old_details.type() == FIELD) {
+      current_offset += old_details.field_width_in_words();
+    }
     Descriptor d(handle(old_descriptors->GetKey(i), isolate),
                  handle(old_descriptors->GetValue(i), isolate),
                  old_details);
@@ -2604,9 +2676,8 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
           new_representation.generalize(target_details.representation()));
     }
     DCHECK_EQ(old_details.attributes(), target_details.attributes());
-    if (old_details.type() == FIELD ||
-        target_details.type() == FIELD ||
-        (modify_index == i && store_mode == FORCE_FIELD) ||
+    if (old_details.type() == FIELD || target_details.type() == FIELD ||
+        (modify_index == i && store_mode == FORCE_IN_OBJECT) ||
         (target_descriptors->GetValue(i) != old_descriptors->GetValue(i))) {
       Handle<HeapType> old_field_type = (old_details.type() == FIELD)
           ? handle(old_descriptors->GetFieldType(i), isolate)
@@ -2622,11 +2693,10 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
         target_field_type = GeneralizeFieldType(
             target_field_type, new_field_type, isolate);
       }
-      FieldDescriptor d(target_key,
-                        current_offset++,
-                        target_field_type,
+      FieldDescriptor d(target_key, current_offset, target_field_type,
                         target_details.attributes(),
                         target_details.representation());
+      current_offset += d.GetDetails().field_width_in_words();
       new_descriptors->Set(i, &d);
     } else {
       DCHECK_NE(FIELD, target_details.type());
@@ -2652,23 +2722,20 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
         old_field_type = GeneralizeFieldType(
             old_field_type, new_field_type, isolate);
       }
-      FieldDescriptor d(old_key,
-                        current_offset++,
-                        old_field_type,
-                        old_details.attributes(),
-                        old_details.representation());
+      FieldDescriptor d(old_key, current_offset, old_field_type,
+                        old_details.attributes(), old_details.representation());
+      current_offset += d.GetDetails().field_width_in_words();
       new_descriptors->Set(i, &d);
     } else {
       DCHECK(old_details.type() == CONSTANT || old_details.type() == CALLBACKS);
-      if (modify_index == i && store_mode == FORCE_FIELD) {
-        FieldDescriptor d(old_key,
-                          current_offset++,
-                          GeneralizeFieldType(
-                              old_descriptors->GetValue(i)->OptimalType(
-                                  isolate, old_details.representation()),
-                              new_field_type, isolate),
-                          old_details.attributes(),
-                          old_details.representation());
+      if (modify_index == i && store_mode == FORCE_IN_OBJECT) {
+        FieldDescriptor d(
+            old_key, current_offset,
+            GeneralizeFieldType(old_descriptors->GetValue(i)->OptimalType(
+                                    isolate, old_details.representation()),
+                                new_field_type, isolate),
+            old_details.attributes(), old_details.representation());
+        current_offset += d.GetDetails().field_width_in_words();
         new_descriptors->Set(i, &d);
       } else {
         DCHECK_NE(FIELD, old_details.type());
@@ -2682,7 +2749,7 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
 
   new_descriptors->Sort();
 
-  DCHECK(store_mode != FORCE_FIELD ||
+  DCHECK(store_mode != FORCE_IN_OBJECT ||
          new_descriptors->GetDetails(modify_index).type() == FIELD);
 
   Handle<Map> split_map(root_map->FindLastMatchMap(
@@ -2690,10 +2757,21 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
   int split_nof = split_map->NumberOfOwnDescriptors();
   DCHECK_NE(old_nof, split_nof);
 
+  Handle<LayoutDescriptor> new_layout_descriptor =
+      LayoutDescriptor::New(split_map, new_descriptors, old_nof);
   PropertyDetails split_prop_details = old_descriptors->GetDetails(split_nof);
-  split_map->DeprecateTarget(split_prop_details.type(),
-                             old_descriptors->GetKey(split_nof),
-                             split_prop_details.attributes(), *new_descriptors);
+  bool transition_target_deprecated = split_map->DeprecateTarget(
+      split_prop_details.kind(), old_descriptors->GetKey(split_nof),
+      split_prop_details.attributes(), *new_descriptors,
+      *new_layout_descriptor);
+
+  // If |transition_target_deprecated| is true, then the transition array
+  // already contains an entry for the given descriptor, so the transition
+  // can be inserted regardless of whether the transition array is full.
+  if (!transition_target_deprecated && !split_map->CanHaveMoreTransitions()) {
+    return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
+                                            "GenAll_CantHaveMoreTransitions");
+  }
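The bailout above leans on an invariant of the transition array: replacing an already-deprecated target reuses an existing entry, so only a genuinely new transition needs free capacity. A minimal stand-alone sketch of that rule (TransitionTable here is an illustrative stand-in, not V8's type):

    #include <cassert>
    #include <string>
    #include <utility>
    #include <vector>

    // Fixed-capacity table: overwriting an existing key never needs a free
    // slot; only inserting a brand-new key does.
    class TransitionTable {
     public:
      explicit TransitionTable(size_t capacity) : capacity_(capacity) {}

      bool Put(const std::string& key, int target) {
        for (auto& entry : entries_) {
          if (entry.first == key) {  // Existing entry: replace in place.
            entry.second = target;
            return true;
          }
        }
        if (entries_.size() >= capacity_) return false;  // New key needs space.
        entries_.emplace_back(key, target);
        return true;
      }

     private:
      size_t capacity_;
      std::vector<std::pair<std::string, int>> entries_;
    };

    int main() {
      TransitionTable table(1);
      assert(table.Put("x", 1));   // Fills the only slot.
      assert(table.Put("x", 2));   // Replacement succeeds although full.
      assert(!table.Put("y", 3));  // Only a new key hits the capacity check.
    }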
 
   if (FLAG_trace_generalization) {
     PropertyDetails old_details = old_descriptors->GetDetails(modify_index);
@@ -2708,7 +2786,7 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
                                     isolate), isolate);
     old_map->PrintGeneralization(
         stdout, "", modify_index, split_nof, old_nof,
-        old_details.type() == CONSTANT && store_mode == FORCE_FIELD,
+        old_details.type() == CONSTANT && store_mode == FORCE_IN_OBJECT,
         old_details.representation(), new_details.representation(),
         *old_field_type, *new_field_type);
   }
@@ -2716,11 +2794,8 @@ Handle<Map> Map::GeneralizeRepresentation(Handle<Map> old_map,
   // Add missing transitions.
   Handle<Map> new_map = split_map;
   for (int i = split_nof; i < old_nof; ++i) {
-    if (!new_map->CanHaveMoreTransitions()) {
-      return CopyGeneralizeAllRepresentations(old_map, modify_index, store_mode,
-                                              "can't have more transitions");
-    }
-    new_map = CopyInstallDescriptors(new_map, i, new_descriptors);
+    new_map = CopyInstallDescriptors(new_map, i, new_descriptors,
+                                     new_layout_descriptor);
   }
   new_map->set_owns_descriptors(true);
   return new_map;
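Throughout this function, field offsets now advance by the width of each field instead of a constant one word, because an unboxed double can occupy two words on 32-bit targets. A small worked sketch of the bookkeeping (the widths are assumed for illustration; in the patch they come from PropertyDetails::field_width_in_words()):

    #include <cstdio>
    #include <vector>

    struct Field {
      const char* name;
      int width_in_words;  // 1 for a tagged value, 2 for an unboxed double on 32-bit.
    };

    int main() {
      // Hypothetical layout: two tagged fields around one unboxed double.
      std::vector<Field> fields = {{"a", 1}, {"d", 2}, {"b", 1}};
      int current_offset = 0;
      for (const Field& f : fields) {
        std::printf("%s at word offset %d\n", f.name, current_offset);
        current_offset += f.width_in_words;  // The patch's += field_width_in_words().
      }
      std::printf("next free word offset: %d\n", current_offset);  // 4, not 3.
    }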
@@ -2735,7 +2810,7 @@ Handle<Map> Map::GeneralizeAllFieldRepresentations(
     if (descriptors->GetDetails(i).type() == FIELD) {
       map = GeneralizeRepresentation(map, i, Representation::Tagged(),
                                      HeapType::Any(map->GetIsolate()),
-                                     FORCE_FIELD);
+                                     FORCE_IN_OBJECT);
     }
   }
   return map;
@@ -2761,7 +2836,7 @@ Handle<Map> Map::Update(Handle<Map> map) {
   if (!map->is_deprecated()) return map;
   return GeneralizeRepresentation(map, 0, Representation::None(),
                                   HeapType::None(map->GetIsolate()),
-                                  ALLOW_AS_CONSTANT);
+                                  ALLOW_IN_DESCRIPTOR);
 }
 
 
@@ -2783,7 +2858,7 @@ MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) {
   Map* new_map = root_map;
   for (int i = root_nof; i < old_nof; ++i) {
     PropertyDetails old_details = old_descriptors->GetDetails(i);
-    int j = new_map->SearchTransition(old_details.type(),
+    int j = new_map->SearchTransition(old_details.kind(),
                                       old_descriptors->GetKey(i),
                                       old_details.attributes());
     if (j == TransitionArray::kNotFound) return MaybeHandle<Map>();
@@ -2791,35 +2866,38 @@ MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) {
     DescriptorArray* new_descriptors = new_map->instance_descriptors();
 
     PropertyDetails new_details = new_descriptors->GetDetails(i);
-    if (old_details.attributes() != new_details.attributes() ||
-        !old_details.representation().fits_into(new_details.representation())) {
+    DCHECK_EQ(old_details.kind(), new_details.kind());
+    DCHECK_EQ(old_details.attributes(), new_details.attributes());
+    if (!old_details.representation().fits_into(new_details.representation())) {
       return MaybeHandle<Map>();
     }
-    PropertyType new_type = new_details.type();
-    PropertyType old_type = old_details.type();
     Object* new_value = new_descriptors->GetValue(i);
     Object* old_value = old_descriptors->GetValue(i);
-    switch (new_type) {
-      case FIELD:
-        if ((old_type == FIELD &&
-             !HeapType::cast(old_value)->NowIs(HeapType::cast(new_value))) ||
-            (old_type == CONSTANT &&
-             !HeapType::cast(new_value)->NowContains(old_value)) ||
-            (old_type == CALLBACKS &&
-             !HeapType::Any()->Is(HeapType::cast(new_value)))) {
-          return MaybeHandle<Map>();
+    switch (new_details.type()) {
+      case FIELD: {
+        PropertyType old_type = old_details.type();
+        if (old_type == FIELD) {
+          if (!HeapType::cast(old_value)->NowIs(HeapType::cast(new_value))) {
+            return MaybeHandle<Map>();
+          }
+        } else {
+          DCHECK(old_type == CONSTANT);
+          if (!HeapType::cast(new_value)->NowContains(old_value)) {
+            return MaybeHandle<Map>();
+          }
         }
         break;
+      }
+      case ACCESSOR_FIELD:
+        DCHECK(HeapType::Any()->Is(HeapType::cast(new_value)));
+        break;
 
       case CONSTANT:
       case CALLBACKS:
-        if (old_type != new_type || old_value != new_value) {
+        if (old_details.location() == IN_OBJECT || old_value != new_value) {
           return MaybeHandle<Map>();
         }
         break;
-
-      case NORMAL:
-        UNREACHABLE();
     }
   }
   if (new_map->NumberOfOwnDescriptors() != old_nof) return MaybeHandle<Map>();
@@ -2829,22 +2907,23 @@ MaybeHandle<Map> Map::TryUpdateInternal(Handle<Map> old_map) {
 
 MaybeHandle<Object> JSObject::SetPropertyWithInterceptor(LookupIterator* it,
                                                          Handle<Object> value) {
-  // TODO(rossberg): Support symbols in the API.
-  if (it->name()->IsSymbol()) return value;
-
-  Handle<String> name_string = Handle<String>::cast(it->name());
+  Handle<Name> name = it->name();
   Handle<JSObject> holder = it->GetHolder<JSObject>();
   Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
-  if (interceptor->setter()->IsUndefined()) return MaybeHandle<Object>();
+  if (interceptor->setter()->IsUndefined() ||
+      (name->IsSymbol() && !interceptor->can_intercept_symbols())) {
+    return MaybeHandle<Object>();
+  }
 
   LOG(it->isolate(),
-      ApiNamedPropertyAccess("interceptor-named-set", *holder, *name_string));
+      ApiNamedPropertyAccess("interceptor-named-set", *holder, *name));
   PropertyCallbackArguments args(it->isolate(), interceptor->data(), *holder,
                                  *holder);
-  v8::NamedPropertySetterCallback setter =
-      v8::ToCData<v8::NamedPropertySetterCallback>(interceptor->setter());
-  v8::Handle<v8::Value> result = args.Call(
-      setter, v8::Utils::ToLocal(name_string), v8::Utils::ToLocal(value));
+  v8::GenericNamedPropertySetterCallback setter =
+      v8::ToCData<v8::GenericNamedPropertySetterCallback>(
+          interceptor->setter());
+  v8::Handle<v8::Value> result =
+      args.Call(setter, v8::Utils::ToLocal(name), v8::Utils::ToLocal(value));
   RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(it->isolate(), Object);
   if (!result.IsEmpty()) return value;
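The guard at the top of this function now has two bypass conditions: no setter is installed, or the key is a symbol and the interceptor has not opted into symbols. A stand-alone rendering of that predicate (the Name and Interceptor structs below are illustrative stand-ins for the V8 internals used above):

    #include <cassert>
    #include <functional>

    struct Name {
      bool is_symbol;
    };

    struct Interceptor {
      std::function<void(const Name&)> setter;  // Empty plays "undefined".
      bool can_intercept_symbols;
    };

    // True when the interceptor must be bypassed, matching the shape of the
    // check in SetPropertyWithInterceptor.
    bool ShouldSkipInterceptor(const Interceptor& i, const Name& name) {
      return !i.setter || (name.is_symbol && !i.can_intercept_symbols);
    }

    int main() {
      Interceptor legacy{[](const Name&) {}, /*can_intercept_symbols=*/false};
      assert(!ShouldSkipInterceptor(legacy, Name{/*is_symbol=*/false}));
      assert(ShouldSkipInterceptor(legacy, Name{/*is_symbol=*/true}));
      Interceptor generic{[](const Name&) {}, /*can_intercept_symbols=*/true};
      assert(!ShouldSkipInterceptor(generic, Name{/*is_symbol=*/true}));
    }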
 
@@ -3012,9 +3091,8 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
   // Old value for the observation change record.
   // Fetch before transforming the object since the encoding may become
   // incompatible with what's cached in |it|.
-  bool is_observed =
-      receiver->map()->is_observed() &&
-      !it->name().is_identical_to(it->factory()->hidden_string());
+  bool is_observed = receiver->map()->is_observed() &&
+                     !it->isolate()->IsInternallyUsedPropertyName(it->name());
   MaybeHandle<Object> maybe_old;
   if (is_observed) maybe_old = it->GetDataValue();
 
@@ -3023,7 +3101,7 @@ MaybeHandle<Object> Object::SetDataProperty(LookupIterator* it,
   it->PrepareForDataProperty(value);
 
   // Write the property value.
-  it->WriteDataValue(value);
+  value = it->WriteDataValue(value);
 
   // Send the change record if there are observers.
   if (is_observed && !value->SameValue(*maybe_old.ToHandleChecked())) {
@@ -3079,12 +3157,12 @@ MaybeHandle<Object> Object::AddDataProperty(LookupIterator* it,
     JSObject::AddSlowProperty(receiver, it->name(), value, attributes);
   } else {
     // Write the property value.
-    it->WriteDataValue(value);
+    value = it->WriteDataValue(value);
   }
 
   // Send the change record if there are observers.
   if (receiver->map()->is_observed() &&
-      !it->name().is_identical_to(it->factory()->hidden_string())) {
+      !it->isolate()->IsInternallyUsedPropertyName(it->name())) {
     RETURN_ON_EXCEPTION(it->isolate(), JSObject::EnqueueChangeRecord(
                                            receiver, "add", it->name(),
                                            it->factory()->the_hole_value()),
@@ -3143,8 +3221,12 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
   Handle<DescriptorArray> new_descriptors = DescriptorArray::CopyUpTo(
       descriptors, old_size, slack);
 
+  DisallowHeapAllocation no_allocation;
+  // The descriptors are still the same, so keep the layout descriptor.
+  LayoutDescriptor* layout_descriptor = map->GetLayoutDescriptor();
+
   if (old_size == 0) {
-    map->set_instance_descriptors(*new_descriptors);
+    map->UpdateDescriptors(*new_descriptors, layout_descriptor);
     return;
   }
 
@@ -3166,10 +3248,10 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
        current = walk_map->GetBackPointer()) {
     walk_map = Map::cast(current);
     if (walk_map->instance_descriptors() != *descriptors) break;
-    walk_map->set_instance_descriptors(*new_descriptors);
+    walk_map->UpdateDescriptors(*new_descriptors, layout_descriptor);
   }
 
-  map->set_instance_descriptors(*new_descriptors);
+  map->UpdateDescriptors(*new_descriptors, layout_descriptor);
 }
 
 
@@ -3369,6 +3451,21 @@ bool Map::IsMapInArrayPrototypeChain() {
 }
 
 
+Handle<WeakCell> Map::WeakCellForMap(Handle<Map> map) {
+  Isolate* isolate = map->GetIsolate();
+  if (map->code_cache()->IsFixedArray()) {
+    return isolate->factory()->NewWeakCell(map);
+  }
+  Handle<CodeCache> code_cache(CodeCache::cast(map->code_cache()), isolate);
+  if (code_cache->weak_cell_cache()->IsWeakCell()) {
+    return Handle<WeakCell>(WeakCell::cast(code_cache->weak_cell_cache()));
+  }
+  Handle<WeakCell> weak_cell = isolate->factory()->NewWeakCell(map);
+  code_cache->set_weak_cell_cache(*weak_cell);
+  return weak_cell;
+}
+
+
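WeakCellForMap creates at most one weak cell per map: the first request allocates the cell and stashes it in the code cache, and later requests return the cached cell. The same create-once-then-reuse shape with standard C++ weak references (std::weak_ptr stands in for WeakCell; this is an analogy, not the V8 mechanism):

    #include <cassert>
    #include <memory>

    struct Map {};

    // Caches one weak handle per target; later calls reuse it rather than
    // allocating a fresh one each time.
    class WeakCellCache {
     public:
      std::weak_ptr<Map> WeakCellFor(const std::shared_ptr<Map>& map) {
        if (cache_.expired() || cache_.lock() != map) {
          cache_ = map;  // First request (or new target): create the handle.
        }
        return cache_;
      }

     private:
      std::weak_ptr<Map> cache_;
    };

    int main() {
      auto map = std::make_shared<Map>();
      WeakCellCache cache;
      auto a = cache.WeakCellFor(map);
      auto b = cache.WeakCellFor(map);
      assert(a.lock() == b.lock() && a.lock() == map);  // Reused, not rebuilt.
    }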
 static Handle<Map> AddMissingElementsTransitions(Handle<Map> map,
                                                  ElementsKind to_kind) {
   DCHECK(IsTransitionElementsKind(map->elements_kind()));
@@ -3843,11 +3940,15 @@ void JSObject::WriteToField(int descriptor, Object* value) {
   if (details.representation().IsDouble()) {
     // Nothing more to be done.
     if (value->IsUninitialized()) return;
-    HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
-    DCHECK(box->IsMutableHeapNumber());
-    box->set_value(value->Number());
+    if (IsUnboxedDoubleField(index)) {
+      RawFastDoublePropertyAtPut(index, value->Number());
+    } else {
+      HeapNumber* box = HeapNumber::cast(RawFastPropertyAt(index));
+      DCHECK(box->IsMutableHeapNumber());
+      box->set_value(value->Number());
+    }
   } else {
-    FastPropertyAtPut(index, value);
+    RawFastPropertyAtPut(index, value);
   }
 }
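WriteToField now distinguishes three storage shapes for a field: a raw double written directly into the object (unboxed), a mutable heap-number box whose payload is updated, and an ordinary tagged write. A compact model of the double-representation dispatch (Slot and HeapNumberBox are invented for illustration; requires C++17):

    #include <cassert>
    #include <memory>
    #include <variant>

    struct Tagged {
      int value;  // Stand-in for an arbitrary tagged value.
    };
    using HeapNumberBox = std::shared_ptr<double>;  // Boxed, mutable double.

    // A field slot is either an unboxed raw double, a pointer to a double
    // box, or a tagged value, mirroring the branches in WriteToField.
    using Slot = std::variant<double, HeapNumberBox, Tagged>;

    void WriteDoubleToField(Slot& slot, double number) {
      if (std::holds_alternative<double>(slot)) {
        slot = number;  // Unboxed: overwrite the raw word(s) in place.
      } else if (auto* box = std::get_if<HeapNumberBox>(&slot)) {
        **box = number;  // Boxed: mutate the box's payload, keep the box.
      }  // Tagged slots take the non-double write path instead.
    }

    int main() {
      Slot unboxed = 1.0;
      WriteDoubleToField(unboxed, 2.5);
      assert(std::get<double>(unboxed) == 2.5);

      Slot boxed = std::make_shared<double>(1.0);
      WriteDoubleToField(boxed, 2.5);
      assert(*std::get<HeapNumberBox>(boxed) == 2.5);
    }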
 
@@ -3865,7 +3966,7 @@ void JSObject::AddProperty(Handle<JSObject> object, Handle<Name> name,
   DCHECK(maybe.has_value);
   DCHECK(!it.IsFound());
   DCHECK(object->map()->is_extensible() ||
-         name.is_identical_to(it.isolate()->factory()->hidden_string()));
+         it.isolate()->IsInternallyUsedPropertyName(name));
 #endif
   AddDataProperty(&it, value, attributes, STRICT,
                   CERTAINLY_NOT_STORE_FROM_KEYED).Check();
@@ -3883,7 +3984,7 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
   DCHECK(!value->IsTheHole());
   LookupIterator it(object, name, LookupIterator::OWN_SKIP_INTERCEPTOR);
   bool is_observed = object->map()->is_observed() &&
-                     *name != it.isolate()->heap()->hidden_string();
+                     !it.isolate()->IsInternallyUsedPropertyName(name);
   for (; it.IsFound(); it.Next()) {
     switch (it.state()) {
       case LookupIterator::INTERCEPTOR:
@@ -3900,20 +4001,11 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
 
       case LookupIterator::ACCESSOR: {
         PropertyDetails details = it.property_details();
-        Handle<Object> old_value = it.isolate()->factory()->the_hole_value();
         // Ensure the context isn't changed after calling into accessors.
         AssertNoContextChange ncc(it.isolate());
 
         Handle<Object> accessors = it.GetAccessors();
 
-        if (is_observed && accessors->IsAccessorInfo()) {
-          ASSIGN_RETURN_ON_EXCEPTION(
-              it.isolate(), old_value,
-              GetPropertyWithAccessor(it.GetReceiver(), it.name(),
-                                      it.GetHolder<JSObject>(), accessors),
-              Object);
-        }
-
         // Special handling for ExecutableAccessorInfo, which behaves like a
         // data property.
         if (handling == DONT_FORCE_FIELD &&
@@ -3928,21 +4020,6 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
           DCHECK(result->SameValue(*value));
 
           if (details.attributes() == attributes) {
-            // Regular property update if the attributes match.
-            if (is_observed && !old_value->SameValue(*value)) {
-              // If we are setting the prototype of a function and are
-              // observed, don't send change records because the prototype
-              // handles that itself.
-              if (!object->IsJSFunction() ||
-                  !Name::Equals(it.isolate()->factory()->prototype_string(),
-                                name) ||
-                  !Handle<JSFunction>::cast(object)->should_have_prototype()) {
-                RETURN_ON_EXCEPTION(
-                    it.isolate(),
-                    EnqueueChangeRecord(object, "update", name, old_value),
-                    Object);
-              }
-            }
             return value;
           }
 
@@ -3956,12 +4033,10 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
           if (attributes & READ_ONLY) new_data->clear_setter();
           SetPropertyCallback(object, name, new_data, attributes);
           if (is_observed) {
-            if (old_value->SameValue(*value)) {
-              old_value = it.isolate()->factory()->the_hole_value();
-            }
             RETURN_ON_EXCEPTION(
                 it.isolate(),
-                EnqueueChangeRecord(object, "reconfigure", name, old_value),
+                EnqueueChangeRecord(object, "reconfigure", name,
+                                    it.isolate()->factory()->the_hole_value()),
                 Object);
           }
           return value;
@@ -3969,15 +4044,13 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
 
         it.ReconfigureDataProperty(value, attributes);
         it.PrepareForDataProperty(value);
-        it.WriteDataValue(value);
+        value = it.WriteDataValue(value);
 
         if (is_observed) {
-          if (old_value->SameValue(*value)) {
-            old_value = it.isolate()->factory()->the_hole_value();
-          }
           RETURN_ON_EXCEPTION(
               it.isolate(),
-              EnqueueChangeRecord(object, "reconfigure", name, old_value),
+              EnqueueChangeRecord(object, "reconfigure", name,
+                                  it.isolate()->factory()->the_hole_value()),
               Object);
         }
 
@@ -3996,7 +4069,7 @@ MaybeHandle<Object> JSObject::SetOwnPropertyIgnoreAttributes(
 
         it.ReconfigureDataProperty(value, attributes);
         it.PrepareForDataProperty(value);
-        it.WriteDataValue(value);
+        value = it.WriteDataValue(value);
 
         if (is_observed) {
           if (old_value->SameValue(*value)) {
@@ -4022,9 +4095,6 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
     Handle<JSObject> holder,
     Handle<Object> receiver,
     Handle<Name> name) {
-  // TODO(rossberg): Support symbols in the API.
-  if (name->IsSymbol()) return maybe(ABSENT);
-
   Isolate* isolate = holder->GetIsolate();
   HandleScope scope(isolate);
 
@@ -4033,26 +4103,29 @@ Maybe<PropertyAttributes> JSObject::GetPropertyAttributesWithInterceptor(
   AssertNoContextChange ncc(isolate);
 
   Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
+  if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+    return maybe(ABSENT);
+  }
   PropertyCallbackArguments args(
       isolate, interceptor->data(), *receiver, *holder);
   if (!interceptor->query()->IsUndefined()) {
-    v8::NamedPropertyQueryCallback query =
-        v8::ToCData<v8::NamedPropertyQueryCallback>(interceptor->query());
+    v8::GenericNamedPropertyQueryCallback query =
+        v8::ToCData<v8::GenericNamedPropertyQueryCallback>(
+            interceptor->query());
     LOG(isolate,
         ApiNamedPropertyAccess("interceptor-named-has", *holder, *name));
-    v8::Handle<v8::Integer> result =
-        args.Call(query, v8::Utils::ToLocal(Handle<String>::cast(name)));
+    v8::Handle<v8::Integer> result = args.Call(query, v8::Utils::ToLocal(name));
     if (!result.IsEmpty()) {
       DCHECK(result->IsInt32());
       return maybe(static_cast<PropertyAttributes>(result->Int32Value()));
     }
   } else if (!interceptor->getter()->IsUndefined()) {
-    v8::NamedPropertyGetterCallback getter =
-        v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
+    v8::GenericNamedPropertyGetterCallback getter =
+        v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+            interceptor->getter());
     LOG(isolate,
         ApiNamedPropertyAccess("interceptor-named-get-has", *holder, *name));
-    v8::Handle<v8::Value> result =
-        args.Call(getter, v8::Utils::ToLocal(Handle<String>::cast(name)));
+    v8::Handle<v8::Value> result = args.Call(getter, v8::Utils::ToLocal(name));
     if (!result.IsEmpty()) return maybe(DONT_ENUM);
   }
 
@@ -4260,11 +4333,12 @@ void HeapObject::UpdateMapCodeCache(Handle<HeapObject> object,
 
 void JSObject::NormalizeProperties(Handle<JSObject> object,
                                    PropertyNormalizationMode mode,
-                                   int expected_additional_properties) {
+                                   int expected_additional_properties,
+                                   const char* reason) {
   if (!object->HasFastProperties()) return;
 
   Handle<Map> map(object->map());
-  Handle<Map> new_map = Map::Normalize(map, mode);
+  Handle<Map> new_map = Map::Normalize(map, mode, reason);
 
   MigrateFastToSlow(object, new_map, expected_additional_properties);
 }
@@ -4296,41 +4370,45 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
   Handle<DescriptorArray> descs(map->instance_descriptors());
   for (int i = 0; i < real_size; i++) {
     PropertyDetails details = descs->GetDetails(i);
+    Handle<Name> key(descs->GetKey(i));
     switch (details.type()) {
       case CONSTANT: {
-        Handle<Name> key(descs->GetKey(i));
         Handle<Object> value(descs->GetConstant(i), isolate);
-        PropertyDetails d = PropertyDetails(
-            details.attributes(), NORMAL, i + 1);
+        PropertyDetails d(details.attributes(), FIELD, i + 1);
         dictionary = NameDictionary::Add(dictionary, key, value, d);
         break;
       }
       case FIELD: {
-        Handle<Name> key(descs->GetKey(i));
         FieldIndex index = FieldIndex::ForDescriptor(*map, i);
-        Handle<Object> value(
-            object->RawFastPropertyAt(index), isolate);
-        if (details.representation().IsDouble()) {
-          DCHECK(value->IsMutableHeapNumber());
-          Handle<HeapNumber> old = Handle<HeapNumber>::cast(value);
-          value = isolate->factory()->NewHeapNumber(old->value());
+        Handle<Object> value;
+        if (object->IsUnboxedDoubleField(index)) {
+          double old_value = object->RawFastDoublePropertyAt(index);
+          value = isolate->factory()->NewHeapNumber(old_value);
+        } else {
+          value = handle(object->RawFastPropertyAt(index), isolate);
+          if (details.representation().IsDouble()) {
+            DCHECK(value->IsMutableHeapNumber());
+            Handle<HeapNumber> old = Handle<HeapNumber>::cast(value);
+            value = isolate->factory()->NewHeapNumber(old->value());
+          }
         }
-        PropertyDetails d =
-            PropertyDetails(details.attributes(), NORMAL, i + 1);
+        PropertyDetails d(details.attributes(), FIELD, i + 1);
+        dictionary = NameDictionary::Add(dictionary, key, value, d);
+        break;
+      }
+      case ACCESSOR_FIELD: {
+        FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+        Handle<Object> value(object->RawFastPropertyAt(index), isolate);
+        PropertyDetails d(details.attributes(), CALLBACKS, i + 1);
         dictionary = NameDictionary::Add(dictionary, key, value, d);
         break;
       }
       case CALLBACKS: {
-        Handle<Name> key(descs->GetKey(i));
         Handle<Object> value(descs->GetCallbacksObject(i), isolate);
-        PropertyDetails d = PropertyDetails(
-            details.attributes(), CALLBACKS, i + 1);
+        PropertyDetails d(details.attributes(), CALLBACKS, i + 1);
         dictionary = NameDictionary::Add(dictionary, key, value, d);
         break;
       }
-      case NORMAL:
-        UNREACHABLE();
-        break;
     }
   }
 
@@ -4359,6 +4437,14 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
 
   object->set_properties(*dictionary);
 
+  // Ensure that the in-object space of the slow-mode object does not contain
+  // random garbage.
+  int inobject_properties = new_map->inobject_properties();
+  for (int i = 0; i < inobject_properties; i++) {
+    FieldIndex index = FieldIndex::ForPropertyIndex(*new_map, i);
+    object->RawFastPropertyAtPut(index, Smi::FromInt(0));
+  }
+
   isolate->counters()->props_to_dictionary()->Increment();
 
 #ifdef DEBUG
@@ -4372,7 +4458,8 @@ void JSObject::MigrateFastToSlow(Handle<JSObject> object,
 
 
 void JSObject::MigrateSlowToFast(Handle<JSObject> object,
-                                 int unused_property_fields) {
+                                 int unused_property_fields,
+                                 const char* reason) {
   if (object->HasFastProperties()) return;
   DCHECK(!object->IsGlobalObject());
   Isolate* isolate = object->GetIsolate();
@@ -4402,8 +4489,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
 
     Object* value = dictionary->ValueAt(index);
     PropertyType type = dictionary->DetailsAt(index).type();
-    DCHECK(type != FIELD);
-    if (type == NORMAL && !value->IsJSFunction()) {
+    if (type == FIELD && !value->IsJSFunction()) {
       number_of_fields += 1;
     }
   }
@@ -4414,6 +4500,14 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
   Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
   new_map->set_dictionary_map(false);
 
+#if TRACE_MAPS
+  if (FLAG_trace_maps) {
+    PrintF("[TraceMaps: SlowToFast from= %p to= %p reason= %s ]\n",
+           reinterpret_cast<void*>(object->map()),
+           reinterpret_cast<void*>(*new_map), reason);
+  }
+#endif
+
   if (instance_descriptor_length == 0) {
     DisallowHeapAllocation no_gc;
     DCHECK_LE(unused_property_fields, inobject_props);
@@ -4466,7 +4560,7 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
     if (value->IsJSFunction()) {
       ConstantDescriptor d(key, handle(value, isolate), details.attributes());
       descriptors->Set(enumeration_index - 1, &d);
-    } else if (type == NORMAL) {
+    } else if (type == FIELD) {
       if (current_offset < inobject_props) {
         object->InObjectPropertyAtPut(current_offset, value,
                                       UPDATE_WRITE_BARRIER);
@@ -4474,9 +4568,10 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
         int offset = current_offset - inobject_props;
         fields->set(offset, value);
       }
-      FieldDescriptor d(key, current_offset++, details.attributes(),
+      FieldDescriptor d(key, current_offset, details.attributes(),
                         // TODO(verwaest): value->OptimalRepresentation();
                         Representation::Tagged());
+      current_offset += d.GetDetails().field_width_in_words();
       descriptors->Set(enumeration_index - 1, &d);
     } else if (type == CALLBACKS) {
       CallbacksDescriptor d(key, handle(value, isolate), details.attributes());
@@ -4489,8 +4584,11 @@ void JSObject::MigrateSlowToFast(Handle<JSObject> object,
 
   descriptors->Sort();
 
+  Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::New(
+      new_map, descriptors, descriptors->number_of_descriptors());
+
   DisallowHeapAllocation no_gc;
-  new_map->InitializeDescriptors(*descriptors);
+  new_map->InitializeDescriptors(*descriptors, *layout_descriptor);
   new_map->set_unused_property_fields(unused_property_fields);
 
   // Transform the object.
@@ -4538,7 +4636,7 @@ static Handle<SeededNumberDictionary> CopyFastElementsToDictionary(
       value = handle(Handle<FixedArray>::cast(array)->get(i), isolate);
     }
     if (!value->IsTheHole()) {
-      PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
+      PropertyDetails details(NONE, FIELD, 0);
       dictionary =
           SeededNumberDictionary::AddNumberEntry(dictionary, i, value, details);
     }
@@ -4868,20 +4966,20 @@ MaybeHandle<Object> JSObject::DeletePropertyWithInterceptor(
     Handle<JSObject> holder, Handle<JSObject> receiver, Handle<Name> name) {
   Isolate* isolate = holder->GetIsolate();
 
-  // TODO(rossberg): Support symbols in the API.
-  if (name->IsSymbol()) return MaybeHandle<Object>();
-
   Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor());
-  if (interceptor->deleter()->IsUndefined()) return MaybeHandle<Object>();
+  if (interceptor->deleter()->IsUndefined() ||
+      (name->IsSymbol() && !interceptor->can_intercept_symbols())) {
+    return MaybeHandle<Object>();
+  }
 
-  v8::NamedPropertyDeleterCallback deleter =
-      v8::ToCData<v8::NamedPropertyDeleterCallback>(interceptor->deleter());
+  v8::GenericNamedPropertyDeleterCallback deleter =
+      v8::ToCData<v8::GenericNamedPropertyDeleterCallback>(
+          interceptor->deleter());
   LOG(isolate,
       ApiNamedPropertyAccess("interceptor-named-delete", *holder, *name));
   PropertyCallbackArguments args(isolate, interceptor->data(), *receiver,
                                  *holder);
-  v8::Handle<v8::Boolean> result =
-      args.Call(deleter, v8::Utils::ToLocal(Handle<String>::cast(name)));
+  v8::Handle<v8::Boolean> result = args.Call(deleter, v8::Utils::ToLocal(name));
   RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
   if (result.IsEmpty()) return MaybeHandle<Object>();
 
@@ -5021,7 +5119,7 @@ MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
   LookupIterator it(object, name, config);
 
   bool is_observed = object->map()->is_observed() &&
-                     *name != it.isolate()->heap()->hidden_string();
+                     !it.isolate()->IsInternallyUsedPropertyName(name);
   Handle<Object> old_value = it.isolate()->factory()->the_hole_value();
 
   for (; it.IsFound(); it.Next()) {
@@ -5074,7 +5172,7 @@ MaybeHandle<Object> JSObject::DeleteProperty(Handle<JSObject> object,
             !(object->IsJSGlobalProxy() && holder->IsJSGlobalObject())) {
           return it.isolate()->factory()->true_value();
         }
-        NormalizeProperties(holder, mode, 0);
+        NormalizeProperties(holder, mode, 0, "DeletingProperty");
         Handle<Object> result =
             DeleteNormalizedProperty(holder, name, delete_mode);
         ReoptimizeIfPrototype(holder);
@@ -5238,7 +5336,7 @@ bool JSObject::ReferencesObject(Object* obj) {
     if (context->has_extension() && !context->IsCatchContext()) {
       // With harmony scoping, a JSFunction may have a global context.
       // TODO(mvstanton): walk into the ScopeInfo.
-      if (FLAG_harmony_scoping && context->IsGlobalContext()) {
+      if (FLAG_harmony_scoping && context->IsScriptContext()) {
         return false;
       }
 
@@ -5252,10 +5350,14 @@ bool JSObject::ReferencesObject(Object* obj) {
 
 
 MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
-  Isolate* isolate = object->GetIsolate();
-
   if (!object->map()->is_extensible()) return object;
 
+  if (!object->HasSloppyArgumentsElements() && !object->map()->is_observed()) {
+    return PreventExtensionsWithTransition<NONE>(object);
+  }
+
+  Isolate* isolate = object->GetIsolate();
+
   if (object->IsAccessCheckNeeded() &&
       !isolate->MayNamedAccess(
           object, isolate->factory()->undefined_value(), v8::ACCESS_KEYS)) {
@@ -5292,7 +5394,7 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
   // Do a map transition, other objects with this map may still
   // be extensible.
   // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
-  Handle<Map> new_map = Map::Copy(handle(object->map()));
+  Handle<Map> new_map = Map::Copy(handle(object->map()), "PreventExtensions");
 
   new_map->set_is_extensible(false);
   JSObject::MigrateToMap(object, new_map);
@@ -5309,22 +5411,45 @@ MaybeHandle<Object> JSObject::PreventExtensions(Handle<JSObject> object) {
 }
 
 
-template<typename Dictionary>
-static void FreezeDictionary(Dictionary* dictionary) {
+Handle<SeededNumberDictionary> JSObject::GetNormalizedElementDictionary(
+    Handle<JSObject> object) {
+  DCHECK(!object->elements()->IsDictionary());
+  Isolate* isolate = object->GetIsolate();
+  int length = object->IsJSArray()
+                   ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
+                   : object->elements()->length();
+  if (length > 0) {
+    int capacity = 0;
+    int used = 0;
+    object->GetElementsCapacityAndUsage(&capacity, &used);
+    Handle<SeededNumberDictionary> new_element_dictionary =
+        SeededNumberDictionary::New(isolate, used);
+
+    // Move the elements to a dictionary; avoid calling NormalizeElements so
+    // that no unnecessary transitions are triggered.
+    return CopyFastElementsToDictionary(handle(object->elements()), length,
+                                        new_element_dictionary);
+  }
+  // No existing elements; use a pre-allocated empty backing store.
+  return isolate->factory()->empty_slow_element_dictionary();
+}
+
+
+template <typename Dictionary>
+static void ApplyAttributesToDictionary(Dictionary* dictionary,
+                                        const PropertyAttributes attributes) {
   int capacity = dictionary->Capacity();
   for (int i = 0; i < capacity; i++) {
     Object* k = dictionary->KeyAt(i);
     if (dictionary->IsKey(k) &&
         !(k->IsSymbol() && Symbol::cast(k)->is_private())) {
       PropertyDetails details = dictionary->DetailsAt(i);
-      int attrs = DONT_DELETE;
+      int attrs = attributes;
       // READ_ONLY is an invalid attribute for JS setters/getters.
-      if (details.type() == CALLBACKS) {
+      if ((attributes & READ_ONLY) && details.type() == CALLBACKS) {
         Object* v = dictionary->ValueAt(i);
         if (v->IsPropertyCell()) v = PropertyCell::cast(v)->value();
-        if (!v->IsAccessorPair()) attrs |= READ_ONLY;
-      } else {
-        attrs |= READ_ONLY;
+        if (v->IsAccessorPair()) attrs &= ~READ_ONLY;
       }
       details = details.CopyAddAttributes(
           static_cast<PropertyAttributes>(attrs));
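This helper generalizes the old FreezeDictionary, which hard-coded DONT_DELETE plus READ_ONLY: it ORs in the caller's attributes and drops READ_ONLY only for accessor pairs, for which that attribute is invalid. The rule in isolation (the types and enum values below are illustrative):

    #include <cassert>

    enum PropertyAttributes { NONE = 0, READ_ONLY = 1, DONT_DELETE = 4 };

    struct Property {
      int attributes = NONE;
      bool is_accessor_pair = false;  // JS getter/setter pair.
    };

    // Apply |attributes| to a property, except that READ_ONLY is invalid for
    // accessor pairs and is therefore cleared for them.
    void ApplyAttributes(Property& p, int attributes) {
      int attrs = attributes;
      if ((attributes & READ_ONLY) && p.is_accessor_pair) attrs &= ~READ_ONLY;
      p.attributes |= attrs;
    }

    int main() {
      Property data, accessor;
      accessor.is_accessor_pair = true;
      ApplyAttributes(data, DONT_DELETE | READ_ONLY);      // freeze-like
      ApplyAttributes(accessor, DONT_DELETE | READ_ONLY);
      assert(data.attributes == (DONT_DELETE | READ_ONLY));
      assert(accessor.attributes == DONT_DELETE);          // READ_ONLY stripped
    }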
@@ -5334,13 +5459,15 @@ static void FreezeDictionary(Dictionary* dictionary) {
 }
 
 
-MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
-  // Freezing sloppy arguments should be handled elsewhere.
+template <PropertyAttributes attrs>
+MaybeHandle<Object> JSObject::PreventExtensionsWithTransition(
+    Handle<JSObject> object) {
+  STATIC_ASSERT(attrs == NONE || attrs == SEALED || attrs == FROZEN);
+
+  // Sealing/freezing sloppy arguments should be handled elsewhere.
   DCHECK(!object->HasSloppyArgumentsElements());
   DCHECK(!object->map()->is_observed());
 
-  if (object->map()->is_frozen()) return object;
-
   Isolate* isolate = object->GetIsolate();
   if (object->IsAccessCheckNeeded() &&
       !isolate->MayNamedAccess(
@@ -5354,10 +5481,11 @@ MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
     PrototypeIterator iter(isolate, object);
     if (iter.IsAtEnd()) return object;
     DCHECK(PrototypeIterator::GetCurrent(iter)->IsJSGlobalObject());
-    return Freeze(Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
+    return PreventExtensionsWithTransition<attrs>(
+        Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)));
   }
 
-  // It's not possible to freeze objects with external array elements
+  // It's not possible to seal or freeze objects with external array elements
   if (object->HasExternalArrayElements() ||
       object->HasFixedTypedArrayElements()) {
     THROW_NEW_ERROR(isolate,
@@ -5368,54 +5496,48 @@ MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
 
   Handle<SeededNumberDictionary> new_element_dictionary;
   if (!object->elements()->IsDictionary()) {
-    int length = object->IsJSArray()
-        ? Smi::cast(Handle<JSArray>::cast(object)->length())->value()
-        : object->elements()->length();
-    if (length > 0) {
-      int capacity = 0;
-      int used = 0;
-      object->GetElementsCapacityAndUsage(&capacity, &used);
-      new_element_dictionary = SeededNumberDictionary::New(isolate, used);
-
-      // Move elements to a dictionary; avoid calling NormalizeElements to avoid
-      // unnecessary transitions.
-      new_element_dictionary = CopyFastElementsToDictionary(
-          handle(object->elements()), length, new_element_dictionary);
-    } else {
-      // No existing elements, use a pre-allocated empty backing store
-      new_element_dictionary =
-          isolate->factory()->empty_slow_element_dictionary();
-    }
+    new_element_dictionary = GetNormalizedElementDictionary(object);
+  }
+
+  Handle<Symbol> transition_marker;
+  if (attrs == NONE) {
+    transition_marker = isolate->factory()->nonextensible_symbol();
+  } else if (attrs == SEALED) {
+    transition_marker = isolate->factory()->sealed_symbol();
+  } else {
+    DCHECK(attrs == FROZEN);
+    transition_marker = isolate->factory()->frozen_symbol();
   }
 
   Handle<Map> old_map(object->map(), isolate);
-  int transition_index =
-      old_map->SearchSpecialTransition(isolate->heap()->frozen_symbol());
+  int transition_index = old_map->SearchSpecialTransition(*transition_marker);
   if (transition_index != TransitionArray::kNotFound) {
     Handle<Map> transition_map(old_map->GetTransition(transition_index));
     DCHECK(transition_map->has_dictionary_elements());
-    DCHECK(transition_map->is_frozen());
     DCHECK(!transition_map->is_extensible());
     JSObject::MigrateToMap(object, transition_map);
   } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
-    // Create a new descriptor array with fully-frozen properties
-    Handle<Map> new_map = Map::CopyForFreeze(old_map);
+    // Create a new descriptor array with the appropriate property attributes
+    Handle<Map> new_map = Map::CopyForPreventExtensions(
+        old_map, attrs, transition_marker, "CopyForPreventExtensions");
     JSObject::MigrateToMap(object, new_map);
   } else {
     DCHECK(old_map->is_dictionary_map() || !old_map->is_prototype_map());
     // Slow path: need to normalize properties for safety
-    NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0);
+    NormalizeProperties(object, CLEAR_INOBJECT_PROPERTIES, 0,
+                        "SlowPreventExtensions");
 
     // Create a new map, since other objects with this map may be extensible.
     // TODO(adamk): Extend the NormalizedMapCache to handle non-extensible maps.
-    Handle<Map> new_map = Map::Copy(handle(object->map()));
-    new_map->freeze();
+    Handle<Map> new_map =
+        Map::Copy(handle(object->map()), "SlowCopyForPreventExtensions");
     new_map->set_is_extensible(false);
     new_map->set_elements_kind(DICTIONARY_ELEMENTS);
     JSObject::MigrateToMap(object, new_map);
 
-    // Freeze dictionary-mode properties
-    FreezeDictionary(object->property_dictionary());
+    if (attrs != NONE) {
+      ApplyAttributesToDictionary(object->property_dictionary(), attrs);
+    }
   }
 
   DCHECK(object->map()->has_dictionary_elements());
@@ -5427,14 +5549,25 @@ MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
     SeededNumberDictionary* dictionary = object->element_dictionary();
     // Make sure we never go back to the fast case
     dictionary->set_requires_slow_elements();
-    // Freeze all elements in the dictionary
-    FreezeDictionary(dictionary);
+    if (attrs != NONE) {
+      ApplyAttributesToDictionary(dictionary, attrs);
+    }
   }
 
   return object;
 }
 
 
+MaybeHandle<Object> JSObject::Freeze(Handle<JSObject> object) {
+  return PreventExtensionsWithTransition<FROZEN>(object);
+}
+
+
+MaybeHandle<Object> JSObject::Seal(Handle<JSObject> object) {
+  return PreventExtensionsWithTransition<SEALED>(object);
+}
+
+
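Freeze and Seal are now one-line wrappers over a single template whose non-type parameter selects the attribute set, with the static_assert limiting instantiations to the three supported markers. The shape of that pattern in a self-contained form (the enum values mirror how SEALED and FROZEN compose from DONT_DELETE and READ_ONLY, but are assumptions of this sketch):

    #include <cstdio>

    enum PropertyAttributes { NONE = 0, READ_ONLY = 1, DONT_DELETE = 4,
                              SEALED = DONT_DELETE, FROZEN = SEALED | READ_ONLY };

    struct Object {
      int attributes = NONE;
      bool extensible = true;
    };

    // One implementation, instantiated once per allowed attribute set; the
    // static_assert rejects any other instantiation at compile time.
    template <PropertyAttributes attrs>
    void PreventExtensionsWithTransition(Object& o) {
      static_assert(attrs == NONE || attrs == SEALED || attrs == FROZEN,
                    "only the three transition markers are supported");
      o.extensible = false;
      o.attributes |= attrs;
    }

    void Seal(Object& o) { PreventExtensionsWithTransition<SEALED>(o); }
    void Freeze(Object& o) { PreventExtensionsWithTransition<FROZEN>(o); }

    int main() {
      Object a, b;
      Seal(a);
      Freeze(b);
      std::printf("sealed attrs=%d frozen attrs=%d\n", a.attributes, b.attributes);
    }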
 void JSObject::SetObserved(Handle<JSObject> object) {
   DCHECK(!object->IsJSGlobalProxy());
   DCHECK(!object->IsJSGlobalObject());
@@ -5450,7 +5583,7 @@ void JSObject::SetObserved(Handle<JSObject> object) {
   } else if (object->HasFastProperties() && old_map->CanHaveMoreTransitions()) {
     new_map = Map::CopyForObserved(old_map);
   } else {
-    new_map = Map::Copy(old_map);
+    new_map = Map::Copy(old_map, "SlowObserved");
     new_map->set_is_observed();
   }
   JSObject::MigrateToMap(object, new_map);
@@ -5461,6 +5594,10 @@ Handle<Object> JSObject::FastPropertyAt(Handle<JSObject> object,
                                         Representation representation,
                                         FieldIndex index) {
   Isolate* isolate = object->GetIsolate();
+  if (object->IsUnboxedDoubleField(index)) {
+    double value = object->RawFastDoublePropertyAt(index);
+    return isolate->factory()->NewHeapNumber(value);
+  }
   Handle<Object> raw_value(object->RawFastPropertyAt(index), isolate);
   return Object::WrapForRead(isolate, raw_value, representation);
 }
@@ -5551,18 +5688,28 @@ MaybeHandle<JSObject> JSObjectWalkVisitor<ContextObject>::StructureWalk(
         PropertyDetails details = descriptors->GetDetails(i);
         if (details.type() != FIELD) continue;
         FieldIndex index = FieldIndex::ForDescriptor(copy->map(), i);
-        Handle<Object> value(object->RawFastPropertyAt(index), isolate);
-        if (value->IsJSObject()) {
-          ASSIGN_RETURN_ON_EXCEPTION(
-              isolate, value,
-              VisitElementOrProperty(copy, Handle<JSObject>::cast(value)),
-              JSObject);
+        if (object->IsUnboxedDoubleField(index)) {
+          if (copying) {
+            double value = object->RawFastDoublePropertyAt(index);
+            copy->RawFastDoublePropertyAtPut(index, value);
+          }
         } else {
-          Representation representation = details.representation();
-          value = Object::NewStorageFor(isolate, value, representation);
-        }
-        if (copying) {
-          copy->FastPropertyAtPut(index, *value);
+          Handle<Object> value(object->RawFastPropertyAt(index), isolate);
+          if (value->IsJSObject()) {
+            ASSIGN_RETURN_ON_EXCEPTION(
+                isolate, value,
+                VisitElementOrProperty(copy, Handle<JSObject>::cast(value)),
+                JSObject);
+            if (copying) {
+              copy->FastPropertyAtPut(index, *value);
+            }
+          } else {
+            if (copying) {
+              Representation representation = details.representation();
+              value = Object::NewStorageFor(isolate, value, representation);
+              copy->FastPropertyAtPut(index, *value);
+            }
+          }
         }
       }
     } else {
@@ -5759,16 +5906,17 @@ int Map::NumberOfDescribedProperties(DescriptorFlag which,
 
 
 int Map::NextFreePropertyIndex() {
-  int max_index = -1;
+  int free_index = 0;
   int number_of_own_descriptors = NumberOfOwnDescriptors();
   DescriptorArray* descs = instance_descriptors();
   for (int i = 0; i < number_of_own_descriptors; i++) {
-    if (descs->GetType(i) == FIELD) {
-      int current_index = descs->GetFieldIndex(i);
-      if (current_index > max_index) max_index = current_index;
+    PropertyDetails details = descs->GetDetails(i);
+    if (details.type() == FIELD) {
+      int candidate = details.field_index() + details.field_width_in_words();
+      if (candidate > free_index) free_index = candidate;
     }
   }
-  return max_index + 1;
+  return free_index;
 }
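With variable-width fields, the next free index is the maximum of field_index plus width over all fields; the old max-index-plus-one rule would land inside the second word of a trailing unboxed double. A worked, runnable version of the new computation (FieldDetails is an illustrative stand-in for PropertyDetails):

    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct FieldDetails {
      int field_index;
      int width_in_words;  // 2 for an unboxed double on 32-bit, otherwise 1.
    };

    int NextFreePropertyIndex(const std::vector<FieldDetails>& fields) {
      int free_index = 0;
      for (const FieldDetails& d : fields) {
        free_index = std::max(free_index, d.field_index + d.width_in_words);
      }
      return free_index;
    }

    int main() {
      // A tagged field at index 0 and an unboxed double at index 1: the old
      // rule would give 2 and overlap the double's second word; width-aware
      // accounting correctly yields 3.
      assert(NextFreePropertyIndex({{0, 1}, {1, 2}}) == 3);
    }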
 
 
@@ -5776,7 +5924,7 @@ static bool ContainsOnlyValidKeys(Handle<FixedArray> array) {
   int len = array->length();
   for (int i = 0; i < len; i++) {
     Object* e = array->get(i);
-    if (!(e->IsString() || e->IsNumber())) return false;
+    if (!(e->IsName() || e->IsNumber())) return false;
   }
   return true;
 }
@@ -5984,14 +6132,14 @@ MaybeHandle<FixedArray> JSReceiver::GetKeys(Handle<JSReceiver> object,
         FixedArray);
     DCHECK(ContainsOnlyValidKeys(content));
 
-    // Add the property keys from the interceptor.
+    // Add the non-symbol property keys from the interceptor.
     if (current->HasNamedInterceptor()) {
       Handle<JSObject> result;
       if (JSObject::GetKeysForNamedInterceptor(
               current, object).ToHandle(&result)) {
         ASSIGN_RETURN_ON_EXCEPTION(
-            isolate, content,
-            FixedArray::AddKeysFromArrayLike(content, result),
+            isolate, content, FixedArray::AddKeysFromArrayLike(
+                                  content, result, FixedArray::NON_SYMBOL_KEYS),
             FixedArray);
       }
       DCHECK(ContainsOnlyValidKeys(content));
@@ -6167,13 +6315,20 @@ void JSObject::SetPropertyCallback(Handle<JSObject> object,
                                        ? KEEP_INOBJECT_PROPERTIES
                                        : CLEAR_INOBJECT_PROPERTIES;
   // Normalize object to make this operation simple.
-  NormalizeProperties(object, mode, 0);
+  NormalizeProperties(object, mode, 0, "SetPropertyCallback");
 
   // For the global object allocate a new map to invalidate the global inline
   // caches which have a global property cell reference directly in the code.
   if (object->IsGlobalObject()) {
     Handle<Map> new_map = Map::CopyDropDescriptors(handle(object->map()));
     DCHECK(new_map->is_dictionary_map());
+#if TRACE_MAPS
+    if (FLAG_trace_maps) {
+      PrintF("[TraceMaps: GlobalPropertyCallback from= %p to= %p ]\n",
+             reinterpret_cast<void*>(object->map()),
+             reinterpret_cast<void*>(*new_map));
+    }
+#endif
     JSObject::MigrateToMap(object, new_map);
 
     // When running crankshaft, changing the map is not enough. We
@@ -6225,7 +6380,7 @@ MaybeHandle<Object> JSObject::DefineAccessor(Handle<JSObject> object,
 
   Handle<Object> old_value = isolate->factory()->the_hole_value();
   bool is_observed = object->map()->is_observed() &&
-                     *name != isolate->heap()->hidden_string();
+                     !isolate->IsInternallyUsedPropertyName(name);
   bool preexists = false;
   if (is_observed) {
     if (is_element) {
@@ -6446,17 +6601,27 @@ Object* JSObject::SlowReverseLookup(Object* value) {
   if (HasFastProperties()) {
     int number_of_own_descriptors = map()->NumberOfOwnDescriptors();
     DescriptorArray* descs = map()->instance_descriptors();
+    bool value_is_number = value->IsNumber();
     for (int i = 0; i < number_of_own_descriptors; i++) {
       if (descs->GetType(i) == FIELD) {
-        Object* property =
-            RawFastPropertyAt(FieldIndex::ForDescriptor(map(), i));
-        if (descs->GetDetails(i).representation().IsDouble()) {
-          DCHECK(property->IsMutableHeapNumber());
-          if (value->IsNumber() && property->Number() == value->Number()) {
+        FieldIndex field_index = FieldIndex::ForDescriptor(map(), i);
+        if (IsUnboxedDoubleField(field_index)) {
+          if (value_is_number) {
+            double property = RawFastDoublePropertyAt(field_index);
+            if (property == value->Number()) {
+              return descs->GetKey(i);
+            }
+          }
+        } else {
+          Object* property = RawFastPropertyAt(field_index);
+          if (field_index.is_double()) {
+            DCHECK(property->IsMutableHeapNumber());
+            if (value_is_number && property->Number() == value->Number()) {
+              return descs->GetKey(i);
+            }
+          } else if (property == value) {
             return descs->GetKey(i);
           }
-        } else if (property == value) {
-          return descs->GetKey(i);
         }
       } else if (descs->GetType(i) == CONSTANT) {
         if (descs->GetConstant(i) == value) {
@@ -6474,7 +6639,7 @@ Object* JSObject::SlowReverseLookup(Object* value) {
 Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size) {
   Handle<Map> result = map->GetIsolate()->factory()->NewMap(
       map->instance_type(), instance_size);
-  result->set_prototype(map->prototype());
+  result->SetPrototype(handle(map->prototype(), map->GetIsolate()));
   result->set_constructor(map->constructor());
   result->set_bit_field(map->bit_field());
   result->set_bit_field2(map->bit_field2());
@@ -6487,15 +6652,14 @@ Handle<Map> Map::RawCopy(Handle<Map> map, int instance_size) {
   if (!map->is_dictionary_map()) {
     new_bit_field3 = IsUnstable::update(new_bit_field3, false);
   }
-  new_bit_field3 = ConstructionCount::update(new_bit_field3,
-                                             JSFunction::kNoSlackTracking);
+  new_bit_field3 = Counter::update(new_bit_field3, kRetainingCounterStart);
   result->set_bit_field3(new_bit_field3);
   return result;
 }
 
 
-Handle<Map> Map::Normalize(Handle<Map> fast_map,
-                           PropertyNormalizationMode mode) {
+Handle<Map> Map::Normalize(Handle<Map> fast_map, PropertyNormalizationMode mode,
+                           const char* reason) {
   DCHECK(!fast_map->is_dictionary_map());
 
   Isolate* isolate = fast_map->GetIsolate();
@@ -6534,6 +6698,13 @@ Handle<Map> Map::Normalize(Handle<Map> fast_map,
       cache->Set(fast_map, new_map);
       isolate->counters()->normalized_maps()->Increment();
     }
+#if TRACE_MAPS
+    if (FLAG_trace_maps) {
+      PrintF("[TraceMaps: Normalize from= %p to= %p reason= %s ]\n",
+             reinterpret_cast<void*>(*fast_map),
+             reinterpret_cast<void*>(*new_map), reason);
+    }
+#endif
   }
   fast_map->NotifyLeafMapLayoutChange();
   return new_map;
@@ -6603,10 +6774,15 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
     }
   }
 
+  Handle<LayoutDescriptor> layout_descriptor =
+      FLAG_unbox_double_fields
+          ? LayoutDescriptor::Append(map, descriptor->GetDetails())
+          : handle(LayoutDescriptor::FastPointerLayout(), map->GetIsolate());
+
   {
     DisallowHeapAllocation no_gc;
     descriptors->Append(descriptor);
-    result->InitializeDescriptors(*descriptors);
+    result->InitializeDescriptors(*descriptors, *layout_descriptor);
   }
 
   DCHECK(result->NumberOfOwnDescriptors() == map->NumberOfOwnDescriptors() + 1);
@@ -6616,11 +6792,41 @@ Handle<Map> Map::ShareDescriptor(Handle<Map> map,
 }
 
 
+#if TRACE_MAPS
+
+// static
+void Map::TraceTransition(const char* what, Map* from, Map* to, Name* name) {
+  if (FLAG_trace_maps) {
+    PrintF("[TraceMaps: %s from= %p to= %p name= ", what,
+           reinterpret_cast<void*>(from), reinterpret_cast<void*>(to));
+    name->NameShortPrint();
+    PrintF(" ]\n");
+  }
+}
+
+
+// static
+void Map::TraceAllTransitions(Map* map) {
+  if (!map->HasTransitionArray()) return;
+  TransitionArray* transitions = map->transitions();
+  for (int i = 0; i < transitions->number_of_transitions(); ++i) {
+    Map* target = transitions->GetTarget(i);
+    Map::TraceTransition("Transition", map, target, transitions->GetKey(i));
+    Map::TraceAllTransitions(target);
+  }
+}
+
+#endif  // TRACE_MAPS
+
+
 void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
                             Handle<Name> name, SimpleTransitionFlag flag) {
   parent->set_owns_descriptors(false);
   if (parent->is_prototype_map()) {
     DCHECK(child->is_prototype_map());
+#if TRACE_MAPS
+    Map::TraceTransition("NoTransition", *parent, *child, *name);
+#endif
   } else {
     Handle<TransitionArray> transitions =
         TransitionArray::Insert(parent, name, child, flag);
@@ -6629,22 +6835,32 @@ void Map::ConnectTransition(Handle<Map> parent, Handle<Map> child,
       parent->set_transitions(*transitions);
     }
     child->SetBackPointer(*parent);
+    if (child->prototype()->IsJSObject()) {
+      Handle<JSObject> proto(JSObject::cast(child->prototype()));
+      if (!child->ShouldRegisterAsPrototypeUser(proto)) {
+        JSObject::UnregisterPrototypeUser(proto, child);
+      }
+    }
+#if TRACE_MAPS
+    Map::TraceTransition("Transition", *parent, *child, *name);
+#endif
   }
 }
 
 
-Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map,
-                                        Handle<DescriptorArray> descriptors,
-                                        TransitionFlag flag,
-                                        MaybeHandle<Name> maybe_name,
-                                        SimpleTransitionFlag simple_flag) {
+Handle<Map> Map::CopyReplaceDescriptors(
+    Handle<Map> map, Handle<DescriptorArray> descriptors,
+    Handle<LayoutDescriptor> layout_descriptor, TransitionFlag flag,
+    MaybeHandle<Name> maybe_name, const char* reason,
+    SimpleTransitionFlag simple_flag) {
   DCHECK(descriptors->IsSortedNoDuplicates());
 
   Handle<Map> result = CopyDropDescriptors(map);
-  result->InitializeDescriptors(*descriptors);
 
   if (!map->is_prototype_map()) {
     if (flag == INSERT_TRANSITION && map->CanHaveMoreTransitions()) {
+      result->InitializeDescriptors(*descriptors, *layout_descriptor);
+
       Handle<Name> name;
       CHECK(maybe_name.ToHandle(&name));
       ConnectTransition(map, result, name, simple_flag);
@@ -6656,8 +6872,22 @@ Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map,
           descriptors->SetValue(i, HeapType::Any());
         }
       }
+      result->InitializeDescriptors(*descriptors,
+                                    LayoutDescriptor::FastPointerLayout());
     }
+  } else {
+    result->InitializeDescriptors(*descriptors, *layout_descriptor);
+  }
+#if TRACE_MAPS
+  if (FLAG_trace_maps &&
+      // Mirror conditions above that did not call ConnectTransition().
+      (map->is_prototype_map() ||
+       !(flag == INSERT_TRANSITION && map->CanHaveMoreTransitions()))) {
+    PrintF("[TraceMaps: ReplaceDescriptors from= %p to= %p reason= %s ]\n",
+           reinterpret_cast<void*>(*map), reinterpret_cast<void*>(*result),
+           reason);
   }
+#endif
 
   return result;
 }
@@ -6665,26 +6895,35 @@ Handle<Map> Map::CopyReplaceDescriptors(Handle<Map> map,
 
 // Since this method is used to rewrite an existing transition tree, it can
 // always insert transitions without checking.
-Handle<Map> Map::CopyInstallDescriptors(Handle<Map> map,
-                                        int new_descriptor,
-                                        Handle<DescriptorArray> descriptors) {
+Handle<Map> Map::CopyInstallDescriptors(
+    Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
+    Handle<LayoutDescriptor> full_layout_descriptor) {
   DCHECK(descriptors->IsSortedNoDuplicates());
 
   Handle<Map> result = CopyDropDescriptors(map);
 
-  result->InitializeDescriptors(*descriptors);
+  result->set_instance_descriptors(*descriptors);
   result->SetNumberOfOwnDescriptors(new_descriptor + 1);
 
   int unused_property_fields = map->unused_property_fields();
-  if (descriptors->GetDetails(new_descriptor).type() == FIELD) {
+  PropertyDetails details = descriptors->GetDetails(new_descriptor);
+  if (details.type() == FIELD) {
     unused_property_fields = map->unused_property_fields() - 1;
     if (unused_property_fields < 0) {
       unused_property_fields += JSObject::kFieldsAdded;
     }
   }
-
   result->set_unused_property_fields(unused_property_fields);
 
+  if (FLAG_unbox_double_fields) {
+    Handle<LayoutDescriptor> layout_descriptor =
+        LayoutDescriptor::AppendIfFastOrUseFull(map, details,
+                                                full_layout_descriptor);
+    result->set_layout_descriptor(*layout_descriptor);
+    SLOW_DCHECK(result->layout_descriptor()->IsConsistentWithMap(*result));
+    result->set_visitor_id(StaticVisitorBase::GetVisitorId(*result));
+  }
+
   Handle<Name> name = handle(descriptors->GetKey(new_descriptor));
   ConnectTransition(map, result, name, SIMPLE_PROPERTY_TRANSITION);
 
@@ -6719,14 +6958,16 @@ Handle<Map> Map::CopyAsElementsKind(Handle<Map> map, ElementsKind kind,
     ConnectElementsTransition(map, new_map);
 
     new_map->set_elements_kind(kind);
-    new_map->InitializeDescriptors(map->instance_descriptors());
+    // The properties did not change, so reuse descriptors.
+    new_map->InitializeDescriptors(map->instance_descriptors(),
+                                   map->GetLayoutDescriptor());
     return new_map;
   }
 
   // In case the map did not own its own descriptors, a split is forced by
   // copying the map; creating a new descriptor array cell.
   // Create a new free-floating map only if we are not allowed to store it.
-  Handle<Map> new_map = Copy(map);
+  Handle<Map> new_map = Copy(map, "CopyAsElementsKind");
 
   new_map->set_elements_kind(kind);
 
@@ -6750,12 +6991,14 @@ Handle<Map> Map::CopyForObserved(Handle<Map> map) {
     new_map = CopyDropDescriptors(map);
   } else {
     DCHECK(!map->is_prototype_map());
-    new_map = Copy(map);
+    new_map = Copy(map, "CopyForObserved");
   }
 
   new_map->set_is_observed();
   if (map->owns_descriptors()) {
-    new_map->InitializeDescriptors(map->instance_descriptors());
+    // The properties did not change, so reuse descriptors.
+    new_map->InitializeDescriptors(map->instance_descriptors(),
+                                   map->GetLayoutDescriptor());
   }
 
   if (map->CanHaveMoreTransitions()) {
@@ -6766,18 +7009,22 @@ Handle<Map> Map::CopyForObserved(Handle<Map> map) {
 }
 
 
-Handle<Map> Map::Copy(Handle<Map> map) {
+Handle<Map> Map::Copy(Handle<Map> map, const char* reason) {
   Handle<DescriptorArray> descriptors(map->instance_descriptors());
   int number_of_own_descriptors = map->NumberOfOwnDescriptors();
   Handle<DescriptorArray> new_descriptors =
       DescriptorArray::CopyUpTo(descriptors, number_of_own_descriptors);
-  return CopyReplaceDescriptors(map, new_descriptors, OMIT_TRANSITION,
-                                MaybeHandle<Name>(), SPECIAL_TRANSITION);
+  Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
+                                                 map->GetIsolate());
+  return CopyReplaceDescriptors(map, new_descriptors, new_layout_descriptor,
+                                OMIT_TRANSITION, MaybeHandle<Name>(), reason,
+                                SPECIAL_TRANSITION);
 }
 
 
 Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
-  Handle<Map> copy = Copy(handle(isolate->object_function()->initial_map()));
+  Handle<Map> copy =
+      Copy(handle(isolate->object_function()->initial_map()), "MapCreate");
 
   // Check that we do not overflow the instance size when adding the extra
   // inobject properties. If the instance size overflows, we allocate as many
@@ -6801,15 +7048,20 @@ Handle<Map> Map::Create(Isolate* isolate, int inobject_properties) {
 }
 
 
-Handle<Map> Map::CopyForFreeze(Handle<Map> map) {
+Handle<Map> Map::CopyForPreventExtensions(Handle<Map> map,
+                                          PropertyAttributes attrs_to_add,
+                                          Handle<Symbol> transition_marker,
+                                          const char* reason) {
   int num_descriptors = map->NumberOfOwnDescriptors();
   Isolate* isolate = map->GetIsolate();
   Handle<DescriptorArray> new_desc = DescriptorArray::CopyUpToAddAttributes(
-      handle(map->instance_descriptors(), isolate), num_descriptors, FROZEN);
+      handle(map->instance_descriptors(), isolate), num_descriptors,
+      attrs_to_add);
+  Handle<LayoutDescriptor> new_layout_descriptor(map->GetLayoutDescriptor(),
+                                                 isolate);
   Handle<Map> new_map = CopyReplaceDescriptors(
-      map, new_desc, INSERT_TRANSITION, isolate->factory()->frozen_symbol(),
-      SPECIAL_TRANSITION);
-  new_map->freeze();
+      map, new_desc, new_layout_descriptor, INSERT_TRANSITION,
+      transition_marker, reason, SPECIAL_TRANSITION);
   new_map->set_is_extensible(false);
   new_map->set_elements_kind(DICTIONARY_ELEMENTS);
   return new_map;
@@ -6828,12 +7080,9 @@ bool DescriptorArray::CanHoldValue(int descriptor, Object* value) {
              value->FitsRepresentation(details.representation()));
       return GetConstant(descriptor) == value;
 
+    case ACCESSOR_FIELD:
     case CALLBACKS:
       return false;
-
-    case NORMAL:
-      UNREACHABLE();
-      break;
   }
 
   UNREACHABLE();
@@ -6858,7 +7107,7 @@ Handle<Map> Map::PrepareForDataProperty(Handle<Map> map, int descriptor,
   Handle<HeapType> type = value->OptimalType(isolate, representation);
 
   return GeneralizeRepresentation(map, descriptor, representation, type,
-                                  FORCE_FIELD);
+                                  FORCE_IN_OBJECT);
 }
 
 
@@ -6872,7 +7121,7 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
   // Migrate to the newest map before storing the property.
   map = Update(map);
 
-  int index = map->SearchTransition(FIELD, *name, attributes);
+  int index = map->SearchTransition(DATA, *name, attributes);
   if (index != TransitionArray::kNotFound) {
     Handle<Map> transition(map->GetTransition(index));
     int descriptor = transition->LastAdded();
@@ -6898,7 +7147,17 @@ Handle<Map> Map::TransitionToDataProperty(Handle<Map> map, Handle<Name> name,
 
   Handle<Map> result;
   if (!maybe_map.ToHandle(&result)) {
-    return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES);
+#if TRACE_MAPS
+    if (FLAG_trace_maps) {
+      Vector<char> name_buffer = Vector<char>::New(100);
+      name->NameShortPrint(name_buffer);
+      Vector<char> buffer = Vector<char>::New(128);
+      SNPrintF(buffer, "TooManyFastProperties %s", name_buffer.start());
+      return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES, buffer.start());
+    }
+#endif
+    return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES,
+                          "TooManyFastProperties");
   }
 
   return result;
@@ -6912,8 +7171,9 @@ Handle<Map> Map::ReconfigureDataProperty(Handle<Map> map, int descriptor,
 
   // For now, give up on transitioning and just create a unique map.
   // TODO(verwaest/ishell): Cache transitions with different attributes.
-  return CopyGeneralizeAllRepresentations(map, descriptor, FORCE_FIELD,
-                                          attributes, "attributes mismatch");
+  return CopyGeneralizeAllRepresentations(map, descriptor, FORCE_IN_OBJECT,
+                                          attributes,
+                                          "GenAll_AttributesMismatch");
 }
 
 
@@ -6928,7 +7188,7 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
   if (map->is_dictionary_map()) {
     // For global objects, property cells are inlined. We need to change the
     // map.
-    if (map->IsGlobalObjectMap()) return Copy(map);
+    if (map->IsGlobalObjectMap()) return Copy(map, "GlobalAccessor");
     return map;
   }
 
@@ -6939,24 +7199,24 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
                                        ? KEEP_INOBJECT_PROPERTIES
                                        : CLEAR_INOBJECT_PROPERTIES;
 
-  int index = map->SearchTransition(CALLBACKS, *name, attributes);
+  int index = map->SearchTransition(ACCESSOR, *name, attributes);
   if (index != TransitionArray::kNotFound) {
     Handle<Map> transition(map->GetTransition(index));
     DescriptorArray* descriptors = transition->instance_descriptors();
     int descriptor = transition->LastAdded();
     DCHECK(descriptors->GetKey(descriptor)->Equals(*name));
 
-    DCHECK_EQ(CALLBACKS, descriptors->GetDetails(descriptor).type());
+    DCHECK_EQ(ACCESSOR, descriptors->GetDetails(descriptor).kind());
     DCHECK_EQ(attributes, descriptors->GetDetails(descriptor).attributes());
 
     Handle<Object> maybe_pair(descriptors->GetValue(descriptor), isolate);
     if (!maybe_pair->IsAccessorPair()) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "TransitionToAccessorFromNonPair");
     }
 
     Handle<AccessorPair> pair = Handle<AccessorPair>::cast(maybe_pair);
     if (pair->get(component) != *accessor) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "TransitionToDifferentAccessor");
     }
 
     return transition;
@@ -6967,33 +7227,33 @@ Handle<Map> Map::TransitionToAccessorProperty(Handle<Map> map,
   int descriptor = old_descriptors->SearchWithCache(*name, *map);
   if (descriptor != DescriptorArray::kNotFound) {
     if (descriptor != map->LastAdded()) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "AccessorsOverwritingNonLast");
     }
     PropertyDetails old_details = old_descriptors->GetDetails(descriptor);
     if (old_details.type() != CALLBACKS) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "AccessorsOverwritingNonAccessors");
     }
 
     if (old_details.attributes() != attributes) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "AccessorsWithAttributes");
     }
 
     Handle<Object> maybe_pair(old_descriptors->GetValue(descriptor), isolate);
     if (!maybe_pair->IsAccessorPair()) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "AccessorsOverwritingNonPair");
     }
 
     Object* current = Handle<AccessorPair>::cast(maybe_pair)->get(component);
     if (current == *accessor) return map;
 
     if (!current->IsTheHole()) {
-      return Map::Normalize(map, mode);
+      return Map::Normalize(map, mode, "AccessorsOverwritingAccessors");
     }
 
     pair = AccessorPair::Copy(Handle<AccessorPair>::cast(maybe_pair));
   } else if (map->NumberOfOwnDescriptors() >= kMaxNumberOfDescriptors ||
              map->TooManyFastProperties(CERTAINLY_NOT_STORE_FROM_KEYED)) {
-    return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES);
+    return Map::Normalize(map, CLEAR_INOBJECT_PROPERTIES, "TooManyAccessors");
   } else {
     pair = isolate->factory()->NewAccessorPair();
   }
@@ -7023,8 +7283,13 @@ Handle<Map> Map::CopyAddDescriptor(Handle<Map> map,
       descriptors, map->NumberOfOwnDescriptors(), 1);
   new_descriptors->Append(descriptor);
 
-  return CopyReplaceDescriptors(map, new_descriptors, flag,
-                                descriptor->GetKey(),
+  Handle<LayoutDescriptor> new_layout_descriptor =
+      FLAG_unbox_double_fields
+          ? LayoutDescriptor::Append(map, descriptor->GetDetails())
+          : handle(LayoutDescriptor::FastPointerLayout(), map->GetIsolate());
+
+  return CopyReplaceDescriptors(map, new_descriptors, new_layout_descriptor,
+                                flag, descriptor->GetKey(), "CopyAddDescriptor",
                                 SIMPLE_PROPERTY_TRANSITION);
 }
 
@@ -7116,12 +7381,16 @@ Handle<Map> Map::CopyReplaceDescriptor(Handle<Map> map,
       descriptors, map->NumberOfOwnDescriptors());
 
   new_descriptors->Replace(insertion_index, descriptor);
+  Handle<LayoutDescriptor> new_layout_descriptor = LayoutDescriptor::New(
+      map, new_descriptors, new_descriptors->number_of_descriptors());
 
   SimpleTransitionFlag simple_flag =
       (insertion_index == descriptors->number_of_descriptors() - 1)
           ? SIMPLE_PROPERTY_TRANSITION
           : PROPERTY_TRANSITION;
-  return CopyReplaceDescriptors(map, new_descriptors, flag, key, simple_flag);
+  return CopyReplaceDescriptors(map, new_descriptors, new_layout_descriptor,
+                                flag, key, "CopyReplaceDescriptor",
+                                simple_flag);
 }
 
 
@@ -7795,14 +8064,13 @@ void FixedArray::Shrink(int new_length) {
 
 
 MaybeHandle<FixedArray> FixedArray::AddKeysFromArrayLike(
-    Handle<FixedArray> content,
-    Handle<JSObject> array) {
+    Handle<FixedArray> content, Handle<JSObject> array, KeyFilter filter) {
   DCHECK(array->IsJSArray() || array->HasSloppyArgumentsElements());
   ElementsAccessor* accessor = array->GetElementsAccessor();
   Handle<FixedArray> result;
   ASSIGN_RETURN_ON_EXCEPTION(
       array->GetIsolate(), result,
-      accessor->AddElementsToFixedArray(array, array, content),
+      accessor->AddElementsToFixedArray(array, array, content, filter),
       FixedArray);
 
 #ifdef ENABLE_SLOW_DCHECKS
@@ -7825,10 +8093,9 @@ MaybeHandle<FixedArray> FixedArray::UnionOfKeys(Handle<FixedArray> first,
   ASSIGN_RETURN_ON_EXCEPTION(
       first->GetIsolate(), result,
       accessor->AddElementsToFixedArray(
-          Handle<Object>::null(),     // receiver
-          Handle<JSObject>::null(),   // holder
-          first,
-          Handle<FixedArrayBase>::cast(second)),
+          Handle<Object>::null(),    // receiver
+          Handle<JSObject>::null(),  // holder
+          first, Handle<FixedArrayBase>::cast(second), ALL_KEYS),
       FixedArray);
 
 #ifdef ENABLE_SLOW_DCHECKS
@@ -7885,6 +8152,116 @@ bool FixedArray::IsEqualTo(FixedArray* other) {
 #endif
 
 
+// static
+void WeakFixedArray::Set(Handle<WeakFixedArray> array, int index,
+                         Handle<HeapObject> value) {
+  DCHECK(array->IsEmptySlot(index));  // Don't overwrite anything.
+  Handle<WeakCell> cell =
+      value->IsMap() ? Map::WeakCellForMap(Handle<Map>::cast(value))
+                     : array->GetIsolate()->factory()->NewWeakCell(value);
+  Handle<FixedArray>::cast(array)->set(index + kFirstIndex, *cell);
+  if (FLAG_trace_weak_arrays) {
+    PrintF("[WeakFixedArray: storing at index %d ]\n", index);
+  }
+  array->set_last_used_index(index);
+}
+
+
+// static
+Handle<WeakFixedArray> WeakFixedArray::Add(
+    Handle<Object> maybe_array, Handle<HeapObject> value,
+    SearchForDuplicates search_for_duplicates) {
+  Handle<WeakFixedArray> array =
+      (maybe_array.is_null() || !maybe_array->IsWeakFixedArray())
+          ? Allocate(value->GetIsolate(), 1, Handle<WeakFixedArray>::null())
+          : Handle<WeakFixedArray>::cast(maybe_array);
+
+  if (search_for_duplicates == kAddIfNotFound) {
+    for (int i = 0; i < array->Length(); ++i) {
+      if (array->Get(i) == *value) return array;
+    }
+  } else {
+#ifdef DEBUG
+    for (int i = 0; i < array->Length(); ++i) {
+      DCHECK_NE(*value, array->Get(i));
+    }
+#endif
+  }
+
+  // Try to store the new entry if there's room. Optimize for consecutive
+  // accesses.
+  int first_index = array->last_used_index();
+  for (int i = first_index;;) {
+    if (array->IsEmptySlot(i)) {
+      WeakFixedArray::Set(array, i, value);
+      return array;
+    }
+    if (FLAG_trace_weak_arrays) {
+      PrintF("[WeakFixedArray: searching for free slot]\n");
+    }
+    i = (i + 1) % array->Length();
+    if (i == first_index) break;
+  }
+
+  // No usable slot found, grow the array.
+  int new_length = array->Length() + (array->Length() >> 1) + 4;
+  Handle<WeakFixedArray> new_array =
+      Allocate(array->GetIsolate(), new_length, array);
+  if (FLAG_trace_weak_arrays) {
+    PrintF("[WeakFixedArray: growing to size %d ]\n", new_length);
+  }
+  WeakFixedArray::Set(new_array, array->Length(), value);
+  return new_array;
+}
+
+
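The growth step in Add() above, new_length = length + (length >> 1) + 4, grows the backing store by roughly 1.5x plus a small constant, so repeated additions stay amortized O(1) while tiny arrays still gain a few slots per reallocation. A standalone sketch of the resulting capacity sequence, starting from the initial 1-element allocation:

    #include <cstdio>

    // Capacity progression of the WeakFixedArray growth formula.
    int main() {
      int length = 1;
      for (int grows = 0; grows < 6; ++grows) {
        printf("after %d grow(s): %d\n", grows, length);
        length = length + (length >> 1) + 4;  // formula from the patch
      }
      // Prints: 1, 5, 11, 20, 34, 55.
    }
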
+void WeakFixedArray::Remove(Handle<HeapObject> value) {
+  // Optimize for the most recently added element to be removed again.
+  int first_index = last_used_index();
+  for (int i = first_index;;) {
+    if (Get(i) == *value) {
+      clear(i);
+      // Users of WeakFixedArray should make sure that there are no duplicates;
+      // they can use Add(..., kAddIfNotFound) if necessary.
+      return;
+    }
+    i = (i + 1) % Length();
+    if (i == first_index) break;
+  }
+}
+
+
+// static
+Handle<WeakFixedArray> WeakFixedArray::Allocate(
+    Isolate* isolate, int size, Handle<WeakFixedArray> initialize_from) {
+  DCHECK(0 <= size);
+  Handle<FixedArray> result =
+      isolate->factory()->NewUninitializedFixedArray(size + kFirstIndex);
+  Handle<WeakFixedArray> casted_result = Handle<WeakFixedArray>::cast(result);
+  if (initialize_from.is_null()) {
+    for (int i = 0; i < result->length(); ++i) {
+      result->set(i, Smi::FromInt(0));
+    }
+  } else {
+    DCHECK(initialize_from->Length() <= size);
+    Handle<FixedArray> raw_source = Handle<FixedArray>::cast(initialize_from);
+    int target_index = kFirstIndex;
+    for (int source_index = kFirstIndex; source_index < raw_source->length();
+         ++source_index) {
+      // The act of allocating might have caused entries in the source array
+      // to be cleared. Copy only what's needed.
+      if (initialize_from->IsEmptySlot(source_index - kFirstIndex)) continue;
+      result->set(target_index++, raw_source->get(source_index));
+    }
+    casted_result->set_last_used_index(target_index - 1 - kFirstIndex);
+    for (; target_index < result->length(); ++target_index) {
+      result->set(target_index, Smi::FromInt(0));
+    }
+  }
+  return casted_result;
+}
+
+
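Allocate() has to cope with the allocation itself triggering a GC that clears entries in the source array, so the copy loop compacts live entries to the front and zero-fills the tail. The same idea on plain vectors, with 0 standing in for a cleared weak cell:

    #include <cassert>
    #include <vector>

    // Compacting grow: copy only live (non-zero) entries, zero-fill the rest.
    std::vector<int> GrowCompacting(const std::vector<int>& src,
                                    size_t new_size) {
      assert(src.size() <= new_size);
      std::vector<int> result;
      result.reserve(new_size);
      for (int value : src)
        if (value != 0) result.push_back(value);  // skip cleared slots
      result.resize(new_size, 0);
      return result;
    }

    int main() {
      std::vector<int> grown = GrowCompacting({7, 0, 9}, 5);
      assert(grown == (std::vector<int>{7, 9, 0, 0, 0}));
    }
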
 Handle<DescriptorArray> DescriptorArray::Allocate(Isolate* isolate,
                                                   int number_of_descriptors,
                                                   int slack) {
@@ -7928,8 +8305,7 @@ void DescriptorArray::SetEnumCache(FixedArray* bridge_storage,
 }
 
 
-void DescriptorArray::CopyFrom(int index,
-                               DescriptorArray* src,
+void DescriptorArray::CopyFrom(int index, DescriptorArray* src,
                                const WhitenessWitness& witness) {
   Object* value = src->GetValue(index);
   PropertyDetails details = src->GetDetails(index);
@@ -9084,6 +9460,35 @@ uint32_t StringHasher::ComputeUtf8Hash(Vector<const char> chars,
 }
 
 
+void IteratingStringHasher::VisitConsString(ConsString* cons_string) {
+  // Run small ConsStrings through ConsStringIterator.
+  if (cons_string->length() < 64) {
+    ConsStringIterator iter(cons_string);
+    int offset;
+    String* string;
+    while (nullptr != (string = iter.Next(&offset))) {
+      DCHECK_EQ(0, offset);
+      String::VisitFlat(this, string, 0);
+    }
+    return;
+  }
+  // Slow case.
+  const int max_length = String::kMaxHashCalcLength;
+  int length = std::min(cons_string->length(), max_length);
+  if (cons_string->HasOnlyOneByteChars()) {
+    uint8_t* buffer = new uint8_t[length];
+    String::WriteToFlat(cons_string, buffer, 0, length);
+    AddCharacters(buffer, length);
+    delete[] buffer;
+  } else {
+    uint16_t* buffer = new uint16_t[length];
+    String::WriteToFlat(cons_string, buffer, 0, length);
+    AddCharacters(buffer, length);
+    delete[] buffer;
+  }
+}
+
+
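VisitConsString() relies on the hash being a left-to-right fold over characters: feeding the hasher the flat pieces of a small rope in order yields the same value as hashing the flattened string, so only cons strings of 64 characters or more pay for a temporary flat buffer. A sketch of that property, with FNV-1a standing in for V8's StringHasher:

    #include <cassert>
    #include <cstdint>
    #include <string>
    #include <vector>

    // A running hash fed chunk-by-chunk equals the hash of the
    // concatenation, because the fold depends only on the character
    // sequence.
    uint32_t AddCharacters(uint32_t hash, const std::string& chunk) {
      for (unsigned char c : chunk) hash = (hash ^ c) * 16777619u;
      return hash;
    }

    int main() {
      const uint32_t kSeed = 2166136261u;
      const std::vector<std::string> rope = {"con", "cat", "enated"};
      uint32_t piecewise = kSeed;
      for (const auto& chunk : rope) piecewise = AddCharacters(piecewise, chunk);
      assert(piecewise == AddCharacters(kSeed, "concatenated"));
    }
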
 void String::PrintOn(FILE* file) {
   int length = this->length();
   for (int i = 0; i < length; i++) {
@@ -9123,7 +9528,6 @@ static bool CheckEquivalent(Map* first, Map* second) {
     first->instance_type() == second->instance_type() &&
     first->bit_field() == second->bit_field() &&
     first->bit_field2() == second->bit_field2() &&
-    first->is_frozen() == second->is_frozen() &&
     first->has_instance_call_handler() == second->has_instance_call_handler();
 }
 
@@ -9194,22 +9598,40 @@ void JSFunction::JSFunctionIterateBody(int object_size, ObjectVisitor* v) {
 
 
 void JSFunction::MarkForOptimization() {
+  Isolate* isolate = GetIsolate();
+  DCHECK(isolate->use_crankshaft());
   DCHECK(!IsOptimized());
   DCHECK(shared()->allows_lazy_compilation() ||
          code()->optimizable());
   DCHECK(!shared()->is_generator());
   set_code_no_write_barrier(
-      GetIsolate()->builtins()->builtin(Builtins::kCompileOptimized));
+      isolate->builtins()->builtin(Builtins::kCompileOptimized));
   // No write barrier required, since the builtin is part of the root set.
 }
 
 
-void JSFunction::MarkForConcurrentOptimization() {
-  DCHECK(is_compiled() || GetIsolate()->DebuggerHasBreakPoints());
+void JSFunction::AttemptConcurrentOptimization() {
+  Isolate* isolate = GetIsolate();
+  if (!isolate->concurrent_recompilation_enabled() ||
+      isolate->bootstrapper()->IsActive()) {
+    MarkForOptimization();
+    return;
+  }
+  if (isolate->concurrent_osr_enabled() &&
+      isolate->optimizing_compiler_thread()->IsQueuedForOSR(this)) {
+    // Do not attempt regular recompilation if we already queued this for OSR.
+    // TODO(yangguo): This is necessary so that we don't install optimized
+    // code on a function that is already optimized, since OSR and regular
+    // recompilation race.  This goes away as soon as OSR becomes one-shot.
+    return;
+  }
+  DCHECK(isolate->use_crankshaft());
+  DCHECK(!IsInOptimizationQueue());
+  DCHECK(is_compiled() || isolate->DebuggerHasBreakPoints());
   DCHECK(!IsOptimized());
   DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
   DCHECK(!shared()->is_generator());
-  DCHECK(GetIsolate()->concurrent_recompilation_enabled());
+  DCHECK(isolate->concurrent_recompilation_enabled());
   if (FLAG_trace_concurrent_recompilation) {
     PrintF("  ** Marking ");
     ShortPrint();
@@ -9221,24 +9643,6 @@ void JSFunction::MarkForConcurrentOptimization() {
 }
 
 
-void JSFunction::MarkInOptimizationQueue() {
-  // We can only arrive here via the concurrent-recompilation builtin.  If
-  // break points were set, the code would point to the lazy-compile builtin.
-  DCHECK(!GetIsolate()->DebuggerHasBreakPoints());
-  DCHECK(IsMarkedForConcurrentOptimization() && !IsOptimized());
-  DCHECK(shared()->allows_lazy_compilation() || code()->optimizable());
-  DCHECK(GetIsolate()->concurrent_recompilation_enabled());
-  if (FLAG_trace_concurrent_recompilation) {
-    PrintF("  ** Queueing ");
-    ShortPrint();
-    PrintF(" for concurrent recompilation.\n");
-  }
-  set_code_no_write_barrier(
-      GetIsolate()->builtins()->builtin(Builtins::kInOptimizationQueue));
-  // No write barrier required, since the builtin is part of the root set.
-}
-
-
 Handle<JSFunction> JSFunction::CloneClosure(Handle<JSFunction> function) {
   Isolate* isolate = function->GetIsolate();
   Handle<Map> map(function->map());
@@ -9414,15 +9818,20 @@ void JSObject::OptimizeAsPrototype(Handle<JSObject> object,
   if (object->IsJSGlobalProxy()) return;
   if (mode == FAST_PROTOTYPE && !object->map()->is_prototype_map()) {
     // First normalize to ensure all JSFunctions are CONSTANT.
-    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0);
+    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, 0,
+                                  "NormalizeAsPrototype");
   }
+  bool has_just_copied_map = false;
   if (!object->HasFastProperties()) {
-    JSObject::MigrateSlowToFast(object, 0);
+    JSObject::MigrateSlowToFast(object, 0, "OptimizeAsPrototype");
+    has_just_copied_map = true;
   }
   if (mode == FAST_PROTOTYPE && object->HasFastProperties() &&
       !object->map()->is_prototype_map()) {
-    Handle<Map> new_map = Map::Copy(handle(object->map()));
-    JSObject::MigrateToMap(object, new_map);
+    if (!has_just_copied_map) {
+      Handle<Map> new_map = Map::Copy(handle(object->map()), "CopyAsPrototype");
+      JSObject::MigrateToMap(object, new_map);
+    }
     object->map()->set_is_prototype_map(true);
   }
 }
@@ -9434,6 +9843,74 @@ void JSObject::ReoptimizeIfPrototype(Handle<JSObject> object) {
 }
 
 
+void JSObject::RegisterPrototypeUser(Handle<JSObject> prototype,
+                                     Handle<HeapObject> user) {
+  DCHECK(FLAG_track_prototype_users);
+  Isolate* isolate = prototype->GetIsolate();
+  Handle<Name> symbol = isolate->factory()->prototype_users_symbol();
+
+  // Get the prototype users array, creating it if it doesn't exist yet.
+  Handle<Object> maybe_array =
+      JSObject::GetProperty(prototype, symbol).ToHandleChecked();
+
+  Handle<WeakFixedArray> new_array = WeakFixedArray::Add(maybe_array, user);
+  if (!maybe_array.is_identical_to(new_array)) {
+    JSObject::SetOwnPropertyIgnoreAttributes(prototype, symbol, new_array,
+                                             DONT_ENUM).Assert();
+  }
+}
+
+
+void JSObject::UnregisterPrototypeUser(Handle<JSObject> prototype,
+                                       Handle<HeapObject> user) {
+  Isolate* isolate = prototype->GetIsolate();
+  Handle<Name> symbol = isolate->factory()->prototype_users_symbol();
+
+  Handle<Object> maybe_array =
+      JSObject::GetProperty(prototype, symbol).ToHandleChecked();
+  if (!maybe_array->IsWeakFixedArray()) return;
+  Handle<WeakFixedArray>::cast(maybe_array)->Remove(user);
+}
+
+
+void Map::SetPrototype(Handle<Object> prototype,
+                       PrototypeOptimizationMode proto_mode) {
+  if (this->prototype()->IsJSObject() && FLAG_track_prototype_users) {
+    Handle<JSObject> old_prototype(JSObject::cast(this->prototype()));
+    JSObject::UnregisterPrototypeUser(old_prototype, handle(this));
+  }
+  if (prototype->IsJSObject()) {
+    Handle<JSObject> prototype_jsobj = Handle<JSObject>::cast(prototype);
+    if (ShouldRegisterAsPrototypeUser(prototype_jsobj)) {
+      JSObject::RegisterPrototypeUser(prototype_jsobj, handle(this));
+    }
+    JSObject::OptimizeAsPrototype(prototype_jsobj, proto_mode);
+  }
+  WriteBarrierMode wb_mode =
+      prototype->IsNull() ? SKIP_WRITE_BARRIER : UPDATE_WRITE_BARRIER;
+  set_prototype(*prototype, wb_mode);
+}
+
+
+bool Map::ShouldRegisterAsPrototypeUser(Handle<JSObject> prototype) {
+  if (!FLAG_track_prototype_users) return false;
+  if (this->is_prototype_map()) return true;
+  if (this->is_dictionary_map()) return false;
+  Object* back = GetBackPointer();
+  if (!back->IsMap()) return true;
+  if (Map::cast(back)->prototype() != *prototype) return true;
+  return false;
+}
+
+
+bool Map::CanUseOptimizationsBasedOnPrototypeRegistry() {
+  if (!FLAG_track_prototype_users) return false;
+  if (this->is_prototype_map()) return true;
+  if (GetBackPointer()->IsMap()) return true;
+  return false;
+}
+
+
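ShouldRegisterAsPrototypeUser() above keeps the users list small: with --track-prototype-users on, only prototype maps and the map that first introduced a given prototype register; transition children that inherit the same prototype share the parent's registration, and dictionary maps never register. The decision reduced to plain fields (hypothetical types, not V8's, and assuming the flag is on):

    #include <cassert>

    struct MapLike {
      bool is_prototype_map;
      bool is_dictionary_map;
      const MapLike* back_pointer;  // nullptr when there is none
      const void* prototype;
    };

    bool ShouldRegister(const MapLike& m, const void* proto) {
      if (m.is_prototype_map) return true;
      if (m.is_dictionary_map) return false;
      if (m.back_pointer == nullptr) return true;  // first map in its branch
      return m.back_pointer->prototype != proto;   // parent already registered
    }

    int main() {
      const void* proto = "P";
      MapLike parent{false, false, nullptr, proto};
      MapLike child{false, false, &parent, proto};
      assert(ShouldRegister(parent, proto));  // introduced the prototype
      assert(!ShouldRegister(child, proto));  // shares parent's registration
    }
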
 Handle<Object> CacheInitialJSArrayMaps(
     Handle<Context> native_context, Handle<Map> initial_map) {
   // Replace all of the cached initial array maps in the native context with
@@ -9491,7 +9968,7 @@ void JSFunction::SetInstancePrototype(Handle<JSFunction> function,
       // into the initial map where it belongs.
       function->set_prototype_or_initial_map(*value);
     } else {
-      Handle<Map> new_map = Map::Copy(initial_map);
+      Handle<Map> new_map = Map::Copy(initial_map, "SetInstancePrototype");
       JSFunction::SetInitialMap(function, new_map, value);
 
       // If the function is used as the global Array function, cache the
@@ -9531,7 +10008,7 @@ void JSFunction::SetPrototype(Handle<JSFunction> function,
     // Copy the map so this does not affect unrelated functions.
     // Remove map transitions because they point to maps with a
     // different prototype.
-    Handle<Map> new_map = Map::Copy(handle(function->map()));
+    Handle<Map> new_map = Map::Copy(handle(function->map()), "SetPrototype");
 
     JSObject::MigrateToMap(function, new_map);
     new_map->set_constructor(*value);
@@ -9572,13 +10049,18 @@ bool JSFunction::RemovePrototype() {
 
 void JSFunction::SetInitialMap(Handle<JSFunction> function, Handle<Map> map,
                                Handle<Object> prototype) {
-  if (prototype->IsJSObject()) {
-    Handle<JSObject> js_proto = Handle<JSObject>::cast(prototype);
-    JSObject::OptimizeAsPrototype(js_proto, FAST_PROTOTYPE);
+  if (map->prototype() != *prototype) {
+    map->SetPrototype(prototype, FAST_PROTOTYPE);
   }
-  map->set_prototype(*prototype);
   function->set_prototype_or_initial_map(*map);
   map->set_constructor(*function);
+#if TRACE_MAPS
+  if (FLAG_trace_maps) {
+    PrintF("[TraceMaps: InitialMap map= %p SFI= %d_%s ]\n",
+           reinterpret_cast<void*>(*map), function->shared()->unique_id(),
+           function->shared()->DebugName()->ToCString().get());
+  }
+#endif
 }
 
 
@@ -9728,27 +10210,31 @@ int Script::GetColumnNumber(Handle<Script> script, int code_pos) {
 }
 
 
-int Script::GetLineNumberWithArray(int code_pos) {
+int Script::GetLineNumberWithArray(int position) {
   DisallowHeapAllocation no_allocation;
-  DCHECK(line_ends()->IsFixedArray());
-  FixedArray* line_ends_array = FixedArray::cast(line_ends());
-  int line_ends_len = line_ends_array->length();
-  if (line_ends_len == 0) return -1;
+  FixedArray* line_ends = FixedArray::cast(this->line_ends());
+  int upper = line_ends->length() - 1;
+  if (upper < 0) return -1;
+  int offset = line_offset()->value();
 
-  if ((Smi::cast(line_ends_array->get(0)))->value() >= code_pos) {
-    return line_offset()->value();
+  if (position > Smi::cast(line_ends->get(upper))->value()) {
+    return upper + 1 + offset;
   }
+  if (position <= Smi::cast(line_ends->get(0))->value()) return offset;
 
-  int left = 0;
-  int right = line_ends_len;
-  while (int half = (right - left) / 2) {
-    if ((Smi::cast(line_ends_array->get(left + half)))->value() > code_pos) {
-      right -= half;
+  int lower = 1;
+  // Binary search.
+  while (true) {
+    int mid = (lower + upper) / 2;
+    if (position <= Smi::cast(line_ends->get(mid - 1))->value()) {
+      upper = mid - 1;
+    } else if (position > Smi::cast(line_ends->get(mid))->value()) {
+      lower = mid + 1;
     } else {
-      left += half;
+      return mid + offset;
     }
   }
-  return right + line_offset()->value();
+  return -1;
 }
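
The rewrite replaces the old halving loop with a textbook binary search over the sorted line-end offsets: line i (0-based, before adding the script's line offset) covers positions in (line_ends[i-1], line_ends[i]]. The same search on plain ints, minus the Smi unwrapping:

    #include <cassert>
    #include <vector>

    // Returns the 0-based line containing |position|, given each line's end
    // offset in ascending order; mirrors the patched
    // Script::GetLineNumberWithArray().
    int LineFromPosition(const std::vector<int>& line_ends, int position) {
      int upper = static_cast<int>(line_ends.size()) - 1;
      if (upper < 0) return -1;
      if (position > line_ends[upper]) return upper + 1;
      if (position <= line_ends[0]) return 0;
      int lower = 1;
      while (true) {
        int mid = (lower + upper) / 2;
        if (position <= line_ends[mid - 1]) {
          upper = mid - 1;
        } else if (position > line_ends[mid]) {
          lower = mid + 1;
        } else {
          return mid;  // line_ends[mid - 1] < position <= line_ends[mid]
        }
      }
    }

    int main() {
      const std::vector<int> ends = {10, 25, 40};  // three lines
      assert(LineFromPosition(ends, 0) == 0);
      assert(LineFromPosition(ends, 11) == 1);
      assert(LineFromPosition(ends, 40) == 2);
      assert(LineFromPosition(ends, 41) == 3);  // past the last line end
    }
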
 
 
@@ -9947,8 +10433,9 @@ void SharedFunctionInfo::DisableOptimization(BailoutReason reason) {
   // regenerated and set on the shared function info it is marked as
   // non-optimizable if optimization is disabled for the shared
   // function info.
+  DCHECK(reason != kNoReason);
   set_optimization_disabled(true);
-  set_bailout_reason(reason);
+  set_disable_optimization_reason(reason);
   // Code should be the lazy compilation stub or else unoptimized.  If the
   // latter, disable optimization for the code too.
   DCHECK(code()->kind() == Code::FUNCTION || code()->kind() == Code::BUILTIN);
@@ -9978,20 +10465,15 @@ bool SharedFunctionInfo::VerifyBailoutId(BailoutId id) {
 void JSFunction::StartInobjectSlackTracking() {
   DCHECK(has_initial_map() && !IsInobjectSlackTrackingInProgress());
 
-  if (!FLAG_clever_optimizations) return;
   Map* map = initial_map();
 
-  // Only initiate the tracking the first time.
-  if (map->done_inobject_slack_tracking()) return;
-  map->set_done_inobject_slack_tracking(true);
-
   // No tracking during the snapshot construction phase.
   Isolate* isolate = GetIsolate();
   if (isolate->serializer_enabled()) return;
 
   if (map->unused_property_fields() == 0) return;
 
-  map->set_construction_count(kGenerousAllocationCount);
+  map->set_counter(Map::kSlackTrackingCounterStart);
 }
 
 
@@ -10038,8 +10520,8 @@ void JSFunction::CompleteInobjectSlackTracking() {
   DCHECK(has_initial_map());
   Map* map = initial_map();
 
-  DCHECK(map->done_inobject_slack_tracking());
-  map->set_construction_count(kNoSlackTracking);
+  DCHECK(map->counter() >= Map::kSlackTrackingCounterEnd - 1);
+  map->set_counter(Map::kRetainingCounterStart);
 
   int slack = map->unused_property_fields();
   map->TraverseTransitionTree(&GetMinInobjectSlack, &slack);
@@ -10309,6 +10791,7 @@ Object* Code::FindNthObject(int n, Map* match_map) {
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
     Object* object = info->target_object();
+    if (object->IsWeakCell()) object = WeakCell::cast(object)->value();
     if (object->IsHeapObject()) {
       if (HeapObject::cast(object)->map() == match_map) {
         if (--n == 0) return object;
@@ -10341,6 +10824,9 @@ void Code::FindAndReplace(const FindAndReplacePattern& pattern) {
     RelocInfo* info = it.rinfo();
     Object* object = info->target_object();
     if (object->IsHeapObject()) {
+      if (object->IsWeakCell()) {
+        object = HeapObject::cast(WeakCell::cast(object)->value());
+      }
       Map* map = HeapObject::cast(object)->map();
       if (map == *pattern.find_[current_pattern]) {
         info->set_target_object(*pattern.replace_[current_pattern]);
@@ -10359,6 +10845,7 @@ void Code::FindAllMaps(MapHandleList* maps) {
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
     Object* object = info->target_object();
+    if (object->IsWeakCell()) object = WeakCell::cast(object)->value();
     if (object->IsMap()) maps->Add(handle(Map::cast(object)));
   }
 }
@@ -10367,11 +10854,21 @@ void Code::FindAllMaps(MapHandleList* maps) {
 Code* Code::FindFirstHandler() {
   DCHECK(is_inline_cache_stub());
   DisallowHeapAllocation no_allocation;
-  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+             RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  bool skip_next_handler = false;
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
     RelocInfo* info = it.rinfo();
-    Code* code = Code::GetCodeFromTargetAddress(info->target_address());
-    if (code->kind() == Code::HANDLER) return code;
+    if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) {
+      Object* obj = info->target_object();
+      skip_next_handler |= obj->IsWeakCell() && WeakCell::cast(obj)->cleared();
+    } else {
+      Code* code = Code::GetCodeFromTargetAddress(info->target_address());
+      if (code->kind() == Code::HANDLER) {
+        if (!skip_next_handler) return code;
+        skip_next_handler = false;
+      }
+    }
   }
   return NULL;
 }
@@ -10380,17 +10877,27 @@ Code* Code::FindFirstHandler() {
 bool Code::FindHandlers(CodeHandleList* code_list, int length) {
   DCHECK(is_inline_cache_stub());
   DisallowHeapAllocation no_allocation;
-  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET);
+  int mask = RelocInfo::ModeMask(RelocInfo::CODE_TARGET) |
+             RelocInfo::ModeMask(RelocInfo::EMBEDDED_OBJECT);
+  bool skip_next_handler = false;
   int i = 0;
   for (RelocIterator it(this, mask); !it.done(); it.next()) {
     if (i == length) return true;
     RelocInfo* info = it.rinfo();
-    Code* code = Code::GetCodeFromTargetAddress(info->target_address());
-    // IC stubs with handlers never contain non-handler code objects before
-    // handler targets.
-    if (code->kind() != Code::HANDLER) break;
-    code_list->Add(Handle<Code>(code));
-    i++;
+    if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) {
+      Object* obj = info->target_object();
+      skip_next_handler |= obj->IsWeakCell() && WeakCell::cast(obj)->cleared();
+    } else {
+      Code* code = Code::GetCodeFromTargetAddress(info->target_address());
+      // IC stubs with handlers never contain non-handler code objects before
+      // handler targets.
+      if (code->kind() != Code::HANDLER) break;
+      if (!skip_next_handler) {
+        code_list->Add(Handle<Code>(code));
+        i++;
+      }
+      skip_next_handler = false;
+    }
   }
   return i == length;
 }
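
Both scans above walk a reloc stream in which each (weak) map reference is followed by its handler code target; a cleared WeakCell means the map was collected, so the handler that follows must be skipped rather than returned. The pairing logic in isolation (hypothetical reduction, not V8 types):

    #include <cassert>
    #include <vector>

    // Slots alternate map references and handler targets; a cleared map
    // slot invalidates the handler that follows it.
    struct Slot {
      bool is_map;     // otherwise a handler target
      bool cleared;    // for map slots: weak cell cleared by GC?
      int handler_id;  // for handler slots
    };

    int FindFirstLiveHandler(const std::vector<Slot>& slots) {
      bool skip_next_handler = false;
      for (const Slot& s : slots) {
        if (s.is_map) {
          skip_next_handler |= s.cleared;
        } else if (!skip_next_handler) {
          return s.handler_id;
        } else {
          skip_next_handler = false;
        }
      }
      return -1;  // no live handler (NULL in the patch)
    }

    int main() {
      // The map for handler 1 died; handler 2 is the first live one.
      const std::vector<Slot> slots = {{true, true, 0},
                                       {false, false, 1},
                                       {true, false, 0},
                                       {false, false, 2}};
      assert(FindFirstLiveHandler(slots) == 2);
    }
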
@@ -10405,6 +10912,7 @@ MaybeHandle<Code> Code::FindHandlerForMap(Map* map) {
     RelocInfo* info = it.rinfo();
     if (info->rmode() == RelocInfo::EMBEDDED_OBJECT) {
       Object* object = info->target_object();
+      if (object->IsWeakCell()) object = WeakCell::cast(object)->value();
       if (object == map) return_next = true;
     } else if (return_next) {
       Code* code = Code::GetCodeFromTargetAddress(info->target_address());
@@ -10507,9 +11015,9 @@ static Code::Age EffectiveAge(Code::Age age) {
 }
 
 
-void Code::MakeYoung() {
+void Code::MakeYoung(Isolate* isolate) {
   byte* sequence = FindCodeAgeSequence();
-  if (sequence != NULL) MakeCodeAgeSequenceYoung(sequence, GetIsolate());
+  if (sequence != NULL) MakeCodeAgeSequenceYoung(sequence, isolate);
 }
 
 
@@ -11301,8 +11809,8 @@ MaybeHandle<Object> JSArray::SetElementsLength(
       // Skip deletions where the property was an accessor, leaving holes
       // in the array of old values.
       if (old_values[i]->IsTheHole()) continue;
-      JSObject::SetElement(
-          deleted, indices[i] - index, old_values[i], NONE, SLOPPY).Assert();
+      JSObject::SetOwnElement(deleted, indices[i] - index, old_values[i],
+                              SLOPPY).Assert();
     }
 
     SetProperty(deleted, isolate->factory()->length_string(),
@@ -11416,23 +11924,6 @@ void Map::AddDependentCode(Handle<Map> map,
 }
 
 
-// static
-void Map::AddDependentIC(Handle<Map> map,
-                         Handle<Code> stub) {
-  DCHECK(stub->next_code_link()->IsUndefined());
-  int n = map->dependent_code()->number_of_entries(DependentCode::kWeakICGroup);
-  if (n == 0) {
-    // Slow path: insert the head of the list with possible heap allocation.
-    Map::AddDependentCode(map, DependentCode::kWeakICGroup, stub);
-  } else {
-    // Fast path: link the stub to the existing head of the list without any
-    // heap allocation.
-    DCHECK(n == 1);
-    map->dependent_code()->AddToDependentICList(stub);
-  }
-}
-
-
 DependentCode::GroupStartIndexes::GroupStartIndexes(DependentCode* entries) {
   Recompute(entries);
 }
@@ -11562,22 +12053,10 @@ void DependentCode::RemoveCompilationInfo(DependentCode::DependencyGroup group,
 }
 
 
-static bool CodeListContains(Object* head, Code* code) {
-  while (!head->IsUndefined()) {
-    if (head == code) return true;
-    head = Code::cast(head)->next_code_link();
-  }
-  return false;
-}
-
-
 bool DependentCode::Contains(DependencyGroup group, Code* code) {
   GroupStartIndexes starts(this);
   int start = starts.at(group);
   int end = starts.at(group + 1);
-  if (group == kWeakICGroup) {
-    return CodeListContains(object_at(start), code);
-  }
   for (int i = start; i < end; i++) {
     if (object_at(i) == code) return true;
   }
@@ -11634,24 +12113,6 @@ void DependentCode::DeoptimizeDependentCodeGroup(
 }
 
 
-void DependentCode::AddToDependentICList(Handle<Code> stub) {
-  DisallowHeapAllocation no_heap_allocation;
-  GroupStartIndexes starts(this);
-  int i = starts.at(kWeakICGroup);
-  Object* head = object_at(i);
-  // Try to insert the stub after the head of the list to minimize number of
-  // writes to the DependentCode array, since a write to the array can make it
-  // strong if it was alread marked by incremental marker.
-  if (head->IsCode()) {
-    stub->set_next_code_link(Code::cast(head)->next_code_link());
-    Code::cast(head)->set_next_code_link(*stub);
-  } else {
-    stub->set_next_code_link(head);
-    set_object_at(i, *stub);
-  }
-}
-
-
 void DependentCode::SetMarkedForDeoptimization(Code* code,
                                                DependencyGroup group) {
   code->set_marked_for_deoptimization(true);
@@ -11670,8 +12131,6 @@ void DependentCode::SetMarkedForDeoptimization(Code* code,
 
 const char* DependentCode::DependencyGroupName(DependencyGroup group) {
   switch (group) {
-    case kWeakICGroup:
-      return "weak-ic";
     case kWeakCodeGroup:
       return "weak-code";
     case kTransitionGroup:
@@ -11697,12 +12156,13 @@ const char* DependentCode::DependencyGroupName(DependencyGroup group) {
 
 
 Handle<Map> Map::TransitionToPrototype(Handle<Map> map,
-                                       Handle<Object> prototype) {
+                                       Handle<Object> prototype,
+                                       PrototypeOptimizationMode mode) {
   Handle<Map> new_map = GetPrototypeTransition(map, prototype);
   if (new_map.is_null()) {
-    new_map = Copy(map);
+    new_map = Copy(map, "TransitionToPrototype");
     PutPrototypeTransition(map, prototype, new_map);
-    new_map->set_prototype(*prototype);
+    new_map->SetPrototype(prototype, mode);
   }
   return new_map;
 }
@@ -11772,13 +12232,9 @@ MaybeHandle<Object> JSObject::SetPrototype(Handle<JSObject> object,
   // Nothing to do if prototype is already set.
   if (map->prototype() == *value) return value;
 
-  if (value->IsJSObject()) {
-    PrototypeOptimizationMode mode =
-        from_javascript ? REGULAR_PROTOTYPE : FAST_PROTOTYPE;
-    JSObject::OptimizeAsPrototype(Handle<JSObject>::cast(value), mode);
-  }
-
-  Handle<Map> new_map = Map::TransitionToPrototype(map, value);
+  PrototypeOptimizationMode mode =
+      from_javascript ? REGULAR_PROTOTYPE : FAST_PROTOTYPE;
+  Handle<Map> new_map = Map::TransitionToPrototype(map, value, mode);
   DCHECK(new_map->prototype() == *value);
   JSObject::MigrateToMap(real_receiver, new_map);
 
@@ -12157,8 +12613,8 @@ MaybeHandle<Object> JSObject::SetDictionaryElement(
       // is read-only (a declared const that has not been initialized).  If a
       // value is being defined we skip attribute checks completely.
       if (set_mode == DEFINE_PROPERTY) {
-        details = PropertyDetails(
-            attributes, NORMAL, details.dictionary_index());
+        details =
+            PropertyDetails(attributes, FIELD, details.dictionary_index());
         dictionary->DetailsAtPut(entry, details);
       } else if (details.IsReadOnly() && !element->IsTheHole()) {
         if (strict_mode == SLOPPY) {
@@ -12209,7 +12665,7 @@ MaybeHandle<Object> JSObject::SetDictionaryElement(
       }
     }
 
-    PropertyDetails details = PropertyDetails(attributes, NORMAL, 0);
+    PropertyDetails details(attributes, FIELD, 0);
     Handle<SeededNumberDictionary> new_dictionary =
         SeededNumberDictionary::AddNumberEntry(dictionary, index, value,
                                                details);
@@ -12723,10 +13179,32 @@ void AllocationSite::DigestTransitionFeedback(Handle<AllocationSite> site,
 
 
 // static
-void AllocationSite::AddDependentCompilationInfo(Handle<AllocationSite> site,
-                                                 Reason reason,
-                                                 CompilationInfo* info) {
-  DependentCode::DependencyGroup group = site->ToDependencyGroup(reason);
+void AllocationSite::RegisterForDeoptOnTenureChange(Handle<AllocationSite> site,
+                                                    CompilationInfo* info) {
+  AddDependentCompilationInfo(
+      site, DependentCode::kAllocationSiteTenuringChangedGroup, info);
+}
+
+
+// static
+void AllocationSite::RegisterForDeoptOnTransitionChange(
+    Handle<AllocationSite> site, CompilationInfo* info) {
+  // Do nothing if the object doesn't have any useful element transitions left.
+  ElementsKind kind =
+      site->SitePointsToLiteral()
+          ? JSObject::cast(site->transition_info())->GetElementsKind()
+          : site->GetElementsKind();
+  if (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE) {
+    AddDependentCompilationInfo(
+        site, DependentCode::kAllocationSiteTransitionChangedGroup, info);
+  }
+}
+
+
+// static
+void AllocationSite::AddDependentCompilationInfo(
+    Handle<AllocationSite> site, DependentCode::DependencyGroup group,
+    CompilationInfo* info) {
   Handle<DependentCode> dep(site->dependent_code());
   Handle<DependentCode> codes =
       DependentCode::Insert(dep, group, info->object_wrapper());
@@ -12876,18 +13354,21 @@ bool JSArray::IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map) {
 }
 
 
+bool JSArray::HasReadOnlyLength(Handle<JSArray> array) {
+  LookupIterator it(array, array->GetIsolate()->factory()->length_string(),
+                    LookupIterator::OWN_SKIP_INTERCEPTOR);
+  CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
+  CHECK(it.IsFound());
+  CHECK_EQ(LookupIterator::ACCESSOR, it.state());
+  return it.IsReadOnly();
+}
+
+
 bool JSArray::WouldChangeReadOnlyLength(Handle<JSArray> array,
                                         uint32_t index) {
   uint32_t length = 0;
   CHECK(array->length()->ToArrayIndex(&length));
-  if (length <= index) {
-    LookupIterator it(array, array->GetIsolate()->factory()->length_string(),
-                      LookupIterator::OWN_SKIP_INTERCEPTOR);
-    CHECK_NE(LookupIterator::ACCESS_CHECK, it.state());
-    CHECK(it.IsFound());
-    CHECK_EQ(LookupIterator::ACCESSOR, it.state());
-    return it.IsReadOnly();
-  }
+  if (length <= index) return HasReadOnlyLength(array);
   return false;
 }
 
@@ -13191,22 +13672,21 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(
     Handle<Name> name) {
   Isolate* isolate = holder->GetIsolate();
 
-  // TODO(rossberg): Support symbols in the API.
-  if (name->IsSymbol()) return isolate->factory()->undefined_value();
-
   Handle<InterceptorInfo> interceptor(holder->GetNamedInterceptor(), isolate);
-  Handle<String> name_string = Handle<String>::cast(name);
-
   if (interceptor->getter()->IsUndefined()) return MaybeHandle<Object>();
 
-  v8::NamedPropertyGetterCallback getter =
-      v8::ToCData<v8::NamedPropertyGetterCallback>(interceptor->getter());
+  if (name->IsSymbol() && !interceptor->can_intercept_symbols()) {
+    return MaybeHandle<Object>();
+  }
+
+  v8::GenericNamedPropertyGetterCallback getter =
+      v8::ToCData<v8::GenericNamedPropertyGetterCallback>(
+          interceptor->getter());
   LOG(isolate,
       ApiNamedPropertyAccess("interceptor-named-get", *holder, *name));
   PropertyCallbackArguments
       args(isolate, interceptor->data(), *receiver, *holder);
-  v8::Handle<v8::Value> result =
-      args.Call(getter, v8::Utils::ToLocal(name_string));
+  v8::Handle<v8::Value> result = args.Call(getter, v8::Utils::ToLocal(name));
   RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
   if (result.IsEmpty()) return MaybeHandle<Object>();
 
@@ -13218,7 +13698,6 @@ MaybeHandle<Object> JSObject::GetPropertyWithInterceptor(
 
 
 // Compute the property keys from the interceptor.
-// TODO(rossberg): support symbols in API, and filter here if needed.
 MaybeHandle<JSObject> JSObject::GetKeysForNamedInterceptor(
     Handle<JSObject> object, Handle<JSReceiver> receiver) {
   Isolate* isolate = receiver->GetIsolate();
@@ -13227,8 +13706,8 @@ MaybeHandle<JSObject> JSObject::GetKeysForNamedInterceptor(
       args(isolate, interceptor->data(), *receiver, *object);
   v8::Handle<v8::Object> result;
   if (!interceptor->enumerator()->IsUndefined()) {
-    v8::NamedPropertyEnumeratorCallback enum_fun =
-        v8::ToCData<v8::NamedPropertyEnumeratorCallback>(
+    v8::GenericNamedPropertyEnumeratorCallback enum_fun =
+        v8::ToCData<v8::GenericNamedPropertyEnumeratorCallback>(
             interceptor->enumerator());
     LOG(isolate, ApiObjectAccess("interceptor-named-enum", *object));
     result = args.Call(enum_fun);
@@ -13803,17 +14282,17 @@ class InternalizedStringKey : public HashTableKey {
   explicit InternalizedStringKey(Handle<String> string)
       : string_(string) { }
 
-  virtual bool IsMatch(Object* string) OVERRIDE {
+  bool IsMatch(Object* string) OVERRIDE {
     return String::cast(string)->Equals(*string_);
   }
 
-  virtual uint32_t Hash() OVERRIDE { return string_->Hash(); }
+  uint32_t Hash() OVERRIDE { return string_->Hash(); }
 
-  virtual uint32_t HashForObject(Object* other) OVERRIDE {
+  uint32_t HashForObject(Object* other) OVERRIDE {
     return String::cast(other)->Hash();
   }
 
-  virtual Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
+  Handle<Object> AsHandle(Isolate* isolate) OVERRIDE {
     // Internalize the string if possible.
     MaybeHandle<Map> maybe_map =
         isolate->factory()->InternalizedStringMapForString(string_);
@@ -14111,8 +14590,6 @@ template class HashTable<CompilationCacheTable,
                          CompilationCacheShape,
                          HashTableKey*>;
 
-template class HashTable<MapCache, MapCacheShape, HashTableKey*>;
-
 template class HashTable<ObjectHashTable,
                          ObjectHashTableShape,
                          Handle<Object> >;
@@ -14319,7 +14796,7 @@ Handle<Object> JSObject::PrepareSlowElementsForSort(
   }
 
   uint32_t result = pos;
-  PropertyDetails no_details = PropertyDetails(NONE, NORMAL, 0);
+  PropertyDetails no_details(NONE, FIELD, 0);
   while (undefs > 0) {
     if (pos > static_cast<uint32_t>(Smi::kMaxValue)) {
       // Adding an entry with the key beyond smi-range requires
@@ -14677,6 +15154,25 @@ Handle<Object> ExternalFloat64Array::SetValue(
 }
 
 
+void GlobalObject::InvalidatePropertyCell(Handle<GlobalObject> global,
+                                          Handle<Name> name) {
+  DCHECK(!global->HasFastProperties());
+  Isolate* isolate = global->GetIsolate();
+  int entry = global->property_dictionary()->FindEntry(name);
+  if (entry != NameDictionary::kNotFound) {
+    Handle<PropertyCell> cell(
+        PropertyCell::cast(global->property_dictionary()->ValueAt(entry)));
+
+    Handle<Object> value(cell->value(), isolate);
+    Handle<PropertyCell> new_cell = isolate->factory()->NewPropertyCell(value);
+    global->property_dictionary()->ValueAtPut(entry, *new_cell);
+
+    Handle<Object> hole = global->GetIsolate()->factory()->the_hole_value();
+    PropertyCell::SetValueInferType(cell, hole);
+  }
+}
+
+
 Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
     Handle<JSGlobalObject> global,
     Handle<Name> name) {
@@ -14686,7 +15182,7 @@ Handle<PropertyCell> JSGlobalObject::EnsurePropertyCell(
     Isolate* isolate = global->GetIsolate();
     Handle<PropertyCell> cell = isolate->factory()->NewPropertyCell(
         isolate->factory()->the_hole_value());
-    PropertyDetails details(NONE, NORMAL, 0);
+    PropertyDetails details(NONE, FIELD, 0);
     details = details.AsDeleted();
     Handle<NameDictionary> dictionary = NameDictionary::Add(
         handle(global->property_dictionary()), name, cell, details);
@@ -14899,16 +15395,19 @@ Handle<CompilationCacheTable> CompilationCacheTable::Put(
   Handle<SharedFunctionInfo> shared(context->closure()->shared());
   StringSharedKey key(src, shared, FLAG_use_strict ? STRICT : SLOPPY,
                       RelocInfo::kNoPosition);
-  int entry = cache->FindEntry(&key);
-  if (entry != kNotFound) {
+  {
     Handle<Object> k = key.AsHandle(isolate);
-    cache->set(EntryToIndex(entry), *k);
-    cache->set(EntryToIndex(entry) + 1, *value);
-    return cache;
+    DisallowHeapAllocation no_allocation_scope;
+    int entry = cache->FindEntry(&key);
+    if (entry != kNotFound) {
+      cache->set(EntryToIndex(entry), *k);
+      cache->set(EntryToIndex(entry) + 1, *value);
+      return cache;
+    }
   }
 
   cache = EnsureCapacity(cache, 1, &key);
-  entry = cache->FindInsertionEntry(key.Hash());
+  int entry = cache->FindInsertionEntry(key.Hash());
   Handle<Object> k =
       isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
   cache->set(EntryToIndex(entry), *k);
@@ -14924,16 +15423,19 @@ Handle<CompilationCacheTable> CompilationCacheTable::PutEval(
     int scope_position) {
   Isolate* isolate = cache->GetIsolate();
   StringSharedKey key(src, outer_info, value->strict_mode(), scope_position);
-  int entry = cache->FindEntry(&key);
-  if (entry != kNotFound) {
+  {
     Handle<Object> k = key.AsHandle(isolate);
-    cache->set(EntryToIndex(entry), *k);
-    cache->set(EntryToIndex(entry) + 1, *value);
-    return cache;
+    DisallowHeapAllocation no_allocation_scope;
+    int entry = cache->FindEntry(&key);
+    if (entry != kNotFound) {
+      cache->set(EntryToIndex(entry), *k);
+      cache->set(EntryToIndex(entry) + 1, *value);
+      return cache;
+    }
   }
 
   cache = EnsureCapacity(cache, 1, &key);
-  entry = cache->FindInsertionEntry(key.Hash());
+  int entry = cache->FindInsertionEntry(key.Hash());
   Handle<Object> k =
       isolate->factory()->NewNumber(static_cast<double>(key.Hash()));
   cache->set(EntryToIndex(entry), *k);
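
In both Put() and PutEval() the fix is one of ordering: key.AsHandle() can allocate, and an allocation can trigger a GC that rehashes the table, so an entry index found before the handle was created could be stale by the time it is written. Computing the handle first and wrapping FindEntry() plus the writes in a DisallowHeapAllocation scope makes the index provably fresh. The same discipline with an ordinary hash map as a stand-in:

    #include <cassert>
    #include <string>
    #include <unordered_map>

    // Stand-in for CompilationCacheTable: finish all work that may
    // allocate (building the key) before the lookup, then find and write
    // back-to-back so nothing can invalidate the entry in between.
    using Cache = std::unordered_map<std::string, int>;

    void Put(Cache& cache, const std::string& src, int value) {
      std::string key = src + "|sloppy";  // may allocate, like key.AsHandle()
      auto it = cache.find(key);          // like FindEntry(&key)
      if (it != cache.end()) {
        it->second = value;               // overwrite the existing entry
      } else {
        cache.emplace(key, value);        // like FindInsertionEntry() + set()
      }
    }

    int main() {
      Cache cache;
      Put(cache, "f()", 1);
      Put(cache, "f()", 2);
      assert(cache.size() == 1 && cache.at("f()|sloppy") == 2);
    }
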
@@ -15037,28 +15539,6 @@ class StringsKey : public HashTableKey {
 };
 
 
-Object* MapCache::Lookup(FixedArray* array) {
-  DisallowHeapAllocation no_alloc;
-  StringsKey key(handle(array));
-  int entry = FindEntry(&key);
-  if (entry == kNotFound) return GetHeap()->undefined_value();
-  return get(EntryToIndex(entry) + 1);
-}
-
-
-Handle<MapCache> MapCache::Put(
-    Handle<MapCache> map_cache, Handle<FixedArray> array, Handle<Map> value) {
-  StringsKey key(array);
-
-  Handle<MapCache> new_cache = EnsureCapacity(map_cache, 1, &key);
-  int entry = new_cache->FindInsertionEntry(key.Hash());
-  new_cache->set(EntryToIndex(entry), *array);
-  new_cache->set(EntryToIndex(entry) + 1, *value);
-  new_cache->ElementAdded();
-  return new_cache;
-}
-
-
 template<typename Derived, typename Shape, typename Key>
 Handle<Derived> Dictionary<Derived, Shape, Key>::New(
     Isolate* isolate,
@@ -15182,7 +15662,7 @@ Handle<Derived> Dictionary<Derived, Shape, Key>::AtPut(
 #ifdef DEBUG
   USE(Shape::AsHandle(dictionary->GetIsolate(), key));
 #endif
-  PropertyDetails details = PropertyDetails(NONE, NORMAL, 0);
+  PropertyDetails details(NONE, FIELD, 0);
 
   AddEntry(dictionary, key, value, details, dictionary->Hash(key));
   return dictionary;
@@ -15270,7 +15750,7 @@ Handle<UnseededNumberDictionary> UnseededNumberDictionary::AddNumberEntry(
     uint32_t key,
     Handle<Object> value) {
   SLOW_DCHECK(dictionary->FindEntry(key) == kNotFound);
-  return Add(dictionary, key, value, PropertyDetails(NONE, NORMAL, 0));
+  return Add(dictionary, key, value, PropertyDetails(NONE, FIELD, 0));
 }
 
 
@@ -15660,7 +16140,7 @@ Handle<Derived> OrderedHashTable<Derived, Iterator, entrysize>::Clear(
                table->GetHeap()->InNewSpace(*table) ? NOT_TENURED : TENURED);
 
   table->SetNextTable(*new_table);
-  table->SetNumberOfDeletedElements(-1);
+  table->SetNumberOfDeletedElements(kClearedTableSentinel);
 
   return new_table;
 }
@@ -15905,8 +16385,7 @@ void OrderedHashTableIterator<Derived, TableType>::Transition() {
     if (index > 0) {
       int nod = table->NumberOfDeletedElements();
 
-      // When we clear the table we set the number of deleted elements to -1.
-      if (nod == -1) {
+      if (nod == TableType::kClearedTableSentinel) {
         index = 0;
       } else {
         int old_index = index;
@@ -16551,13 +17030,24 @@ Handle<HeapType> PropertyCell::UpdatedType(Handle<PropertyCell> cell,
 }
 
 
-void PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
-                                     Handle<Object> value) {
+Handle<Object> PropertyCell::SetValueInferType(Handle<PropertyCell> cell,
+                                               Handle<Object> value) {
+  // Heuristic: if a small-ish string is stored in a previously uninitialized
+  // property cell, internalize it.
+  const int kMaxLengthForInternalization = 200;
+  if ((cell->type()->Is(HeapType::None()) ||
+       cell->type()->Is(HeapType::Undefined())) &&
+      value->IsString() &&
+      Handle<String>::cast(value)->length() <= kMaxLengthForInternalization) {
+    value = cell->GetIsolate()->factory()->InternalizeString(
+        Handle<String>::cast(value));
+  }
   cell->set_value(*value);
   if (!HeapType::Any()->Is(cell->type())) {
     Handle<HeapType> new_type = UpdatedType(cell, value);
     cell->set_type(*new_type);
   }
+  return value;
 }
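
The return-type change exists because the store is no longer a pure write: small strings flowing into a fresh cell come back internalized, and callers need the canonical handle. The payoff of internalization is that equal strings share one object, so later equality checks degenerate to pointer comparisons. A toy intern pool showing that effect (editorial sketch; V8's real mechanism is the string table behind Factory::InternalizeString, called above):

    #include <cassert>
    #include <memory>
    #include <string>
    #include <unordered_map>

    class InternPool {
     public:
      std::shared_ptr<const std::string> Intern(std::string s) {
        const size_t kMaxLengthForInternalization = 200;  // mirrors the diff
        if (s.size() > kMaxLengthForInternalization) {
          // Too big: not worth deduplicating, keep a private copy.
          return std::make_shared<const std::string>(std::move(s));
        }
        auto it = pool_.find(s);
        if (it != pool_.end()) return it->second;  // already interned
        auto interned = std::make_shared<const std::string>(s);
        pool_.emplace(std::move(s), interned);
        return interned;
      }

     private:
      std::unordered_map<std::string,
                         std::shared_ptr<const std::string>> pool_;
    };

    int main() {
      InternPool pool;
      auto a = pool.Intern("config");
      auto b = pool.Intern("config");
      assert(a.get() == b.get());  // equality is now a pointer check
      return 0;
    }
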
 
 
index 9333e9e..750ceb3 100644
@@ -87,6 +87,8 @@
 //         - JSFunctionResultCache
 //         - ScopeInfo
 //         - TransitionArray
+//         - ScriptContextTable
+//         - WeakFixedArray
 //       - FixedDoubleArray
 //       - ExternalArray
 //         - ExternalUint8ClampedArray
@@ -241,10 +243,7 @@ enum WriteBarrierMode { SKIP_WRITE_BARRIER, UPDATE_WRITE_BARRIER };
 
 
 // Indicates whether a value can be loaded as a constant.
-enum StoreMode {
-  ALLOW_AS_CONSTANT,
-  FORCE_FIELD
-};
+enum StoreMode { ALLOW_IN_DESCRIPTOR, FORCE_IN_OBJECT };
 
 
 // PropertyNormalizationMode is used to specify whether to keep
@@ -853,12 +852,14 @@ class AccessorPair;
 class AllocationSite;
 class AllocationSiteCreationContext;
 class AllocationSiteUsageContext;
+class ConsString;
 class DictionaryElementsAccessor;
 class ElementsAccessor;
 class FixedArrayBase;
 class GlobalObject;
-class ObjectVisitor;
+class LayoutDescriptor;
 class LookupIterator;
+class ObjectVisitor;
 class StringStream;
 class TypeFeedbackVector;
 class WeakCell;
@@ -933,6 +934,7 @@ template <class C> inline bool Is(Object* obj);
   V(JSContextExtensionObject)      \
   V(JSGeneratorObject)             \
   V(JSModule)                      \
+  V(LayoutDescriptor)              \
   V(Map)                           \
   V(DescriptorArray)               \
   V(TransitionArray)               \
@@ -942,8 +944,10 @@ template <class C> inline bool Is(Object* obj);
   V(DependentCode)                 \
   V(FixedArray)                    \
   V(FixedDoubleArray)              \
+  V(WeakFixedArray)                \
   V(ConstantPoolArray)             \
   V(Context)                       \
+  V(ScriptContextTable)            \
   V(NativeContext)                 \
   V(ScopeInfo)                     \
   V(JSFunction)                    \
@@ -1229,6 +1233,8 @@ class Object {
   // Prints this object without details to a message accumulator.
   void ShortPrint(StringStream* accumulator);
 
+  void ShortPrint(std::ostream& os);  // NOLINT
+
   DECLARE_CAST(Object)
 
   // Layout description.
@@ -1240,6 +1246,9 @@ class Object {
 
   // Prints this object with details.
   void Print(std::ostream& os);  // NOLINT
+#else
+  void Print() { ShortPrint(); }
+  void Print(std::ostream& os) { ShortPrint(os); }  // NOLINT
 #endif
 
  private:
@@ -1442,6 +1451,8 @@ class HeapObject: public Object {
   static void VerifyHeapPointer(Object* p);
 #endif
 
+  inline bool NeedsToEnsureDoubleAlignment();
+
   // Layout description.
   // First field in a heap object is map.
   static const int kMapOffset = Object::kHeaderSize;
@@ -1812,6 +1823,10 @@ class JSObject: public JSReceiver {
   static void OptimizeAsPrototype(Handle<JSObject> object,
                                   PrototypeOptimizationMode mode);
   static void ReoptimizeIfPrototype(Handle<JSObject> object);
+  static void RegisterPrototypeUser(Handle<JSObject> prototype,
+                                    Handle<HeapObject> user);
+  static void UnregisterPrototypeUser(Handle<JSObject> prototype,
+                                      Handle<HeapObject> user);
 
   // Retrieve interceptors.
   InterceptorInfo* GetNamedInterceptor();
@@ -2049,7 +2064,8 @@ class JSObject: public JSReceiver {
   // an initial capacity for holding these properties.
   static void NormalizeProperties(Handle<JSObject> object,
                                   PropertyNormalizationMode mode,
-                                  int expected_additional_properties);
+                                  int expected_additional_properties,
+                                  const char* reason);
 
   // Convert and update the elements backing store to be a
   // SeededNumberDictionary dictionary.  Returns the backing after conversion.
@@ -2058,14 +2074,20 @@ class JSObject: public JSReceiver {
 
   // Transform slow named properties to fast variants.
   static void MigrateSlowToFast(Handle<JSObject> object,
-                                int unused_property_fields);
+                                int unused_property_fields, const char* reason);
+
+  inline bool IsUnboxedDoubleField(FieldIndex index);
 
   // Access fast-case object properties at index.
   static Handle<Object> FastPropertyAt(Handle<JSObject> object,
                                        Representation representation,
                                        FieldIndex index);
   inline Object* RawFastPropertyAt(FieldIndex index);
+  inline double RawFastDoublePropertyAt(FieldIndex index);
+
   inline void FastPropertyAtPut(FieldIndex index, Object* value);
+  inline void RawFastPropertyAtPut(FieldIndex index, Object* value);
+  inline void RawFastDoublePropertyAtPut(FieldIndex index, double value);
   void WriteToField(int descriptor, Object* value);
 
   // Access to in object properties.
@@ -2096,6 +2118,9 @@ class JSObject: public JSReceiver {
   MUST_USE_RESULT static MaybeHandle<Object> PreventExtensions(
       Handle<JSObject> object);
 
+  // ES5 Object.seal
+  MUST_USE_RESULT static MaybeHandle<Object> Seal(Handle<JSObject> object);
+
   // ES5 Object.freeze
   MUST_USE_RESULT static MaybeHandle<Object> Freeze(Handle<JSObject> object);
 
@@ -2127,6 +2152,8 @@ class JSObject: public JSReceiver {
 #ifdef OBJECT_PRINT
   void PrintProperties(std::ostream& os);   // NOLINT
   void PrintElements(std::ostream& os);     // NOLINT
+#endif
+#if defined(DEBUG) || defined(OBJECT_PRINT)
   void PrintTransitions(std::ostream& os);  // NOLINT
 #endif
 
@@ -2383,6 +2410,15 @@ class JSObject: public JSReceiver {
 
   static Handle<Smi> GetOrCreateIdentityHash(Handle<JSObject> object);
 
+  static Handle<SeededNumberDictionary> GetNormalizedElementDictionary(
+      Handle<JSObject> object);
+
+  // Helper for fast versions of preventExtensions, seal, and freeze.
+  // attrs is one of NONE, SEALED, or FROZEN (depending on the operation).
+  template <PropertyAttributes attrs>
+  MUST_USE_RESULT static MaybeHandle<Object> PreventExtensionsWithTransition(
+      Handle<JSObject> object);
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(JSObject);
 };
 
@@ -2448,10 +2484,12 @@ class FixedArray: public FixedArrayBase {
                                      int new_length,
                                      PretenureFlag pretenure = NOT_TENURED);
 
+  enum KeyFilter { ALL_KEYS, NON_SYMBOL_KEYS };
+
   // Add the elements of a JSArray to this FixedArray.
   MUST_USE_RESULT static MaybeHandle<FixedArray> AddKeysFromArrayLike(
-      Handle<FixedArray> content,
-      Handle<JSObject> array);
+      Handle<FixedArray> content, Handle<JSObject> array,
+      KeyFilter filter = ALL_KEYS);
 
   // Computes the union of keys and return the result.
   // Used for implementing "for (n in object) { }"
@@ -2576,6 +2614,45 @@ class FixedDoubleArray: public FixedArrayBase {
 };
 
 
+class WeakFixedArray : public FixedArray {
+ public:
+  enum SearchForDuplicates { kAlwaysAdd, kAddIfNotFound };
+
+  // If |maybe_array| is not a WeakFixedArray, a fresh one will be allocated.
+  static Handle<WeakFixedArray> Add(
+      Handle<Object> maybe_array, Handle<HeapObject> value,
+      SearchForDuplicates search_for_duplicates = kAlwaysAdd);
+
+  void Remove(Handle<HeapObject> value);
+
+  inline Object* Get(int index) const;
+  inline int Length() const;
+
+  DECLARE_CAST(WeakFixedArray)
+
+ private:
+  static const int kLastUsedIndexIndex = 0;
+  static const int kFirstIndex = 1;
+
+  static Handle<WeakFixedArray> Allocate(
+      Isolate* isolate, int size, Handle<WeakFixedArray> initialize_from);
+
+  static void Set(Handle<WeakFixedArray> array, int index,
+                  Handle<HeapObject> value);
+  inline void clear(int index);
+  inline bool IsEmptySlot(int index) const;
+
+  inline int last_used_index() const;
+  inline void set_last_used_index(int index);
+
+  // Disallow inherited setters.
+  void set(int index, Smi* value);
+  void set(int index, Object* value);
+  void set(int index, Object* value, WriteBarrierMode mode);
+  DISALLOW_IMPLICIT_CONSTRUCTORS(WeakFixedArray);
+};
+
+
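
WeakFixedArray packages a pattern that the new prototype-user registration above needs: a growable array whose slots do not keep their referents alive and are cleared by the GC. A rough analogue in standard C++, with std::weak_ptr standing in for GC-cleared slots (editorial sketch, not the implementation):

    #include <memory>
    #include <vector>

    template <typename T>
    class WeakArray {
     public:
      void Add(const std::shared_ptr<T>& value) {
        for (auto& slot : slots_) {        // reuse a cleared slot if any
          if (slot.expired()) { slot = value; return; }
        }
        slots_.push_back(value);           // otherwise grow, like Add()
      }
      void Remove(const std::shared_ptr<T>& value) {
        for (auto& slot : slots_) {
          if (slot.lock() == value) { slot.reset(); return; }
        }
      }
      std::vector<std::shared_ptr<T>> Live() const {
        std::vector<std::shared_ptr<T>> out;
        for (const auto& slot : slots_)
          if (auto p = slot.lock()) out.push_back(p);  // skip dead slots
        return out;
      }

     private:
      std::vector<std::weak_ptr<T>> slots_;
    };

    int main() {
      WeakArray<int> users;
      auto p = std::make_shared<int>(1);
      users.Add(p);
      p.reset();                            // referent dies
      return users.Live().empty() ? 0 : 1;  // slot is no longer reported
    }

The real class additionally tracks last_used_index() so Add() does not rescan from slot zero on every call; the sketch omits that optimization.
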
 // ConstantPoolArray describes a fixed-sized array containing constant pool
 // entries.
 //
@@ -2613,11 +2690,7 @@ class FixedDoubleArray: public FixedArrayBase {
 //
 class ConstantPoolArray: public HeapObject {
  public:
-  enum WeakObjectState {
-    NO_WEAK_OBJECTS,
-    WEAK_OBJECTS_IN_OPTIMIZED_CODE,
-    WEAK_OBJECTS_IN_IC
-  };
+  enum WeakObjectState { NO_WEAK_OBJECTS, WEAK_OBJECTS_IN_OPTIMIZED_CODE };
 
   enum Type {
     INT64 = 0,
@@ -3051,7 +3124,7 @@ class DescriptorArray: public FixedArray {
   static const int kDescriptorValue = 2;
   static const int kDescriptorSize = 3;
 
-#ifdef OBJECT_PRINT
+#if defined(DEBUG) || defined(OBJECT_PRINT)
   // For our gdb macros, we should perhaps change these in the future.
   void Print();
 
@@ -3129,9 +3202,7 @@ class DescriptorArray: public FixedArray {
 
   // Transfer a complete descriptor from the src descriptor array to this
   // descriptor array.
-  void CopyFrom(int index,
-                DescriptorArray* src,
-                const WhitenessWitness&);
+  void CopyFrom(int index, DescriptorArray* src, const WhitenessWitness&);
 
   inline void Set(int descriptor_number,
                   Descriptor* desc,
@@ -3459,44 +3530,6 @@ class StringTable: public HashTable<StringTable,
 };
 
 
-class MapCacheShape : public BaseShape<HashTableKey*> {
- public:
-  static inline bool IsMatch(HashTableKey* key, Object* value) {
-    return key->IsMatch(value);
-  }
-
-  static inline uint32_t Hash(HashTableKey* key) {
-    return key->Hash();
-  }
-
-  static inline uint32_t HashForObject(HashTableKey* key, Object* object) {
-    return key->HashForObject(object);
-  }
-
-  static inline Handle<Object> AsHandle(Isolate* isolate, HashTableKey* key);
-
-  static const int kPrefixSize = 0;
-  static const int kEntrySize = 2;
-};
-
-
-// MapCache.
-//
-// Maps keys that are a fixed array of unique names to a map.
-// Used for canonicalize maps for object literals.
-class MapCache: public HashTable<MapCache, MapCacheShape, HashTableKey*> {
- public:
-  // Find cached value for a name key, otherwise return null.
-  Object* Lookup(FixedArray* key);
-  static Handle<MapCache> Put(
-      Handle<MapCache> map_cache, Handle<FixedArray> key, Handle<Map> value);
-  DECLARE_CAST(MapCache)
-
- private:
-  DISALLOW_IMPLICIT_CONSTRUCTORS(MapCache);
-};
-
-
 template <typename Derived, typename Shape, typename Key>
 class Dictionary: public HashTable<Derived, Shape, Key> {
  protected:
@@ -3934,6 +3967,33 @@ class OrderedHashTable: public FixedArray {
   static const int kNotFound = -1;
   static const int kMinCapacity = 4;
 
+  static const int kNumberOfBucketsIndex = 0;
+  static const int kNumberOfElementsIndex = kNumberOfBucketsIndex + 1;
+  static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1;
+  static const int kHashTableStartIndex = kNumberOfDeletedElementsIndex + 1;
+  static const int kNextTableIndex = kNumberOfElementsIndex;
+
+  static const int kNumberOfBucketsOffset =
+      kHeaderSize + kNumberOfBucketsIndex * kPointerSize;
+  static const int kNumberOfElementsOffset =
+      kHeaderSize + kNumberOfElementsIndex * kPointerSize;
+  static const int kNumberOfDeletedElementsOffset =
+      kHeaderSize + kNumberOfDeletedElementsIndex * kPointerSize;
+  static const int kHashTableStartOffset =
+      kHeaderSize + kHashTableStartIndex * kPointerSize;
+  static const int kNextTableOffset =
+      kHeaderSize + kNextTableIndex * kPointerSize;
+
+  static const int kEntrySize = entrysize + 1;
+  static const int kChainOffset = entrysize;
+
+  static const int kLoadFactor = 2;
+
+  // NumberOfDeletedElements is set to kClearedTableSentinel when
+  // the table is cleared, which allows iterator transitions to
+  // optimize that case.
+  static const int kClearedTableSentinel = -1;
+
  private:
   static Handle<Derived> Rehash(Handle<Derived> table, int new_capacity);
 
@@ -3975,18 +4035,8 @@ class OrderedHashTable: public FixedArray {
     return set(kRemovedHolesIndex + index, Smi::FromInt(removed_index));
   }
 
-  static const int kNumberOfBucketsIndex = 0;
-  static const int kNumberOfElementsIndex = kNumberOfBucketsIndex + 1;
-  static const int kNumberOfDeletedElementsIndex = kNumberOfElementsIndex + 1;
-  static const int kHashTableStartIndex = kNumberOfDeletedElementsIndex + 1;
-
-  static const int kNextTableIndex = kNumberOfElementsIndex;
   static const int kRemovedHolesIndex = kHashTableStartIndex;
 
-  static const int kEntrySize = entrysize + 1;
-  static const int kChainOffset = entrysize;
-
-  static const int kLoadFactor = 2;
   static const int kMaxCapacity =
       (FixedArray::kMaxLength - kHashTableStartIndex)
       / (1 + (kEntrySize * kLoadFactor));
@@ -4025,7 +4075,6 @@ class OrderedHashMap:public OrderedHashTable<
     return get(EntryToIndex(entry) + kValueOffset);
   }
 
- private:
   static const int kValueOffset = 1;
 };
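
Moving the index and size constants into the public section makes the layout checkable from outside the class. Concretely, for an OrderedHashSet-style table (entrysize == 1) at kMinCapacity, the backing FixedArray holds three header fields, capacity / kLoadFactor bucket heads, and capacity entries of kEntrySize slots each. Editorial arithmetic under that bucket-then-entries layout assumption:

    #include <cstdio>

    int main() {
      const int entrysize = 1;                     // set: keys only
      const int kEntrySize = entrysize + 1;        // +1 for the chain link
      const int kLoadFactor = 2;                   // entries per bucket
      const int kHashTableStartIndex = 3;          // three header fields
      const int capacity = 4;                      // kMinCapacity
      const int buckets = capacity / kLoadFactor;  // 2 bucket heads
      const int length =
          kHashTableStartIndex + buckets + capacity * kEntrySize;
      std::printf("backing FixedArray length: %d\n", length);  // 13
      return 0;
    }
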
 
@@ -4811,6 +4860,7 @@ TYPED_ARRAYS(FIXED_TYPED_ARRAY_TRAITS)
 
 #undef FIXED_TYPED_ARRAY_TRAITS
 
+
 // DeoptimizationInputData is a fixed array used to hold the deoptimization
 // data for code generated by the Hydrogen/Lithium compiler.  It also
 // contains information about functions that were inlined.  If N different
@@ -5084,12 +5134,7 @@ class Code: public HeapObject {
   inline bool is_to_boolean_ic_stub() { return kind() == TO_BOOLEAN_IC; }
   inline bool is_keyed_stub();
   inline bool is_optimized_code() { return kind() == OPTIMIZED_FUNCTION; }
-  inline bool is_weak_stub();
-  inline void mark_as_weak_stub();
-  inline bool is_invalidated_weak_stub();
-  inline void mark_as_invalidated_weak_stub();
-
-  inline bool CanBeWeakStub() {
+  inline bool embeds_maps_weakly() {
     Kind k = kind();
     return (k == LOAD_IC || k == STORE_IC || k == KEYED_LOAD_IC ||
             k == KEYED_STORE_IC || k == COMPARE_NIL_IC) &&
@@ -5132,6 +5177,12 @@ class Code: public HeapObject {
   inline bool is_compiled_optimizable();
   inline void set_compiled_optimizable(bool value);
 
+  // [has_reloc_info_for_serialization]: For FUNCTION kind, tells if its
+  // reloc info includes runtime and external references to support
+  // serialization/deserialization.
+  inline bool has_reloc_info_for_serialization();
+  inline void set_has_reloc_info_for_serialization(bool value);
+
   // [allow_osr_at_loop_nesting_level]: For FUNCTION kind, tells for
   // how long the function has been marked for OSR and therefore which
   // level of loop nesting we are willing to do on-stack replacement
@@ -5341,7 +5392,7 @@ class Code: public HeapObject {
   // compilation stub.
   static void MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate);
   static void MarkCodeAsExecuted(byte* sequence, Isolate* isolate);
-  void MakeYoung();
+  void MakeYoung(Isolate* isolate);
   void MakeOlder(MarkingParity);
   static bool IsYoungSequence(Isolate* isolate, byte* sequence);
   bool IsOld();
@@ -5364,18 +5415,14 @@ class Code: public HeapObject {
   void VerifyEmbeddedObjectsInFullCode();
 #endif  // DEBUG
 
-  inline bool CanContainWeakObjects() {
-    return is_optimized_code() || is_weak_stub();
-  }
+  inline bool CanContainWeakObjects() { return is_optimized_code(); }
 
   inline bool IsWeakObject(Object* object) {
     return (is_optimized_code() && !is_turbofanned() &&
-            IsWeakObjectInOptimizedCode(object)) ||
-           (is_weak_stub() && IsWeakObjectInIC(object));
+            IsWeakObjectInOptimizedCode(object));
   }
 
   static inline bool IsWeakObjectInOptimizedCode(Object* object);
-  static inline bool IsWeakObjectInIC(Object* object);
 
   // Max loop nesting marker used to postpone OSR. We don't take loop
   // nesting that is deeper than 5 levels into account.
@@ -5399,7 +5446,7 @@ class Code: public HeapObject {
       kKindSpecificFlags1Offset + kIntSize;
   // Note: We might be able to squeeze this into the flags above.
   static const int kPrologueOffset = kKindSpecificFlags2Offset + kIntSize;
-  static const int kConstantPoolOffset = kPrologueOffset + kPointerSize;
+  static const int kConstantPoolOffset = kPrologueOffset + kIntSize;
 
   static const int kHeaderPaddingStart = kConstantPoolOffset + kPointerSize;
 
@@ -5407,6 +5454,8 @@ class Code: public HeapObject {
   // the Code object header.
   static const int kHeaderSize =
       (kHeaderPaddingStart + kCodeAlignmentMask) & ~kCodeAlignmentMask;
+  // Ensure that the slot for the constant pool pointer is aligned.
+  STATIC_ASSERT((kConstantPoolOffset & kPointerAlignmentMask) == 0);
 
   // Byte offsets within kKindSpecificFlags1Offset.
   static const int kOptimizableOffset = kKindSpecificFlags1Offset;
@@ -5416,6 +5465,8 @@ class Code: public HeapObject {
       public BitField<bool, 0, 1> {};  // NOLINT
   class FullCodeFlagsHasDebugBreakSlotsField: public BitField<bool, 1, 1> {};
   class FullCodeFlagsIsCompiledOptimizable: public BitField<bool, 2, 1> {};
+  class FullCodeFlagsHasRelocInfoForSerialization
+      : public BitField<bool, 3, 1> {};
 
   static const int kProfilerTicksOffset = kFullCodeFlags + 1;
 
@@ -5433,9 +5484,7 @@ class Code: public HeapObject {
   static const int kHasFunctionCacheBit =
       kStackSlotsFirstBit + kStackSlotsBitCount;
   static const int kMarkedForDeoptimizationBit = kHasFunctionCacheBit + 1;
-  static const int kWeakStubBit = kMarkedForDeoptimizationBit + 1;
-  static const int kInvalidatedWeakStubBit = kWeakStubBit + 1;
-  static const int kIsTurbofannedBit = kInvalidatedWeakStubBit + 1;
+  static const int kIsTurbofannedBit = kMarkedForDeoptimizationBit + 1;
 
   STATIC_ASSERT(kStackSlotsFirstBit + kStackSlotsBitCount <= 32);
   STATIC_ASSERT(kIsTurbofannedBit + 1 <= 32);
@@ -5446,9 +5495,6 @@ class Code: public HeapObject {
   };  // NOLINT
   class MarkedForDeoptimizationField
       : public BitField<bool, kMarkedForDeoptimizationBit, 1> {};   // NOLINT
-  class WeakStubField : public BitField<bool, kWeakStubBit, 1> {};  // NOLINT
-  class InvalidatedWeakStubField
-      : public BitField<bool, kInvalidatedWeakStubBit, 1> {};  // NOLINT
   class IsTurbofannedField : public BitField<bool, kIsTurbofannedBit, 1> {
   };  // NOLINT
 
@@ -5530,11 +5576,6 @@ class CompilationInfo;
 class DependentCode: public FixedArray {
  public:
   enum DependencyGroup {
-    // Group of IC stubs that weakly embed this map and depend on being
-    // invalidated when the map is garbage collected. Dependent IC stubs form
-    // a linked list. This group stores only the head of the list. This means
-    // that the number_of_entries(kWeakICGroup) is 0 or 1.
-    kWeakICGroup,
     // Group of code that weakly embed this map and depend on being
     // deoptimized when the map is garbage collected.
     kWeakCodeGroup,
@@ -5595,7 +5636,6 @@ class DependentCode: public FixedArray {
 
   bool MarkCodeForDeoptimization(Isolate* isolate,
                                  DependentCode::DependencyGroup group);
-  void AddToDependentICList(Handle<Code> stub);
 
   // The following low-level accessors should only be used by this class
   // and the mark compact collector.
@@ -5675,15 +5715,20 @@ class Map: public HeapObject {
   class OwnsDescriptors : public BitField<bool, 21, 1> {};
   class HasInstanceCallHandler : public BitField<bool, 22, 1> {};
   class Deprecated : public BitField<bool, 23, 1> {};
-  class IsFrozen : public BitField<bool, 24, 1> {};
-  class IsUnstable : public BitField<bool, 25, 1> {};
-  class IsMigrationTarget : public BitField<bool, 26, 1> {};
-  class DoneInobjectSlackTracking : public BitField<bool, 27, 1> {};
-  // Bit 28 is free.
+  class IsUnstable : public BitField<bool, 24, 1> {};
+  class IsMigrationTarget : public BitField<bool, 25, 1> {};
+  // Bits 26 and 27 are free.
 
   // Keep this bit field at the very end for better code in
   // Builtins::kJSConstructStubGeneric stub.
-  class ConstructionCount:          public BitField<int, 29, 3> {};
+  // This counter is used for in-object slack tracking and for map aging.
+  // The in-object slack tracking is considered enabled when the counter is
+  // in the range [kSlackTrackingCounterStart, kSlackTrackingCounterEnd].
+  class Counter : public BitField<int, 28, 4> {};
+  static const int kSlackTrackingCounterStart = 14;
+  static const int kSlackTrackingCounterEnd = 8;
+  static const int kRetainingCounterStart = kSlackTrackingCounterEnd - 1;
+  static const int kRetainingCounterEnd = 0;
 
   // Tells whether the object in the prototype property will be used
   // for instances created from this function.  If the prototype
@@ -5756,7 +5801,7 @@ class Map: public HeapObject {
   inline bool is_prototype_map();
 
   inline void set_elements_kind(ElementsKind elements_kind) {
-    DCHECK(elements_kind < kElementsKindCount);
+    DCHECK(static_cast<int>(elements_kind) < kElementsKindCount);
     DCHECK(kElementsKindCount <= (1 << Map::ElementsKindBits::kSize));
     set_bit_field2(Map::ElementsKindBits::update(bit_field2(), elements_kind));
     DCHECK(this->elements_kind() == elements_kind);
@@ -5822,7 +5867,7 @@ class Map: public HeapObject {
 
   inline Map* GetTransition(int transition_index);
   inline int SearchSpecialTransition(Symbol* name);
-  inline int SearchTransition(PropertyType type, Name* name,
+  inline int SearchTransition(PropertyKind kind, Name* name,
                               PropertyAttributes attributes);
   inline FixedArrayBase* GetInitialElements();
 
@@ -5853,8 +5898,8 @@ class Map: public HeapObject {
       Handle<HeapType> type1,
       Handle<HeapType> type2,
       Isolate* isolate);
-  static void GeneralizeFieldType(Handle<Map> map,
-                                  int modify_index,
+  static void GeneralizeFieldType(Handle<Map> map, int modify_index,
+                                  Representation new_representation,
                                   Handle<HeapType> new_field_type);
   static Handle<Map> GeneralizeRepresentation(
       Handle<Map> map,
@@ -5878,7 +5923,8 @@ class Map: public HeapObject {
                                             int descriptor_number,
                                             Handle<Object> value);
 
-  static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode);
+  static Handle<Map> Normalize(Handle<Map> map, PropertyNormalizationMode mode,
+                               const char* reason);
 
   // Returns the constructor name (the name (possibly, inferred name) of the
   // function that was used to instantiate the object).
@@ -5901,13 +5947,33 @@ class Map: public HeapObject {
 
   // [prototype]: implicit prototype object.
   DECL_ACCESSORS(prototype, Object)
+  // TODO(jkummerow): make set_prototype private.
+  void SetPrototype(Handle<Object> prototype,
+                    PrototypeOptimizationMode proto_mode = FAST_PROTOTYPE);
+  bool ShouldRegisterAsPrototypeUser(Handle<JSObject> prototype);
+  bool CanUseOptimizationsBasedOnPrototypeRegistry();
 
   // [constructor]: points back to the function responsible for this map.
   DECL_ACCESSORS(constructor, Object)
 
   // [instance descriptors]: describes the object.
   DECL_ACCESSORS(instance_descriptors, DescriptorArray)
-  inline void InitializeDescriptors(DescriptorArray* descriptors);
+
+  // [layout descriptor]: describes the object layout.
+  DECL_ACCESSORS(layout_descriptor, LayoutDescriptor)
+  // |layout descriptor| accessor which can be used from GC.
+  inline LayoutDescriptor* layout_descriptor_gc_safe();
+  inline bool HasFastPointerLayout() const;
+
+  // |layout descriptor| accessor that is safe to call even when
+  // FLAG_unbox_double_fields is disabled (in this case Map does not contain
+  // |layout_descriptor| field at all).
+  inline LayoutDescriptor* GetLayoutDescriptor();
+
+  inline void UpdateDescriptors(DescriptorArray* descriptors,
+                                LayoutDescriptor* layout_descriptor);
+  inline void InitializeDescriptors(DescriptorArray* descriptors,
+                                    LayoutDescriptor* layout_descriptor);
 
   // [stub cache]: contains stubs compiled for this map.
   DECL_ACCESSORS(code_cache, Object)
@@ -6006,16 +6072,12 @@ class Map: public HeapObject {
   inline void set_owns_descriptors(bool owns_descriptors);
   inline bool has_instance_call_handler();
   inline void set_has_instance_call_handler();
-  inline void freeze();
-  inline bool is_frozen();
   inline void mark_unstable();
   inline bool is_stable();
   inline void set_migration_target(bool value);
   inline bool is_migration_target();
-  inline void set_done_inobject_slack_tracking(bool value);
-  inline bool done_inobject_slack_tracking();
-  inline void set_construction_count(int value);
-  inline int construction_count();
+  inline void set_counter(int value);
+  inline int counter();
   inline void deprecate();
   inline bool is_deprecated();
   inline bool CanBeDeprecated();
@@ -6067,7 +6129,10 @@ class Map: public HeapObject {
 
   static Handle<Map> CopyForObserved(Handle<Map> map);
 
-  static Handle<Map> CopyForFreeze(Handle<Map> map);
+  static Handle<Map> CopyForPreventExtensions(Handle<Map> map,
+                                              PropertyAttributes attrs_to_add,
+                                              Handle<Symbol> transition_marker,
+                                              const char* reason);
   // Maximal number of fast properties. Used to restrict the number of map
   // transitions to avoid an explosion in the number of maps for objects used as
   // dictionaries.
@@ -6087,7 +6152,7 @@ class Map: public HeapObject {
 
   // Returns a copy of the map, with all transitions dropped from the
   // instance descriptors.
-  static Handle<Map> Copy(Handle<Map> map);
+  static Handle<Map> Copy(Handle<Map> map, const char* reason);
   static Handle<Map> Create(Isolate* isolate, int inobject_properties);
 
   // Returns the next free property index (only valid for FAST MODE).
@@ -6189,11 +6254,11 @@ class Map: public HeapObject {
   static void AddDependentCode(Handle<Map> map,
                                DependentCode::DependencyGroup group,
                                Handle<Code> code);
-  static void AddDependentIC(Handle<Map> map,
-                             Handle<Code> stub);
 
   bool IsMapInArrayPrototypeChain();
 
+  static Handle<WeakCell> WeakCellForMap(Handle<Map> map);
+
   // Dispatched behavior.
   DECLARE_PRINTER(Map)
   DECLARE_VERIFIER(Map)
@@ -6216,10 +6281,11 @@ class Map: public HeapObject {
   // the original map.  That way we can transition to the same map if the same
   // prototype is set, rather than creating a new map every time.  The
   // transitions are in the form of a map where the keys are prototype objects
-  // and the values are the maps the are transitioned to.
+  // and the values are the maps they transition to.
   static const int kMaxCachedPrototypeTransitions = 256;
   static Handle<Map> TransitionToPrototype(Handle<Map> map,
-                                           Handle<Object> prototype);
+                                           Handle<Object> prototype,
+                                           PrototypeOptimizationMode mode);
 
   static const int kMaxPreAllocatedPropertyFields = 255;
 
@@ -6237,7 +6303,13 @@ class Map: public HeapObject {
       kConstructorOffset + kPointerSize;
   static const int kDescriptorsOffset =
       kTransitionsOrBackPointerOffset + kPointerSize;
+#if V8_DOUBLE_FIELDS_UNBOXING
+  static const int kLayoutDecriptorOffset = kDescriptorsOffset + kPointerSize;
+  static const int kCodeCacheOffset = kLayoutDecriptorOffset + kPointerSize;
+#else
+  static const int kLayoutDecriptorOffset = 1;  // Must never be accessed.
   static const int kCodeCacheOffset = kDescriptorsOffset + kPointerSize;
+#endif
   static const int kDependentCodeOffset = kCodeCacheOffset + kPointerSize;
   static const int kSize = kDependentCodeOffset + kPointerSize;
 
@@ -6315,6 +6387,18 @@ class Map: public HeapObject {
   // The "shared" flags of both this map and |other| are ignored.
   bool EquivalentToForNormalization(Map* other, PropertyNormalizationMode mode);
 
+  // Returns true if given field is unboxed double.
+  inline bool IsUnboxedDoubleField(FieldIndex index);
+
+#if TRACE_MAPS
+  static void TraceTransition(const char* what, Map* from, Map* to, Name* name);
+  static void TraceAllTransitions(Map* map);
+#endif
+
+  static inline Handle<Map> CopyInstallDescriptorsForTesting(
+      Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
+      Handle<LayoutDescriptor> layout_descriptor);
+
  private:
   static void ConnectElementsTransition(Handle<Map> parent, Handle<Map> child);
   static void ConnectTransition(Handle<Map> parent, Handle<Map> child,
@@ -6326,17 +6410,17 @@ class Map: public HeapObject {
                                      Handle<DescriptorArray> descriptors,
                                      Descriptor* descriptor);
   static Handle<Map> CopyInstallDescriptors(
-      Handle<Map> map,
-      int new_descriptor,
-      Handle<DescriptorArray> descriptors);
+      Handle<Map> map, int new_descriptor, Handle<DescriptorArray> descriptors,
+      Handle<LayoutDescriptor> layout_descriptor);
   static Handle<Map> CopyAddDescriptor(Handle<Map> map,
                                        Descriptor* descriptor,
                                        TransitionFlag flag);
-  static Handle<Map> CopyReplaceDescriptors(Handle<Map> map,
-                                            Handle<DescriptorArray> descriptors,
-                                            TransitionFlag flag,
-                                            MaybeHandle<Name> maybe_name,
-                                            SimpleTransitionFlag simple_flag);
+  static Handle<Map> CopyReplaceDescriptors(
+      Handle<Map> map, Handle<DescriptorArray> descriptors,
+      Handle<LayoutDescriptor> layout_descriptor, TransitionFlag flag,
+      MaybeHandle<Name> maybe_name, const char* reason,
+      SimpleTransitionFlag simple_flag);
+
   static Handle<Map> CopyReplaceDescriptor(Handle<Map> map,
                                            Handle<DescriptorArray> descriptors,
                                            Descriptor* descriptor,
@@ -6364,13 +6448,15 @@ class Map: public HeapObject {
   void ZapTransitions();
 
   void DeprecateTransitionTree();
-  void DeprecateTarget(PropertyType type, Name* key,
+  bool DeprecateTarget(PropertyKind kind, Name* key,
                        PropertyAttributes attributes,
-                       DescriptorArray* new_descriptors);
+                       DescriptorArray* new_descriptors,
+                       LayoutDescriptor* new_layout_descriptor);
 
   Map* FindLastMatchMap(int verbatim, int length, DescriptorArray* descriptors);
 
   void UpdateFieldType(int descriptor_number, Handle<Name> name,
+                       Representation new_representation,
                        Handle<HeapType> new_type);
 
   void PrintGeneralization(FILE* file,
@@ -6709,6 +6795,13 @@ class SharedFunctionInfo: public HeapObject {
   // available.
   DECL_ACCESSORS(feedback_vector, TypeFeedbackVector)
 
+#if TRACE_MAPS
+  // [unique_id] - For --trace-maps purposes, an identifier that's persistent
+  // even if the GC moves this SharedFunctionInfo.
+  inline int unique_id() const;
+  inline void set_unique_id(int value);
+#endif
+
   // [instance class name]: class name for instances.
   DECL_ACCESSORS(instance_class_name, Object)
 
@@ -6811,6 +6904,13 @@ class SharedFunctionInfo: public HeapObject {
   // False if the function definitely does not allocate an arguments object.
   DECL_BOOLEAN_ACCESSORS(uses_arguments)
 
+  // Indicates that this function uses a super property.
+  // This is needed to set up the [[HomeObject]] on the function instance.
+  DECL_BOOLEAN_ACCESSORS(uses_super_property)
+
+  // Indicates that this function uses the super constructor.
+  DECL_BOOLEAN_ACCESSORS(uses_super_constructor_call)
+
   // True if the function has any duplicated parameter names.
   DECL_BOOLEAN_ACCESSORS(has_duplicate_parameters)
 
@@ -6855,6 +6955,9 @@ class SharedFunctionInfo: public HeapObject {
   // Indicates that this function is a concise method.
   DECL_BOOLEAN_ACCESSORS(is_concise_method)
 
+  // Indicates that this function is a default constructor.
+  DECL_BOOLEAN_ACCESSORS(is_default_constructor)
+
   // Indicates that this function is an asm function.
   DECL_BOOLEAN_ACCESSORS(asm_function)
 
@@ -6875,7 +6978,7 @@ class SharedFunctionInfo: public HeapObject {
   // shared function info.
   void DisableOptimization(BailoutReason reason);
 
-  inline BailoutReason DisableOptimizationReason();
+  inline BailoutReason disable_optimization_reason();
 
   // Lookup the bailout ID and DCHECK that it exists in the non-optimized
   // code, returns whether it asserted (i.e., always true if assertions are
@@ -6910,7 +7013,7 @@ class SharedFunctionInfo: public HeapObject {
   inline void set_opt_count_and_bailout_reason(int value);
   inline int opt_count_and_bailout_reason() const;
 
-  void set_bailout_reason(BailoutReason reason) {
+  void set_disable_optimization_reason(BailoutReason reason) {
     set_opt_count_and_bailout_reason(
         DisabledOptimizationReasonBits::update(opt_count_and_bailout_reason(),
                                                reason));
@@ -6955,10 +7058,16 @@ class SharedFunctionInfo: public HeapObject {
   static const int kInferredNameOffset = kDebugInfoOffset + kPointerSize;
   static const int kFeedbackVectorOffset =
       kInferredNameOffset + kPointerSize;
+#if TRACE_MAPS
+  static const int kUniqueIdOffset = kFeedbackVectorOffset + kPointerSize;
+  static const int kLastPointerFieldOffset = kUniqueIdOffset;
+#else
+  static const int kLastPointerFieldOffset = kFeedbackVectorOffset;
+#endif
+
 #if V8_HOST_ARCH_32_BIT
   // Smi fields.
-  static const int kLengthOffset =
-      kFeedbackVectorOffset + kPointerSize;
+  static const int kLengthOffset = kLastPointerFieldOffset + kPointerSize;
   static const int kFormalParameterCountOffset = kLengthOffset + kPointerSize;
   static const int kExpectedNofPropertiesOffset =
       kFormalParameterCountOffset + kPointerSize;
@@ -6994,8 +7103,7 @@ class SharedFunctionInfo: public HeapObject {
 // word is not set and thus this word cannot be treated as pointer
 // to HeapObject during old space traversal.
 #if V8_TARGET_LITTLE_ENDIAN
-  static const int kLengthOffset =
-      kFeedbackVectorOffset + kPointerSize;
+  static const int kLengthOffset = kLastPointerFieldOffset + kPointerSize;
   static const int kFormalParameterCountOffset =
       kLengthOffset + kIntSize;
 
@@ -7029,7 +7137,7 @@ class SharedFunctionInfo: public HeapObject {
 
 #elif V8_TARGET_BIG_ENDIAN
   static const int kFormalParameterCountOffset =
-      kFeedbackVectorOffset + kPointerSize;
+      kLastPointerFieldOffset + kPointerSize;
   static const int kLengthOffset = kFormalParameterCountOffset + kIntSize;
 
   static const int kNumLiteralsOffset = kLengthOffset + kIntSize;
@@ -7062,7 +7170,7 @@ class SharedFunctionInfo: public HeapObject {
   static const int kAlignedSize = POINTER_SIZE_ALIGN(kSize);
 
   typedef FixedBodyDescriptor<kNameOffset,
-                              kFeedbackVectorOffset + kPointerSize,
+                              kLastPointerFieldOffset + kPointerSize,
                               kSize> BodyDescriptor;
 
   // Bit positions in start_position_and_type.
@@ -7080,6 +7188,8 @@ class SharedFunctionInfo: public HeapObject {
     kOptimizationDisabled,
     kStrictModeFunction,
     kUsesArguments,
+    kUsesSuperProperty,
+    kUsesSuperConstructorCall,
     kHasDuplicateParameters,
     kNative,
     kInlineBuiltin,
@@ -7092,12 +7202,13 @@ class SharedFunctionInfo: public HeapObject {
     kIsArrow,
     kIsGenerator,
     kIsConciseMethod,
+    kIsDefaultConstructor,
     kIsAsmFunction,
     kDeserialized,
     kCompilerHintsCount  // Pseudo entry
   };
 
-  class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 3> {};
+  class FunctionKindBits : public BitField<FunctionKind, kIsArrow, 4> {};
 
   class DeoptCountBits : public BitField<int, 0, 4> {};
   class OptReenableTriesBits : public BitField<int, 4, 18> {};
@@ -7302,8 +7413,7 @@ class JSFunction: public JSObject {
   // Mark this function for lazy recompilation. The function will be
   // recompiled the next time it is executed.
   void MarkForOptimization();
-  void MarkForConcurrentOptimization();
-  void MarkInOptimizationQueue();
+  void AttemptConcurrentOptimization();
 
   // Tells whether or not the function is already marked for lazy
   // recompilation.
@@ -7323,13 +7433,12 @@ class JSFunction: public JSObject {
   // Here is the algorithm to reclaim the unused inobject space:
   // - Detect the first constructor call for this JSFunction.
   //   When it happens enter the "in progress" state: initialize construction
-  //   counter in the initial_map and set the |done_inobject_slack_tracking|
-  //   flag.
+  //   counter in the initial_map.
   // - While the tracking is in progress create objects filled with
   //   one_pointer_filler_map instead of undefined_value. This way they can be
   //   resized quickly and safely.
-  // - Once enough (kGenerousAllocationCount) objects have been created
-  //   compute the 'slack' (traverse the map transition tree starting from the
+  // - Once enough objects have been created, compute the 'slack'
+  //   (traverse the map transition tree starting from the
   //   initial_map and find the lowest value of unused_property_fields).
   // - Traverse the transition tree again and decrease the instance size
   //   of every map. Existing objects will resize automatically (they are
@@ -7342,23 +7451,17 @@ class JSFunction: public JSObject {
   //  Important: inobject slack tracking is not attempted during the snapshot
   //  creation.
 
-  static const int kGenerousAllocationCount = Map::ConstructionCount::kMax;
-  static const int kFinishSlackTracking     = 1;
-  static const int kNoSlackTracking         = 0;
-
   // True if the initial_map is set and the object constructions countdown
   // counter is not zero.
+  static const int kGenerousAllocationCount =
+      Map::kSlackTrackingCounterStart - Map::kSlackTrackingCounterEnd + 1;
   inline bool IsInobjectSlackTrackingInProgress();
 
   // Starts the tracking.
   // Initializes object constructions countdown counter in the initial map.
-  // IsInobjectSlackTrackingInProgress is normally true after this call,
-  // except when tracking have not been started (e.g. the map has no unused
-  // properties or the snapshot is being built).
   void StartInobjectSlackTracking();
 
   // Completes the tracking.
-  // IsInobjectSlackTrackingInProgress is false after this call.
   void CompleteInobjectSlackTracking();
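
With the ConstructionCount field folded into Map::Counter, the tracking window is exactly kSlackTrackingCounterStart - kSlackTrackingCounterEnd + 1 constructions, which is the new definition of kGenerousAllocationCount above. The countdown described in the comment block can be simulated in a few lines; this is an editorial model of the state machine, not V8 code, and the per-object slack values are stand-ins:

    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
      const int kStart = 14, kEnd = 8;  // Map::kSlackTrackingCounter{Start,End}
      int counter = kStart;             // set by StartInobjectSlackTracking()
      std::vector<int> unused_fields;
      // Every constructor call observed during tracking decrements the
      // counter; tracking is "in progress" while counter >= kEnd.
      while (counter >= kEnd) {
        int observed_unused_slots = counter % 3;  // stand-in measurement
        unused_fields.push_back(observed_unused_slots);
        --counter;
      }
      // CompleteInobjectSlackTracking(): shrink every map in the
      // transition tree by the minimum slack seen anywhere in it.
      int slack =
          *std::min_element(unused_fields.begin(), unused_fields.end());
      std::printf("constructions tracked: %zu, shrink by %d fields\n",
                  unused_fields.size(), slack);  // 7 tracked
      return 0;
    }
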
 
   // [literals_or_bindings]: Fixed array holding either
@@ -7530,19 +7633,18 @@ class GlobalObject: public JSObject {
   // [native context]: the natives corresponding to this global object.
   DECL_ACCESSORS(native_context, Context)
 
-  // [global context]: the most recent (i.e. innermost) global context.
-  DECL_ACCESSORS(global_context, Context)
-
   // [global proxy]: the global proxy object of the context
   DECL_ACCESSORS(global_proxy, JSObject)
 
   DECLARE_CAST(GlobalObject)
 
+  static void InvalidatePropertyCell(Handle<GlobalObject> object,
+                                     Handle<Name> name);
+
   // Layout description.
   static const int kBuiltinsOffset = JSObject::kHeaderSize;
   static const int kNativeContextOffset = kBuiltinsOffset + kPointerSize;
-  static const int kGlobalContextOffset = kNativeContextOffset + kPointerSize;
-  static const int kGlobalProxyOffset = kGlobalContextOffset + kPointerSize;
+  static const int kGlobalProxyOffset = kNativeContextOffset + kPointerSize;
   static const int kHeaderSize = kGlobalProxyOffset + kPointerSize;
 
  private:
@@ -7902,12 +8004,11 @@ class JSRegExp: public JSObject {
       FixedArray::kHeaderSize + kIrregexpCaptureCountIndex * kPointerSize;
 
   // In-object fields.
-  static const int kSourceFieldIndex = 0;
-  static const int kGlobalFieldIndex = 1;
-  static const int kIgnoreCaseFieldIndex = 2;
-  static const int kMultilineFieldIndex = 3;
-  static const int kLastIndexFieldIndex = 4;
-  static const int kInObjectFieldCount = 5;
+  static const int kGlobalFieldIndex = 0;
+  static const int kIgnoreCaseFieldIndex = 1;
+  static const int kMultilineFieldIndex = 2;
+  static const int kLastIndexFieldIndex = 3;
+  static const int kInObjectFieldCount = 4;
 
   // The uninitialized value for a regexp code object.
   static const int kUninitializedValue = -1;
@@ -7990,6 +8091,7 @@ class CodeCache: public Struct {
  public:
   DECL_ACCESSORS(default_cache, FixedArray)
   DECL_ACCESSORS(normal_type_cache, Object)
+  DECL_ACCESSORS(weak_cell_cache, Object)
 
   // Add the code object to the cache.
   static void Update(
@@ -8017,7 +8119,8 @@ class CodeCache: public Struct {
   static const int kDefaultCacheOffset = HeapObject::kHeaderSize;
   static const int kNormalTypeCacheOffset =
       kDefaultCacheOffset + kPointerSize;
-  static const int kSize = kNormalTypeCacheOffset + kPointerSize;
+  static const int kWeakCellCacheOffset = kNormalTypeCacheOffset + kPointerSize;
+  static const int kSize = kWeakCellCacheOffset + kPointerSize;
 
  private:
   static void UpdateDefaultCache(
@@ -8330,14 +8433,11 @@ class AllocationSite: public Struct {
   static void DigestTransitionFeedback(Handle<AllocationSite> site,
                                        ElementsKind to_kind);
 
-  enum Reason {
-    TENURING,
-    TRANSITIONS
-  };
+  static void RegisterForDeoptOnTenureChange(Handle<AllocationSite> site,
+                                             CompilationInfo* info);
 
-  static void AddDependentCompilationInfo(Handle<AllocationSite> site,
-                                          Reason reason,
-                                          CompilationInfo* info);
+  static void RegisterForDeoptOnTransitionChange(Handle<AllocationSite> site,
+                                                 CompilationInfo* info);
 
   DECLARE_PRINTER(AllocationSite)
   DECLARE_VERIFIER(AllocationSite)
@@ -8369,7 +8469,10 @@ class AllocationSite: public Struct {
                               kSize> BodyDescriptor;
 
  private:
-  inline DependentCode::DependencyGroup ToDependencyGroup(Reason reason);
+  static void AddDependentCompilationInfo(Handle<AllocationSite> site,
+                                          DependentCode::DependencyGroup group,
+                                          CompilationInfo* info);
+
   bool PretenuringDecisionMade() {
     return pretenure_decision() != kUndecided;
   }
@@ -8462,6 +8565,11 @@ class StringHasher {
   // Reusable parts of the hashing algorithm.
   INLINE(static uint32_t AddCharacterCore(uint32_t running_hash, uint16_t c));
   INLINE(static uint32_t GetHashCore(uint32_t running_hash));
+  INLINE(static uint32_t ComputeRunningHash(uint32_t running_hash,
+                                            const uc16* chars, int length));
+  INLINE(static uint32_t ComputeRunningHashOneByte(uint32_t running_hash,
+                                                   const char* chars,
+                                                   int length));
 
  protected:
   // Returns the value to store in the hash field of a string with
@@ -8498,6 +8606,7 @@ class IteratingStringHasher : public StringHasher {
  private:
   inline IteratingStringHasher(int len, uint32_t seed)
       : StringHasher(len, seed) {}
+  void VisitConsString(ConsString* cons_string);
   DISALLOW_COPY_AND_ASSIGN(IteratingStringHasher);
 };
 
@@ -8578,6 +8687,10 @@ class Name: public HeapObject {
   DECLARE_CAST(Name)
 
   DECLARE_PRINTER(Name)
+#if TRACE_MAPS
+  void NameShortPrint();
+  int NameShortPrint(Vector<char> str);
+#endif
 
   // Layout description.
   static const int kHashFieldSlot = HeapObject::kHeaderSize;
@@ -8680,6 +8793,10 @@ class Symbol: public Name {
 
   const char* PrivateSymbolToName() const;
 
+#if TRACE_MAPS
+  friend class Name;  // For PrivateSymbolToName.
+#endif
+
   DISALLOW_IMPLICIT_CONSTRUCTORS(Symbol);
 };
 
@@ -8780,6 +8897,9 @@ class String: public Name {
     friend class String;
   };
 
+  template <typename Char>
+  INLINE(Vector<const Char> GetCharVector());
+
   // Get and set the length of the string.
   inline int length() const;
   inline void set_length(int value);
@@ -8896,7 +9016,7 @@ class String: public Name {
   // Dispatched behavior.
   void StringShortPrint(StringStream* accumulator);
   void PrintUC16(std::ostream& os, int start = 0, int end = -1);  // NOLINT
-#ifdef OBJECT_PRINT
+#if defined(DEBUG) || defined(OBJECT_PRINT)
   char* ToAsciiArray();
 #endif
   DECLARE_PRINTER(String)
@@ -9365,6 +9485,8 @@ class FlatStringReader : public Relocatable {
   FlatStringReader(Isolate* isolate, Vector<const char> input);
   void PostGarbageCollection();
   inline uc32 Get(int index);
+  template <typename Char>
+  inline Char Get(int index);
   int length() { return length_; }
  private:
   String** str_;
@@ -9561,8 +9683,10 @@ class PropertyCell: public Cell {
   // of the cell's current type and the value's type. If the change causes
   // a change of the type of the cell's contents, code dependent on the cell
   // will be deoptimized.
-  static void SetValueInferType(Handle<PropertyCell> cell,
-                                Handle<Object> value);
+  // Usually returns the value that was passed in, but may perform
+  // non-observable modifications on it, such as internalizing strings.
+  static Handle<Object> SetValueInferType(Handle<PropertyCell> cell,
+                                          Handle<Object> value);
 
   // Computes the new type of the cell's contents for the given value, but
   // without actually modifying the 'type' field.
@@ -10156,10 +10280,13 @@ class JSArray: public JSObject {
                                            uint32_t index,
                                            Handle<Object> value);
 
-  static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
+  static bool HasReadOnlyLength(Handle<JSArray> array);
   static bool WouldChangeReadOnlyLength(Handle<JSArray> array, uint32_t index);
   static MaybeHandle<Object> ReadOnlyLengthError(Handle<JSArray> array);
 
+  // TODO(adamk): Remove this method in favor of HasReadOnlyLength().
+  static bool IsReadOnlyLengthDescriptor(Handle<Map> jsarray_map);
+
   // Initialize the array with the given capacity. The function may
   // fail due to out-of-memory situations, but only if the requested
   // capacity is non-zero.
@@ -10512,6 +10639,10 @@ class InterceptorInfo: public Struct {
   DECL_ACCESSORS(deleter, Object)
   DECL_ACCESSORS(enumerator, Object)
   DECL_ACCESSORS(data, Object)
+  DECL_BOOLEAN_ACCESSORS(can_intercept_symbols)
+
+  inline int flags() const;
+  inline void set_flags(int flags);
 
   DECLARE_CAST(InterceptorInfo)
 
@@ -10525,7 +10656,10 @@ class InterceptorInfo: public Struct {
   static const int kDeleterOffset = kQueryOffset + kPointerSize;
   static const int kEnumeratorOffset = kDeleterOffset + kPointerSize;
   static const int kDataOffset = kEnumeratorOffset + kPointerSize;
-  static const int kSize = kDataOffset + kPointerSize;
+  static const int kFlagsOffset = kDataOffset + kPointerSize;
+  static const int kSize = kFlagsOffset + kPointerSize;
+
+  static const int kCanInterceptSymbolsBit = 0;
 
  private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(InterceptorInfo);
index acff5b6..6926f47 100644
 namespace v8 {
 namespace internal {
 
+namespace {
+
+void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
+                                bool restore_function_code) {
+  // The recompile job is allocated in the CompilationInfo's zone.
+  CompilationInfo* info = job->info();
+  if (restore_function_code) {
+    if (info->is_osr()) {
+      if (!job->IsWaitingForInstall()) {
+        // Remove stack check that guards OSR entry on original code.
+        Handle<Code> code = info->unoptimized_code();
+        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
+        BackEdgeTable::RemoveStackCheck(code, offset);
+      }
+    } else {
+      Handle<JSFunction> function = info->closure();
+      function->ReplaceCode(function->shared()->code());
+    }
+  }
+  delete info;
+}
+
+}  // namespace
+
+
 class OptimizingCompilerThread::CompileTask : public v8::Task {
  public:
-  CompileTask(Isolate* isolate, OptimizedCompileJob* job)
-      : isolate_(isolate), job_(job) {}
+  explicit CompileTask(Isolate* isolate) : isolate_(isolate) {}
 
   virtual ~CompileTask() {}
 
  private:
   // v8::Task overrides.
-  virtual void Run() OVERRIDE {
+  void Run() OVERRIDE {
     DisallowHeapAllocation no_allocation;
     DisallowHandleAllocation no_handles;
     DisallowHandleDereference no_deref;
 
-    // The function may have already been optimized by OSR.  Simply continue.
-    OptimizedCompileJob::Status status = job_->OptimizeGraph();
-    USE(status);  // Prevent an unused-variable error in release mode.
-    DCHECK(status != OptimizedCompileJob::FAILED);
+    TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
 
-    // The function may have already been optimized by OSR.  Simply continue.
-    // Use a mutex to make sure that functions marked for install
-    // are always also queued.
-    {
-      base::LockGuard<base::Mutex> lock_guard(
-          &isolate_->optimizing_compiler_thread()->output_queue_mutex_);
-      isolate_->optimizing_compiler_thread()->output_queue_.Enqueue(job_);
+    OptimizingCompilerThread* thread = isolate_->optimizing_compiler_thread();
+
+    if (thread->recompilation_delay_ != 0) {
+      base::OS::Sleep(thread->recompilation_delay_);
+    }
+
+    StopFlag flag;
+    OptimizedCompileJob* job = thread->NextInput(&flag);
+
+    if (flag == CONTINUE) {
+      thread->CompileNext(job);
+    } else {
+      AllowHandleDereference allow_handle_dereference;
+      if (!job->info()->is_osr()) {
+        DisposeOptimizedCompileJob(job, true);
+      }
     }
-    isolate_->stack_guard()->RequestInstallCode();
+    bool signal = false;
     {
-      base::LockGuard<base::Mutex> lock_guard(
-          &isolate_->optimizing_compiler_thread()->input_queue_mutex_);
-      isolate_->optimizing_compiler_thread()->input_queue_length_--;
+      base::LockGuard<base::RecursiveMutex> lock(&thread->task_count_mutex_);
+      if (--thread->task_count_ == 0) {
+        if (static_cast<StopFlag>(base::Acquire_Load(&thread->stop_thread_)) ==
+            FLUSH) {
+          base::Release_Store(&thread->stop_thread_,
+                              static_cast<base::AtomicWord>(CONTINUE));
+          signal = true;
+        }
+      }
     }
-    isolate_->optimizing_compiler_thread()->input_queue_semaphore_.Signal();
+    if (signal) thread->stop_semaphore_.Signal();
   }
 
   Isolate* isolate_;
-  OptimizedCompileJob* job_;
 
   DISALLOW_COPY_AND_ASSIGN(CompileTask);
 };
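
The rewritten CompileTask pulls its job from the input queue instead of carrying one, and the task_count_ bookkeeping at the end implements a classic shutdown handshake: the flushing thread publishes a FLUSH flag and blocks, and whichever worker task finishes last resets the flag and signals. A condensed standalone version of that handshake, with std::thread standing in for v8::Task and a condition variable for the semaphore (editorial sketch):

    #include <condition_variable>
    #include <mutex>
    #include <thread>
    #include <vector>

    std::mutex m;
    std::condition_variable cv;
    int task_count = 0;
    bool flush_requested = false;

    void Worker() {
      // ... compile one job here ...
      std::lock_guard<std::mutex> lock(m);
      if (--task_count == 0 && flush_requested) {
        flush_requested = false;  // analogous to resetting stop_thread_
        cv.notify_one();          // last task out wakes the flusher
      }
    }

    void Flush() {
      std::unique_lock<std::mutex> lock(m);
      if (task_count == 0) return;  // mirrors the "block" computation
      flush_requested = true;
      cv.wait(lock, [] { return task_count == 0 && !flush_requested; });
    }

    int main() {
      const int kTasks = 4;
      {
        std::lock_guard<std::mutex> lock(m);
        task_count = kTasks;
      }
      std::vector<std::thread> pool;
      for (int i = 0; i < kTasks; ++i) pool.emplace_back(Worker);
      Flush();  // returns only once all four workers have run
      for (auto& t : pool) t.join();
      return 0;
    }
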
@@ -93,8 +128,8 @@ void OptimizingCompilerThread::Run() {
     input_queue_semaphore_.Wait();
     TimerEventScope<TimerEventRecompileConcurrent> timer(isolate_);
 
-    if (FLAG_concurrent_recompilation_delay != 0) {
-      base::OS::Sleep(FLAG_concurrent_recompilation_delay);
+    if (recompilation_delay_ != 0) {
+      base::OS::Sleep(recompilation_delay_);
     }
 
     switch (static_cast<StopFlag>(base::Acquire_Load(&stop_thread_))) {
@@ -121,7 +156,7 @@ void OptimizingCompilerThread::Run() {
     base::ElapsedTimer compiling_timer;
     if (tracing_enabled_) compiling_timer.Start();
 
-    CompileNext();
+    CompileNext(NextInput());
 
     if (tracing_enabled_) {
       time_spent_compiling_ += compiling_timer.Elapsed();
@@ -130,20 +165,27 @@ void OptimizingCompilerThread::Run() {
 }
 
 
-OptimizedCompileJob* OptimizingCompilerThread::NextInput() {
+OptimizedCompileJob* OptimizingCompilerThread::NextInput(StopFlag* flag) {
   base::LockGuard<base::Mutex> access_input_queue_(&input_queue_mutex_);
-  DCHECK(!job_based_recompilation_);
-  if (input_queue_length_ == 0) return NULL;
+  if (input_queue_length_ == 0) {
+    if (flag) {
+      UNREACHABLE();
+      *flag = CONTINUE;
+    }
+    return NULL;
+  }
   OptimizedCompileJob* job = input_queue_[InputQueueIndex(0)];
   DCHECK_NE(NULL, job);
   input_queue_shift_ = InputQueueIndex(1);
   input_queue_length_--;
+  if (flag) {
+    *flag = static_cast<StopFlag>(base::Acquire_Load(&stop_thread_));
+  }
   return job;
 }
 
 
-void OptimizingCompilerThread::CompileNext() {
-  OptimizedCompileJob* job = NextInput();
+void OptimizingCompilerThread::CompileNext(OptimizedCompileJob* job) {
   DCHECK_NE(NULL, job);
 
   // The function may have already been optimized by OSR.  Simply continue.
@@ -154,36 +196,17 @@ void OptimizingCompilerThread::CompileNext() {
   // The function may have already been optimized by OSR.  Simply continue.
   // Use a mutex to make sure that functions marked for install
   // are always also queued.
+  if (job_based_recompilation_) output_queue_mutex_.Lock();
   output_queue_.Enqueue(job);
+  if (job_based_recompilation_) output_queue_mutex_.Unlock();
   isolate_->stack_guard()->RequestInstallCode();
 }
 
 
-static void DisposeOptimizedCompileJob(OptimizedCompileJob* job,
-                                       bool restore_function_code) {
-  // The recompile job is allocated in the CompilationInfo's zone.
-  CompilationInfo* info = job->info();
-  if (restore_function_code) {
-    if (info->is_osr()) {
-      if (!job->IsWaitingForInstall()) {
-        // Remove stack check that guards OSR entry on original code.
-        Handle<Code> code = info->unoptimized_code();
-        uint32_t offset = code->TranslateAstIdToPcOffset(info->osr_ast_id());
-        BackEdgeTable::RemoveStackCheck(code, offset);
-      }
-    } else {
-      Handle<JSFunction> function = info->closure();
-      function->ReplaceCode(function->shared()->code());
-    }
-  }
-  delete info;
-}
-
-
 void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
-  DCHECK(!job_based_recompilation_);
   OptimizedCompileJob* job;
   while ((job = NextInput())) {
+    DCHECK(!job_based_recompilation_);
     // This should not block, since we have one signal on the input queue
     // semaphore corresponding to each element in the input queue.
     input_queue_semaphore_.Wait();
@@ -196,6 +219,7 @@ void OptimizingCompilerThread::FlushInputQueue(bool restore_function_code) {
 
 
 void OptimizingCompilerThread::FlushOutputQueue(bool restore_function_code) {
+  base::LockGuard<base::Mutex> access_output_queue_(&output_queue_mutex_);
   OptimizedCompileJob* job;
   while (output_queue_.Dequeue(&job)) {
     // OSR jobs are dealt with separately.
@@ -218,12 +242,20 @@ void OptimizingCompilerThread::FlushOsrBuffer(bool restore_function_code) {
 
 void OptimizingCompilerThread::Flush() {
   DCHECK(!IsOptimizerThread());
-  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
-  if (FLAG_block_concurrent_recompilation) Unblock();
-  if (!job_based_recompilation_) {
-    input_queue_semaphore_.Signal();
-    stop_semaphore_.Wait();
+  bool block = true;
+  if (job_based_recompilation_) {
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    block = task_count_ > 0 || blocked_jobs_ > 0;
+    if (block) {
+      base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
+    }
+    if (FLAG_block_concurrent_recompilation) Unblock();
+  } else {
+    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
+    if (FLAG_block_concurrent_recompilation) Unblock();
   }
+  if (!job_based_recompilation_) input_queue_semaphore_.Signal();
+  if (block) stop_semaphore_.Wait();
   FlushOutputQueue(true);
   if (FLAG_concurrent_osr) FlushOsrBuffer(true);
   if (tracing_enabled_) {
@@ -234,25 +266,25 @@ void OptimizingCompilerThread::Flush() {
 
 void OptimizingCompilerThread::Stop() {
   DCHECK(!IsOptimizerThread());
-  base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
-  if (FLAG_block_concurrent_recompilation) Unblock();
-  if (!job_based_recompilation_) {
-    input_queue_semaphore_.Signal();
-    stop_semaphore_.Wait();
-  }
-
+  bool block = true;
   if (job_based_recompilation_) {
-    while (true) {
-      {
-        base::LockGuard<base::Mutex> access_input_queue(&input_queue_mutex_);
-        if (!input_queue_length_) break;
-      }
-      input_queue_semaphore_.Wait();
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    block = task_count_ > 0 || blocked_jobs_ > 0;
+    if (block) {
+      base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(FLUSH));
     }
-  } else if (FLAG_concurrent_recompilation_delay != 0) {
+    if (FLAG_block_concurrent_recompilation) Unblock();
+  } else {
+    base::Release_Store(&stop_thread_, static_cast<base::AtomicWord>(STOP));
+    if (FLAG_block_concurrent_recompilation) Unblock();
+  }
+  if (!job_based_recompilation_) input_queue_semaphore_.Signal();
+  if (block) stop_semaphore_.Wait();
+
+  if (recompilation_delay_ != 0) {
     // At this point the optimizing compiler thread's event loop has stopped.
     // There is no need for a mutex when reading input_queue_length_.
-    while (input_queue_length_ > 0) CompileNext();
+    while (input_queue_length_ > 0) CompileNext(NextInput());
     InstallOptimizedFunctions();
   } else {
     FlushInputQueue(false);
@@ -263,6 +295,7 @@ void OptimizingCompilerThread::Stop() {
 
   if (tracing_enabled_) {
     double percentage = time_spent_compiling_.PercentOf(time_spent_total_);
+    if (job_based_recompilation_) percentage = 100.0;
     PrintF("  ** Compiler thread did %.2f%% useful work\n", percentage);
   }
 
@@ -333,11 +366,13 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
     input_queue_[InputQueueIndex(input_queue_length_)] = job;
     input_queue_length_++;
   }
-  if (job_based_recompilation_) {
-    V8::GetCurrentPlatform()->CallOnBackgroundThread(
-        new CompileTask(isolate_, job), v8::Platform::kShortRunningTask);
-  } else if (FLAG_block_concurrent_recompilation) {
+  if (FLAG_block_concurrent_recompilation) {
     blocked_jobs_++;
+  } else if (job_based_recompilation_) {
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    ++task_count_;
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new CompileTask(isolate_), v8::Platform::kShortRunningTask);
   } else {
     input_queue_semaphore_.Signal();
   }
@@ -346,11 +381,17 @@ void OptimizingCompilerThread::QueueForOptimization(OptimizedCompileJob* job) {
 
 void OptimizingCompilerThread::Unblock() {
   DCHECK(!IsOptimizerThread());
-  if (job_based_recompilation_) {
-    return;
+  {
+    base::LockGuard<base::RecursiveMutex> lock(&task_count_mutex_);
+    task_count_ += blocked_jobs_;
   }
   while (blocked_jobs_ > 0) {
-    input_queue_semaphore_.Signal();
+    if (job_based_recompilation_) {
+      V8::GetCurrentPlatform()->CallOnBackgroundThread(
+          new CompileTask(isolate_), v8::Platform::kShortRunningTask);
+    } else {
+      input_queue_semaphore_.Signal();
+    }
     blocked_jobs_--;
   }
 }
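
Flush() and Stop() above now share one drain test: wait on stop_semaphore_ only while task_count_ or blocked_jobs_ is non-zero. A standalone sketch of the same idea with std:: primitives (a condition variable standing in for the semaphore; not V8 code):

#include <condition_variable>
#include <mutex>

class TaskTracker {
 public:
  void TaskStarted() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++task_count_;
  }
  void TaskFinished() {
    std::lock_guard<std::mutex> lock(mutex_);
    if (--task_count_ == 0) drained_.notify_all();
  }
  // Mirrors "block = task_count_ > 0 || blocked_jobs_ > 0": only park
  // when work is still in flight, otherwise return immediately.
  void WaitUntilDrained() {
    std::unique_lock<std::mutex> lock(mutex_);
    drained_.wait(lock, [this] { return task_count_ == 0; });
  }
 private:
  std::mutex mutex_;
  std::condition_variable drained_;
  int task_count_ = 0;
};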
index 60f95f4..3088843 100644 (file)
@@ -35,11 +35,13 @@ class OptimizingCompilerThread : public base::Thread {
         input_queue_shift_(0),
         osr_buffer_capacity_(FLAG_concurrent_recompilation_queue_length + 4),
         osr_buffer_cursor_(0),
+        task_count_(0),
         osr_hits_(0),
         osr_attempts_(0),
         blocked_jobs_(0),
         tracing_enabled_(FLAG_trace_concurrent_recompilation),
-        job_based_recompilation_(FLAG_job_based_recompilation) {
+        job_based_recompilation_(FLAG_job_based_recompilation),
+        recompilation_delay_(FLAG_concurrent_recompilation_delay) {
     base::NoBarrier_Store(&stop_thread_,
                           static_cast<base::AtomicWord>(CONTINUE));
     input_queue_ = NewArray<OptimizedCompileJob*>(input_queue_capacity_);
@@ -93,8 +95,8 @@ class OptimizingCompilerThread : public base::Thread {
   void FlushInputQueue(bool restore_function_code);
   void FlushOutputQueue(bool restore_function_code);
   void FlushOsrBuffer(bool restore_function_code);
-  void CompileNext();
-  OptimizedCompileJob* NextInput();
+  void CompileNext(OptimizedCompileJob* job);
+  OptimizedCompileJob* NextInput(StopFlag* flag = NULL);
 
   // Add a recompilation task for OSR to the cyclic buffer, awaiting OSR entry.
   // Tasks evicted from the cyclic buffer are discarded.
@@ -138,18 +140,27 @@ class OptimizingCompilerThread : public base::Thread {
   base::TimeDelta time_spent_compiling_;
   base::TimeDelta time_spent_total_;
 
+  int task_count_;
+  // TODO(jochen): This is currently a RecursiveMutex since both Flush/Stop and
+  // Unblock try to get it, but the former two can themselves call Unblock. Once
+  // job based recompilation is on by default, and the dedicated thread can be
+  // removed, this should be refactored to not use a RecursiveMutex.
+  base::RecursiveMutex task_count_mutex_;
+
   int osr_hits_;
   int osr_attempts_;
 
   int blocked_jobs_;
 
-  // Copies of FLAG_trace_concurrent_recompilation and
+  // Copies of FLAG_trace_concurrent_recompilation,
+  // FLAG_concurrent_recompilation_delay and
   // FLAG_job_based_recompilation that will be used from the background thread.
   //
   // Since flags might get modified while the background thread is running, it
   // is not safe to access them directly.
   bool tracing_enabled_;
   bool job_based_recompilation_;
+  int recompilation_delay_;
 };
 
 } }  // namespace v8::internal
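
The TODO above is about re-entrancy: Flush() and Stop() hold task_count_mutex_ and may then call Unblock(), which locks it again on the same thread. A plain mutex would deadlock there. A compilable sketch of the hazard, with std::recursive_mutex standing in for base::RecursiveMutex:

#include <mutex>

std::recursive_mutex task_count_mutex;
int task_count = 0;
int blocked_jobs = 0;

void Unblock() {
  std::lock_guard<std::recursive_mutex> lock(task_count_mutex);
  task_count += blocked_jobs;
  blocked_jobs = 0;
}

void Flush() {
  std::lock_guard<std::recursive_mutex> lock(task_count_mutex);
  bool block = task_count > 0 || blocked_jobs > 0;
  Unblock();  // second acquisition on the same thread: fine for a recursive mutex
  (void)block;
}

int main() { Flush(); }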
index 416da55..ee0474d 100644 (file)
@@ -28,7 +28,9 @@ OFStreamBase::int_type OFStreamBase::overflow(int_type c) {
 }
 
 
-OFStream::OFStream(FILE* f) : OFStreamBase(f), std::ostream(this) {}
+OFStream::OFStream(FILE* f) : OFStreamBase(f), std::ostream(this) {
+  DCHECK_NOT_NULL(f);
+}
 
 
 OFStream::~OFStream() {}
index 65ae2ff..56787f7 100644 (file)
@@ -22,8 +22,8 @@ class OFStreamBase : public std::streambuf {
   explicit OFStreamBase(FILE* f);
   virtual ~OFStreamBase();
 
-  virtual int_type sync() FINAL;
-  virtual int_type overflow(int_type c) FINAL;
+  int_type sync() FINAL;
+  int_type overflow(int_type c) FINAL;
 
  private:
   FILE* const f_;
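
Dropping `virtual` here is cosmetic, not behavioral: sync() and overflow() override virtuals inherited from std::streambuf, so they remain virtual implicitly, and FINAL (V8's macro for C++11 `final`) only applies to virtual functions anyway. A minimal illustration:

#include <iostream>

struct Base {
  virtual int Sync() { return 0; }
  virtual ~Base() = default;
};

struct Derived : Base {
  // Implicitly virtual because it overrides Base::Sync; "final" forbids
  // further overriding, so spelling out "virtual" again adds nothing.
  int Sync() final { return 1; }
};

int main() {
  Derived d;
  Base* b = &d;
  std::cout << b->Sync() << "\n";  // prints 1: dispatch is still virtual
}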
index c5bf0d9..bfdeaa3 100644 (file)
@@ -111,7 +111,7 @@ void RegExpBuilder::FlushTerms() {
   int num_terms = terms_.length();
   RegExpTree* alternative;
   if (num_terms == 0) {
-    alternative = RegExpEmpty::GetInstance();
+    alternative = new (zone()) RegExpEmpty();
   } else if (num_terms == 1) {
     alternative = terms_.last();
   } else {
@@ -126,12 +126,8 @@ void RegExpBuilder::FlushTerms() {
 RegExpTree* RegExpBuilder::ToRegExp() {
   FlushTerms();
   int num_alternatives = alternatives_.length();
-  if (num_alternatives == 0) {
-    return RegExpEmpty::GetInstance();
-  }
-  if (num_alternatives == 1) {
-    return alternatives_.last();
-  }
+  if (num_alternatives == 0) return new (zone()) RegExpEmpty();
+  if (num_alternatives == 1) return alternatives_.last();
   return new(zone()) RegExpDisjunction(alternatives_.GetList(zone()));
 }
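
Both RegExpEmpty sites switch from a shared singleton to a node placement-allocated in the parser's Zone, which works because V8's AST nodes overload operator new to take a Zone*. A toy arena below shows the shape of that overload; Arena and its members are illustrative names, not the real Zone API:

#include <cstddef>
#include <vector>

// Toy bump-style arena: allocations are freed en masse when the arena
// dies, which is why per-type singletons are no longer needed.
class Arena {
 public:
  void* Allocate(size_t size) {
    blocks_.push_back(new char[size]);
    return blocks_.back();
  }
  ~Arena() {
    for (char* block : blocks_) delete[] block;
  }
 private:
  std::vector<char*> blocks_;
};

struct EmptyNode {
  // The placement form routes allocation into the arena, so
  // "new (&arena) EmptyNode()" reads like V8's "new (zone()) RegExpEmpty()".
  static void* operator new(size_t size, Arena* arena) {
    return arena->Allocate(size);
  }
};

int main() {
  Arena arena;
  EmptyNode* node = new (&arena) EmptyNode();  // no delete: the arena owns it
  (void)node;
}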
 
@@ -206,6 +202,7 @@ int ParseData::FunctionCount() {
 
 
 bool ParseData::IsSane() {
+  if (!IsAligned(script_data_->length(), sizeof(unsigned))) return false;
   // Check that the header data is valid and doesn't point to
   // positions outside the store.
   int data_length = Length();
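
The added guard rejects cached data whose byte length is not a multiple of sizeof(unsigned) before any header field is read. V8's IsAligned helper is the usual power-of-two test; a sketch for reference (assumes alignment is a power of two, as it is here):

#include <cstdint>

// value is a multiple of `alignment` exactly when its low bits are zero.
static inline bool IsAligned(intptr_t value, intptr_t alignment) {
  return (value & (alignment - 1)) == 0;
}
// e.g. with 4-byte unsigned: IsAligned(12, 4) -> true, IsAligned(13, 4) -> false.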
@@ -260,7 +257,7 @@ void Parser::SetCachedData() {
   } else {
     DCHECK(info_->cached_data() != NULL);
     if (compile_options() == ScriptCompiler::kConsumeParserCache) {
-      cached_parse_data_ = new ParseData(*info_->cached_data());
+      cached_parse_data_ = ParseData::FromCachedData(*info_->cached_data());
     }
   }
 }
@@ -275,6 +272,55 @@ Scope* Parser::NewScope(Scope* parent, ScopeType scope_type) {
 }
 
 
+FunctionLiteral* Parser::DefaultConstructor(bool call_super, Scope* scope,
+                                            int pos, int end_pos) {
+  int materialized_literal_count = -1;
+  int expected_property_count = -1;
+  int handler_count = 0;
+  int parameter_count = 0;
+  const AstRawString* name = ast_value_factory()->empty_string();
+
+  Scope* function_scope = NewScope(scope, FUNCTION_SCOPE);
+  function_scope->SetStrictMode(STRICT);
+  // Set start and end position to the same value
+  function_scope->set_start_position(pos);
+  function_scope->set_end_position(pos);
+  ZoneList<Statement*>* body = NULL;
+
+  {
+    AstNodeFactory function_factory(ast_value_factory());
+    FunctionState function_state(&function_state_, &scope_, function_scope,
+                                 &function_factory);
+
+    body = new (zone()) ZoneList<Statement*>(1, zone());
+    if (call_super) {
+      ZoneList<Expression*>* args =
+          new (zone()) ZoneList<Expression*>(0, zone());
+      CallRuntime* call = factory()->NewCallRuntime(
+          ast_value_factory()->empty_string(),
+          Runtime::FunctionForId(Runtime::kDefaultConstructorSuperCall), args,
+          pos);
+      body->Add(factory()->NewExpressionStatement(call, pos), zone());
+      function_scope->RecordSuperConstructorCallUsage();
+    }
+
+    materialized_literal_count = function_state.materialized_literal_count();
+    expected_property_count = function_state.expected_property_count();
+    handler_count = function_state.handler_count();
+  }
+
+  FunctionLiteral* function_literal = factory()->NewFunctionLiteral(
+      name, ast_value_factory(), function_scope, body,
+      materialized_literal_count, expected_property_count, handler_count,
+      parameter_count, FunctionLiteral::kNoDuplicateParameters,
+      FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kIsFunction,
+      FunctionLiteral::kNotParenthesized, FunctionKind::kDefaultConstructor,
+      pos);
+
+  return function_literal;
+}
+
+
 // ----------------------------------------------------------------------------
 // Target is a support class to facilitate manipulation of the
 // Parser's target_stack_ (the stack of potential 'break' and
@@ -414,7 +460,7 @@ Expression* ParserTraits::MarkExpressionAsAssigned(Expression* expression) {
 
 bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
     Expression** x, Expression* y, Token::Value op, int pos,
-    AstNodeFactory<AstConstructionVisitor>* factory) {
+    AstNodeFactory* factory) {
   if ((*x)->AsLiteral() && (*x)->AsLiteral()->raw_value()->IsNumber() &&
       y->AsLiteral() && y->AsLiteral()->raw_value()->IsNumber()) {
     double x_val = (*x)->AsLiteral()->raw_value()->AsNumber();
@@ -472,9 +518,9 @@ bool ParserTraits::ShortcutNumericLiteralBinaryExpression(
 }
 
 
-Expression* ParserTraits::BuildUnaryExpression(
-    Expression* expression, Token::Value op, int pos,
-    AstNodeFactory<AstConstructionVisitor>* factory) {
+Expression* ParserTraits::BuildUnaryExpression(Expression* expression,
+                                               Token::Value op, int pos,
+                                               AstNodeFactory* factory) {
   DCHECK(expression != NULL);
   if (expression->IsLiteral()) {
     const AstValue* literal = expression->AsLiteral()->raw_value();
@@ -628,30 +674,28 @@ const AstRawString* ParserTraits::GetNextSymbol(Scanner* scanner) {
 }
 
 
-Expression* ParserTraits::ThisExpression(
-    Scope* scope, AstNodeFactory<AstConstructionVisitor>* factory, int pos) {
+Expression* ParserTraits::ThisExpression(Scope* scope, AstNodeFactory* factory,
+                                         int pos) {
   return factory->NewVariableProxy(scope->receiver(), pos);
 }
 
-Expression* ParserTraits::SuperReference(
-    Scope* scope, AstNodeFactory<AstConstructionVisitor>* factory, int pos) {
+Expression* ParserTraits::SuperReference(Scope* scope, AstNodeFactory* factory,
+                                         int pos) {
   return factory->NewSuperReference(
       ThisExpression(scope, factory, pos)->AsVariableProxy(),
       pos);
 }
 
-Expression* ParserTraits::ClassExpression(
-    const AstRawString* name, Expression* extends, Expression* constructor,
-    ZoneList<ObjectLiteral::Property*>* properties, int start_position,
-    int end_position, AstNodeFactory<AstConstructionVisitor>* factory) {
-  return factory->NewClassLiteral(name, extends, constructor, properties,
-                                  start_position, end_position);
+
+Expression* ParserTraits::DefaultConstructor(bool call_super, Scope* scope,
+                                             int pos, int end_pos) {
+  return parser_->DefaultConstructor(call_super, scope, pos, end_pos);
 }
 
-Literal* ParserTraits::ExpressionFromLiteral(
-    Token::Value token, int pos,
-    Scanner* scanner,
-    AstNodeFactory<AstConstructionVisitor>* factory) {
+
+Literal* ParserTraits::ExpressionFromLiteral(Token::Value token, int pos,
+                                             Scanner* scanner,
+                                             AstNodeFactory* factory) {
   switch (token) {
     case Token::NULL_LITERAL:
       return factory->NewNullLiteral(pos);
@@ -670,9 +714,9 @@ Literal* ParserTraits::ExpressionFromLiteral(
 }
 
 
-Expression* ParserTraits::ExpressionFromIdentifier(
-    const AstRawString* name, int pos, Scope* scope,
-    AstNodeFactory<AstConstructionVisitor>* factory) {
+Expression* ParserTraits::ExpressionFromIdentifier(const AstRawString* name,
+                                                   int pos, Scope* scope,
+                                                   AstNodeFactory* factory) {
   if (parser_->fni_ != NULL) parser_->fni_->PushVariableName(name);
   // The name may refer to a module instance object, so its type is unknown.
 #ifdef DEBUG
@@ -684,19 +728,18 @@ Expression* ParserTraits::ExpressionFromIdentifier(
 }
 
 
-Expression* ParserTraits::ExpressionFromString(
-    int pos, Scanner* scanner,
-    AstNodeFactory<AstConstructionVisitor>* factory) {
+Expression* ParserTraits::ExpressionFromString(int pos, Scanner* scanner,
+                                               AstNodeFactory* factory) {
   const AstRawString* symbol = GetSymbol(scanner);
   if (parser_->fni_ != NULL) parser_->fni_->PushLiteralName(symbol);
   return factory->NewStringLiteral(symbol, pos);
 }
 
 
-Expression* ParserTraits::GetIterator(
-    Expression* iterable, AstNodeFactory<AstConstructionVisitor>* factory) {
+Expression* ParserTraits::GetIterator(Expression* iterable,
+                                      AstNodeFactory* factory) {
   Expression* iterator_symbol_literal =
-      factory->NewSymbolLiteral("symbolIterator", RelocInfo::kNoPosition);
+      factory->NewSymbolLiteral("iterator_symbol", RelocInfo::kNoPosition);
   int pos = iterable->position();
   Expression* prop =
       factory->NewProperty(iterable, iterator_symbol_literal, pos);
@@ -706,8 +749,8 @@ Expression* ParserTraits::GetIterator(
 }
 
 
-Literal* ParserTraits::GetLiteralTheHole(
-    int position, AstNodeFactory<AstConstructionVisitor>* factory) {
+Literal* ParserTraits::GetLiteralTheHole(int position,
+                                         AstNodeFactory* factory) {
   return factory->NewTheHoleLiteral(RelocInfo::kNoPosition);
 }
 
@@ -728,6 +771,14 @@ FunctionLiteral* ParserTraits::ParseFunctionLiteral(
 }
 
 
+ClassLiteral* ParserTraits::ParseClassLiteral(
+    const AstRawString* name, Scanner::Location class_name_location,
+    bool name_is_strict_reserved, int pos, bool* ok) {
+  return parser_->ParseClassLiteral(name, class_name_location,
+                                    name_is_strict_reserved, pos, ok);
+}
+
+
 Parser::Parser(CompilationInfo* info, ParseInfo* parse_info)
     : ParserBase<ParserTraits>(&scanner_, parse_info->stack_limit,
                                info->extension(), NULL, info->zone(), this),
@@ -744,14 +795,17 @@ Parser::Parser(CompilationInfo* info, ParseInfo* parse_info)
       total_preparse_skipped_(0),
       pre_parse_timer_(NULL) {
   DCHECK(!script().is_null() || info->source_stream() != NULL);
-  set_allow_harmony_scoping(!info->is_native() && FLAG_harmony_scoping);
-  set_allow_modules(!info->is_native() && FLAG_harmony_modules);
-  set_allow_natives_syntax(FLAG_allow_natives_syntax || info->is_native());
   set_allow_lazy(false);  // Must be explicitly enabled.
-  set_allow_arrow_functions(FLAG_harmony_arrow_functions);
+  set_allow_natives(FLAG_allow_natives_syntax || info->is_native());
+  set_allow_harmony_scoping(!info->is_native() && FLAG_harmony_scoping);
+  set_allow_harmony_modules(!info->is_native() && FLAG_harmony_modules);
+  set_allow_harmony_arrow_functions(FLAG_harmony_arrow_functions);
   set_allow_harmony_numeric_literals(FLAG_harmony_numeric_literals);
-  set_allow_classes(FLAG_harmony_classes);
+  set_allow_harmony_classes(FLAG_harmony_classes);
   set_allow_harmony_object_literals(FLAG_harmony_object_literals);
+  set_allow_harmony_templates(FLAG_harmony_templates);
+  set_allow_harmony_sloppy(FLAG_harmony_sloppy);
+  set_allow_harmony_unicode(FLAG_harmony_unicode);
   for (int feature = 0; feature < v8::Isolate::kUseCounterFeatureCount;
        ++feature) {
     use_counts_[feature] = 0;
@@ -782,10 +836,9 @@ FunctionLiteral* Parser::ParseProgram() {
   // Initialize parser state.
   CompleteParserRecorder recorder;
 
-  debug_saved_compile_options_ = compile_options();
-  if (compile_options() == ScriptCompiler::kProduceParserCache) {
+  if (produce_cached_parse_data()) {
     log_ = &recorder;
-  } else if (compile_options() == ScriptCompiler::kConsumeParserCache) {
+  } else if (consume_cached_parse_data()) {
     cached_parse_data_->Initialize();
   }
 
@@ -826,7 +879,7 @@ FunctionLiteral* Parser::ParseProgram() {
     }
     PrintF(" - took %0.3f ms]\n", ms);
   }
-  if (compile_options() == ScriptCompiler::kProduceParserCache) {
+  if (produce_cached_parse_data()) {
     if (result != NULL) *info_->cached_data() = recorder.GetScriptData();
     log_ = NULL;
   }
@@ -841,8 +894,8 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
 
   FunctionLiteral* result = NULL;
   {
-    *scope = NewScope(scope_, GLOBAL_SCOPE);
-    info->SetGlobalScope(*scope);
+    *scope = NewScope(scope_, SCRIPT_SCOPE);
+    info->SetScriptScope(*scope);
     if (!info->context().is_null() && !info->context()->IsNativeContext()) {
       *scope = Scope::DeserializeScopeChain(*info->context(), *scope, zone());
       // The Scope is backed up by ScopeInfo (which is in the V8 heap); this
@@ -853,26 +906,25 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
     }
     original_scope_ = *scope;
     if (info->is_eval()) {
-      if (!(*scope)->is_global_scope() || info->strict_mode() == STRICT) {
+      if (!(*scope)->is_script_scope() || info->strict_mode() == STRICT) {
         *scope = NewScope(*scope, EVAL_SCOPE);
       }
     } else if (info->is_global()) {
-      *scope = NewScope(*scope, GLOBAL_SCOPE);
+      *scope = NewScope(*scope, SCRIPT_SCOPE);
     }
     (*scope)->set_start_position(0);
     // End position will be set by the caller.
 
     // Compute the parsing mode.
     Mode mode = (FLAG_lazy && allow_lazy()) ? PARSE_LAZILY : PARSE_EAGERLY;
-    if (allow_natives_syntax() || extension_ != NULL ||
+    if (allow_natives() || extension_ != NULL ||
         (*scope)->is_eval_scope()) {
       mode = PARSE_EAGERLY;
     }
     ParsingModeScope parsing_mode(this, mode);
 
     // Enters 'scope'.
-    AstNodeFactory<AstConstructionVisitor> function_factory(
-        ast_value_factory());
+    AstNodeFactory function_factory(ast_value_factory());
     FunctionState function_state(&function_state_, &scope_, *scope,
                                  &function_factory);
 
@@ -884,7 +936,7 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
                         &ok);
 
     if (ok && strict_mode() == STRICT) {
-      CheckOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
+      CheckStrictOctalLiteral(beg_pos, scanner()->location().end_pos, &ok);
     }
 
     if (ok && allow_harmony_scoping() && strict_mode() == STRICT) {
@@ -910,9 +962,6 @@ FunctionLiteral* Parser::DoParseProgram(CompilationInfo* info, Scope** scope,
           FunctionLiteral::kNoDuplicateParameters,
           FunctionLiteral::ANONYMOUS_EXPRESSION, FunctionLiteral::kGlobalOrEval,
           FunctionLiteral::kNotParenthesized, FunctionKind::kNormalFunction, 0);
-      result->set_ast_properties(factory()->visitor()->ast_properties());
-      result->set_dont_optimize_reason(
-          factory()->visitor()->dont_optimize_reason());
     }
   }
 
@@ -979,15 +1028,14 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
 
   {
     // Parse the function literal.
-    Scope* scope = NewScope(scope_, GLOBAL_SCOPE);
-    info()->SetGlobalScope(scope);
+    Scope* scope = NewScope(scope_, SCRIPT_SCOPE);
+    info()->SetScriptScope(scope);
     if (!info()->closure().is_null()) {
       scope = Scope::DeserializeScopeChain(info()->closure()->context(), scope,
                                            zone());
     }
     original_scope_ = scope;
-    AstNodeFactory<AstConstructionVisitor> function_factory(
-        ast_value_factory());
+    AstNodeFactory function_factory(ast_value_factory());
     FunctionState function_state(&function_state_, &scope_, scope,
                                  &function_factory);
     DCHECK(scope->strict_mode() == SLOPPY || info()->strict_mode() == STRICT);
@@ -1004,6 +1052,10 @@ FunctionLiteral* Parser::ParseLazy(Utf16CharacterStream* source) {
       Expression* expression = ParseExpression(false, &ok);
       DCHECK(expression->IsFunctionLiteral());
       result = expression->AsFunctionLiteral();
+    } else if (shared_info->is_default_constructor()) {
+      result = DefaultConstructor(shared_info->uses_super_constructor_call(),
+                                  scope, shared_info->start_position(),
+                                  shared_info->end_position());
     } else {
       result = ParseFunctionLiteral(raw_name, Scanner::Location::invalid(),
                                     false,  // Strict mode name already checked.
@@ -1079,7 +1131,7 @@ void* Parser::ParseSourceElements(ZoneList<Statement*>* processor,
           // all over the code base, so we go with a quick-fix for now.
           // In the same manner, we have to patch the parsing mode.
           if (is_eval && !scope_->is_eval_scope()) {
-            DCHECK(scope_->is_global_scope());
+            DCHECK(scope_->is_script_scope());
             Scope* scope = NewScope(scope_, EVAL_SCOPE);
             scope->set_start_position(scope_->start_position());
             scope->set_end_position(scope_->end_position());
@@ -1713,14 +1765,9 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
       declaration_scope->is_strict_eval_scope() ||
       declaration_scope->is_block_scope() ||
       declaration_scope->is_module_scope() ||
-      declaration_scope->is_global_scope()) {
+      declaration_scope->is_script_scope()) {
     // Declare the variable in the declaration scope.
-    // For the global scope, we have to check for collisions with earlier
-    // (i.e., enclosing) global scopes, to maintain the illusion of a single
-    // global scope.
-    var = declaration_scope->is_global_scope()
-        ? declaration_scope->Lookup(name)
-        : declaration_scope->LookupLocal(name);
+    var = declaration_scope->LookupLocal(name);
     if (var == NULL) {
       // Declare the name.
       var = declaration_scope->DeclareLocal(name, mode,
@@ -1728,10 +1775,10 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
                                             kNotAssigned, proxy->interface());
     } else if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(var->mode())
                || ((mode == CONST_LEGACY || var->mode() == CONST_LEGACY) &&
-                   !declaration_scope->is_global_scope())) {
+                   !declaration_scope->is_script_scope())) {
       // The name was declared in this scope before; check for conflicting
       // re-declarations. We have a conflict if either of the declarations is
-      // not a var (in the global scope, we also have to ignore legacy const for
+      // not a var (in script scope, we also have to ignore legacy const for
       // compatibility). There is similar code in runtime.cc in the Declare
       // functions. The function CheckConflictingVarDeclarations checks for
       // var and let bindings from different scopes whereas this is a check for
@@ -1776,7 +1823,7 @@ void Parser::Declare(Declaration* declaration, bool resolve, bool* ok) {
   // RuntimeHidden_DeclareLookupSlot calls.
   declaration_scope->AddDeclaration(declaration);
 
-  if (mode == CONST_LEGACY && declaration_scope->is_global_scope()) {
+  if (mode == CONST_LEGACY && declaration_scope->is_script_scope()) {
     // For global const variables we bind the proxy to a variable.
     DCHECK(resolve);  // should be set by all callers
     Variable::Kind kind = Variable::NORMAL;
@@ -1915,11 +1962,11 @@ Statement* Parser::ParseFunctionDeclaration(
   // Even if we're not at the top-level of the global or a function
   // scope, we treat it as such and introduce the function with its
   // initial value upon entering the corresponding scope.
-  // In ES6, a function behaves as a lexical binding, except in the
-  // global scope, or the initial scope of eval or another function.
+  // In ES6, a function behaves as a lexical binding, except in
+  // a script scope, or the initial scope of eval or another function.
   VariableMode mode =
       allow_harmony_scoping() && strict_mode() == STRICT &&
-      !(scope_->is_global_scope() || scope_->is_eval_scope() ||
+      !(scope_->is_script_scope() || scope_->is_eval_scope() ||
           scope_->is_function_scope()) ? LET : VAR;
   VariableProxy* proxy = NewUnresolved(name, mode, Interface::NewValue());
   Declaration* declaration =
@@ -1946,12 +1993,18 @@ Statement* Parser::ParseClassDeclaration(ZoneList<const AstRawString*>* names,
   // so rewrite it as such.
 
   Expect(Token::CLASS, CHECK_OK);
+  if (!allow_harmony_sloppy() && strict_mode() == SLOPPY) {
+    ReportMessage("sloppy_lexical");
+    *ok = false;
+    return NULL;
+  }
+
   int pos = position();
   bool is_strict_reserved = false;
   const AstRawString* name =
       ParseIdentifierOrStrictReservedWord(&is_strict_reserved, CHECK_OK);
-  Expression* value = ParseClassLiteral(name, scanner()->location(),
-                                        is_strict_reserved, pos, CHECK_OK);
+  ClassLiteral* value = ParseClassLiteral(name, scanner()->location(),
+                                          is_strict_reserved, pos, CHECK_OK);
 
   VariableProxy* proxy = NewUnresolved(name, LET, Interface::NewValue());
   Declaration* declaration =
@@ -2270,7 +2323,7 @@ Block* Parser::ParseVariableDeclarations(
     // declaration statement has been executed. This is important in
     // browsers where the global object (window) has lots of
     // properties defined in prototype objects.
-    if (initialization_scope->is_global_scope() &&
+    if (initialization_scope->is_script_scope() &&
         !IsLexicalVariableMode(mode)) {
       // Compute the arguments for the runtime call.
       ZoneList<Expression*>* arguments =
@@ -2433,12 +2486,20 @@ Statement* Parser::ParseExpressionOrLabelledStatement(
 
   // Parsed expression statement, or the context-sensitive 'module' keyword.
   // Only expect semicolon in the former case.
+  // Also detect attempts at 'let' declarations in sloppy mode.
   if (!FLAG_harmony_modules || peek() != Token::IDENTIFIER ||
       scanner()->HasAnyLineTerminatorBeforeNext() ||
       expr->AsVariableProxy() == NULL ||
       expr->AsVariableProxy()->raw_name() !=
           ast_value_factory()->module_string() ||
       scanner()->literal_contains_escapes()) {
+    if (peek() == Token::IDENTIFIER && expr->AsVariableProxy() != NULL &&
+        expr->AsVariableProxy()->raw_name() ==
+            ast_value_factory()->let_string()) {
+      ReportMessage("sloppy_lexical", NULL);
+      *ok = false;
+      return NULL;
+    }
     ExpectSemicolon(CHECK_OK);
   }
   return factory()->NewExpressionStatement(expr, pos);
@@ -2567,7 +2628,7 @@ Statement* Parser::ParseReturnStatement(bool* ok) {
   }
 
   Scope* decl_scope = scope_->DeclarationScope();
-  if (decl_scope->is_global_scope() || decl_scope->is_eval_scope()) {
+  if (decl_scope->is_script_scope() || decl_scope->is_eval_scope()) {
     ReportMessageAt(loc, "illegal_return");
     *ok = false;
     return NULL;
@@ -2735,9 +2796,7 @@ TryStatement* Parser::ParseTryStatement(bool* ok) {
     Expect(Token::RPAREN, CHECK_OK);
 
     Target target(&this->target_stack_, &catch_collector);
-    VariableMode mode =
-        allow_harmony_scoping() && strict_mode() == STRICT ? LET : VAR;
-    catch_variable = catch_scope->DeclareLocal(name, mode, kCreatedInitialized);
+    catch_variable = catch_scope->DeclareLocal(name, VAR, kCreatedInitialized);
     BlockState block_state(&scope_, catch_scope);
     catch_block = ParseBlock(NULL, CHECK_OK);
 
@@ -2871,7 +2930,7 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
     // var iterator = subject[Symbol.iterator]();
     assign_iterator = factory()->NewAssignment(
         Token::ASSIGN, factory()->NewVariableProxy(iterator),
-        GetIterator(subject, factory()), RelocInfo::kNoPosition);
+        GetIterator(subject, factory()), subject->position());
 
     // var result = iterator.next();
     {
@@ -2882,11 +2941,11 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
           iterator_proxy, next_literal, RelocInfo::kNoPosition);
       ZoneList<Expression*>* next_arguments =
           new(zone()) ZoneList<Expression*>(0, zone());
-      Expression* next_call = factory()->NewCall(
-          next_property, next_arguments, RelocInfo::kNoPosition);
+      Expression* next_call = factory()->NewCall(next_property, next_arguments,
+                                                 subject->position());
       Expression* result_proxy = factory()->NewVariableProxy(result);
-      next_result = factory()->NewAssignment(
-          Token::ASSIGN, result_proxy, next_call, RelocInfo::kNoPosition);
+      next_result = factory()->NewAssignment(Token::ASSIGN, result_proxy,
+                                             next_call, subject->position());
     }
 
     // result.done
@@ -2905,8 +2964,8 @@ void Parser::InitializeForEachStatement(ForEachStatement* stmt,
       Expression* result_proxy = factory()->NewVariableProxy(result);
       Expression* result_value = factory()->NewProperty(
           result_proxy, value_literal, RelocInfo::kNoPosition);
-      assign_each = factory()->NewAssignment(
-          Token::ASSIGN, each, result_value, RelocInfo::kNoPosition);
+      assign_each = factory()->NewAssignment(Token::ASSIGN, each, result_value,
+                                             each->position());
     }
 
     for_of->Initialize(each, subject, body,
@@ -2931,29 +2990,33 @@ Statement* Parser::DesugarLetBindingsInForStatement(
   //
   // We rewrite a for statement of the form
   //
-  //  for (let x = i; cond; next) body
+  //  labels: for (let x = i; cond; next) body
   //
   // into
   //
   //  {
-  //     let x = i;
-  //     temp_x = x;
-  //     flag = 1;
-  //     for (;;) {
-  //        let x = temp_x;
-  //        if (flag == 1) {
-  //          flag = 0;
-  //        } else {
-  //          next;
-  //        }
+  //    let x = i;
+  //    temp_x = x;
+  //    first = 1;
+  //    outer: for (;;) {
+  //      let x = temp_x;
+  //      if (first == 1) {
+  //        first = 0;
+  //      } else {
+  //        next;
+  //      }
+  //      flag = 1;
+  //      labels: for (; flag == 1; flag = 0, temp_x = x) {
   //        if (cond) {
-  //          <empty>
+  //          body
   //        } else {
-  //          break;
+  //          break outer;
   //        }
-  //        b
-  //        temp_x = x;
-  //     }
+  //      }
+  //      if (flag == 1) {
+  //        break;
+  //      }
+  //    }
   //  }
 
   DCHECK(names->length() > 0);
@@ -2962,6 +3025,8 @@ Statement* Parser::DesugarLetBindingsInForStatement(
 
   Block* outer_block = factory()->NewBlock(NULL, names->length() + 3, false,
                                            RelocInfo::kNoPosition);
+
+  // Add statement: let x = i.
   outer_block->AddStatement(init, zone());
 
   const AstRawString* temp_name = ast_value_factory()->dot_for_string();
@@ -2981,24 +3046,33 @@ Statement* Parser::DesugarLetBindingsInForStatement(
     temps.Add(temp, zone());
   }
 
-  Variable* flag = scope_->DeclarationScope()->NewTemporary(temp_name);
-  // Make statement: flag = 1.
-  {
-    VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+  Variable* first = NULL;
+  // Make statement: first = 1.
+  if (next) {
+    first = scope_->DeclarationScope()->NewTemporary(temp_name);
+    VariableProxy* first_proxy = factory()->NewVariableProxy(first);
     Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
     Assignment* assignment = factory()->NewAssignment(
-        Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
-    Statement* assignment_statement = factory()->NewExpressionStatement(
-        assignment, RelocInfo::kNoPosition);
+        Token::ASSIGN, first_proxy, const1, RelocInfo::kNoPosition);
+    Statement* assignment_statement =
+        factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
     outer_block->AddStatement(assignment_statement, zone());
   }
 
-  outer_block->AddStatement(loop, zone());
+  // Make statement: outer: for (;;)
+  // Note that we don't actually create the label, or set this loop up as an
+  // explicit break target, instead handing it directly to those nodes that
+  // need to know about it. This should be safe because we don't run any code
+  // in this function that looks up break targets.
+  ForStatement* outer_loop =
+      factory()->NewForStatement(NULL, RelocInfo::kNoPosition);
+  outer_block->AddStatement(outer_loop, zone());
+
   outer_block->set_scope(for_scope);
   scope_ = inner_scope;
 
-  Block* inner_block = factory()->NewBlock(NULL, 2 * names->length() + 3,
-                                           false, RelocInfo::kNoPosition);
+  Block* inner_block = factory()->NewBlock(NULL, names->length() + 4, false,
+                                           RelocInfo::kNoPosition);
   int pos = scanner()->location().beg_pos;
   ZoneList<Variable*> inner_vars(names->length(), zone());
 
@@ -3020,61 +3094,118 @@ Statement* Parser::DesugarLetBindingsInForStatement(
     inner_block->AddStatement(assignment_statement, zone());
   }
 
-  // Make statement: if (flag == 1) { flag = 0; } else { next; }.
+  // Make statement: if (first == 1) { first = 0; } else { next; }
   if (next) {
+    DCHECK(first);
     Expression* compare = NULL;
-    // Make compare expresion: flag == 1.
+    // Make compare expression: first == 1.
     {
       Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
-      VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
-      compare = factory()->NewCompareOperation(
-          Token::EQ, flag_proxy, const1, pos);
+      VariableProxy* first_proxy = factory()->NewVariableProxy(first);
+      compare =
+          factory()->NewCompareOperation(Token::EQ, first_proxy, const1, pos);
     }
-    Statement* clear_flag = NULL;
-    // Make statement: flag = 0.
+    Statement* clear_first = NULL;
+    // Make statement: first = 0.
     {
-      VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+      VariableProxy* first_proxy = factory()->NewVariableProxy(first);
       Expression* const0 = factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
       Assignment* assignment = factory()->NewAssignment(
-          Token::ASSIGN, flag_proxy, const0, RelocInfo::kNoPosition);
-      clear_flag = factory()->NewExpressionStatement(assignment, pos);
+          Token::ASSIGN, first_proxy, const0, RelocInfo::kNoPosition);
+      clear_first =
+          factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
     }
-    Statement* clear_flag_or_next = factory()->NewIfStatement(
-        compare, clear_flag, next, RelocInfo::kNoPosition);
-    inner_block->AddStatement(clear_flag_or_next, zone());
+    Statement* clear_first_or_next = factory()->NewIfStatement(
+        compare, clear_first, next, RelocInfo::kNoPosition);
+    inner_block->AddStatement(clear_first_or_next, zone());
   }
 
+  Variable* flag = scope_->DeclarationScope()->NewTemporary(temp_name);
+  // Make statement: flag = 1.
+  {
+    VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+    Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+    Assignment* assignment = factory()->NewAssignment(
+        Token::ASSIGN, flag_proxy, const1, RelocInfo::kNoPosition);
+    Statement* assignment_statement =
+        factory()->NewExpressionStatement(assignment, RelocInfo::kNoPosition);
+    inner_block->AddStatement(assignment_statement, zone());
+  }
+
+  // Make cond expression for main loop: flag == 1.
+  Expression* flag_cond = NULL;
+  {
+    Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+    VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+    flag_cond =
+        factory()->NewCompareOperation(Token::EQ, flag_proxy, const1, pos);
+  }
+
+  // Create chain of expressions "flag = 0, temp_x = x, ..."
+  Statement* compound_next_statement = NULL;
+  {
+    Expression* compound_next = NULL;
+    // Make expression: flag = 0.
+    {
+      VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+      Expression* const0 = factory()->NewSmiLiteral(0, RelocInfo::kNoPosition);
+      compound_next = factory()->NewAssignment(Token::ASSIGN, flag_proxy,
+                                               const0, RelocInfo::kNoPosition);
+    }
+
+    // Make the comma-separated list of temp_x = x assignments.
+    for (int i = 0; i < names->length(); i++) {
+      VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
+      VariableProxy* proxy = factory()->NewVariableProxy(inner_vars.at(i), pos);
+      Assignment* assignment = factory()->NewAssignment(
+          Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
+      compound_next = factory()->NewBinaryOperation(
+          Token::COMMA, compound_next, assignment, RelocInfo::kNoPosition);
+    }
+
+    compound_next_statement = factory()->NewExpressionStatement(
+        compound_next, RelocInfo::kNoPosition);
+  }
 
-  // Make statement: if (cond) { } else { break; }.
+  // Make statement: if (cond) { body; } else { break outer; }
+  Statement* body_or_stop = body;
   if (cond) {
-    Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
-    BreakableStatement* t = LookupBreakTarget(NULL, CHECK_OK);
-    Statement* stop = factory()->NewBreakStatement(t, RelocInfo::kNoPosition);
-    Statement* if_not_cond_break = factory()->NewIfStatement(
-        cond, empty, stop, cond->position());
-    inner_block->AddStatement(if_not_cond_break, zone());
+    Statement* stop =
+        factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
+    body_or_stop =
+        factory()->NewIfStatement(cond, body, stop, cond->position());
   }
 
-  inner_block->AddStatement(body, zone());
+  // Make statement: labels: for (; flag == 1; flag = 0, temp_x = x)
+  // Note that we re-use the original loop node, which retains its labels
+  // and ensures that any break or continue statements in the body point to
+  // the right place.
+  loop->Initialize(NULL, flag_cond, compound_next_statement, body_or_stop);
+  inner_block->AddStatement(loop, zone());
 
-  // For each let variable x:
-  //   make statement: temp_x = x;
-  for (int i = 0; i < names->length(); i++) {
-    VariableProxy* temp_proxy = factory()->NewVariableProxy(temps.at(i));
-    int pos = scanner()->location().end_pos;
-    VariableProxy* proxy = factory()->NewVariableProxy(inner_vars.at(i), pos);
-    Assignment* assignment = factory()->NewAssignment(
-        Token::ASSIGN, temp_proxy, proxy, RelocInfo::kNoPosition);
-    Statement* assignment_statement = factory()->NewExpressionStatement(
-        assignment, RelocInfo::kNoPosition);
-    inner_block->AddStatement(assignment_statement, zone());
+  // Make statement: if (flag == 1) { break; }
+  {
+    Expression* compare = NULL;
+    // Make compare expression: flag == 1.
+    {
+      Expression* const1 = factory()->NewSmiLiteral(1, RelocInfo::kNoPosition);
+      VariableProxy* flag_proxy = factory()->NewVariableProxy(flag);
+      compare =
+          factory()->NewCompareOperation(Token::EQ, flag_proxy, const1, pos);
+    }
+    Statement* stop =
+        factory()->NewBreakStatement(outer_loop, RelocInfo::kNoPosition);
+    Statement* empty = factory()->NewEmptyStatement(RelocInfo::kNoPosition);
+    Statement* if_flag_break =
+        factory()->NewIfStatement(compare, stop, empty, RelocInfo::kNoPosition);
+    inner_block->AddStatement(if_flag_break, zone());
   }
 
   inner_scope->set_end_position(scanner()->location().end_pos);
   inner_block->set_scope(inner_scope);
   scope_ = for_scope;
 
-  loop->Initialize(NULL, NULL, NULL, inner_block);
+  outer_loop->Initialize(NULL, NULL, NULL, inner_block);
   return outer_block;
 }
 
@@ -3084,7 +3215,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
   // ForStatement ::
   //   'for' '(' Expression? ';' Expression? ';' Expression? ')' Statement
 
-  int pos = peek_position();
+  int stmt_pos = peek_position();
   Statement* init = NULL;
   ZoneList<const AstRawString*> let_bindings(1, zone());
 
@@ -3096,6 +3227,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
   Expect(Token::FOR, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
   for_scope->set_start_position(scanner()->location().beg_pos);
+  bool is_let_identifier_expression = false;
   if (peek() != Token::SEMICOLON) {
     if (peek() == Token::VAR ||
         (peek() == Token::CONST && strict_mode() == SLOPPY)) {
@@ -3107,19 +3239,20 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
                                     CHECK_OK);
       bool accept_OF = decl_props == kHasNoInitializers;
       ForEachStatement::VisitMode mode;
+      int each_pos = position();
 
       if (name != NULL && CheckInOrOf(accept_OF, &mode)) {
         Interface* interface =
             is_const ? Interface::NewConst() : Interface::NewValue();
         ForEachStatement* loop =
-            factory()->NewForEachStatement(mode, labels, pos);
+            factory()->NewForEachStatement(mode, labels, stmt_pos);
         Target target(&this->target_stack_, loop);
 
         Expression* enumerable = ParseExpression(true, CHECK_OK);
         Expect(Token::RPAREN, CHECK_OK);
 
         VariableProxy* each =
-            scope_->NewUnresolved(factory(), name, interface);
+            scope_->NewUnresolved(factory(), name, interface, each_pos);
         Statement* body = ParseStatement(NULL, CHECK_OK);
         InitializeForEachStatement(loop, each, enumerable, body);
         Block* result =
@@ -3146,6 +3279,7 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
       bool accept_IN = name != NULL && decl_props != kHasInitializers;
       bool accept_OF = decl_props == kHasNoInitializers;
       ForEachStatement::VisitMode mode;
+      int each_pos = position();
 
       if (accept_IN && CheckInOrOf(accept_OF, &mode)) {
         // Rewrite a for-in statement of the form
@@ -3165,9 +3299,9 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
         // implementing stack allocated block scoped variables.
         Variable* temp = scope_->DeclarationScope()->NewTemporary(
             ast_value_factory()->dot_for_string());
-        VariableProxy* temp_proxy = factory()->NewVariableProxy(temp);
+        VariableProxy* temp_proxy = factory()->NewVariableProxy(temp, each_pos);
         ForEachStatement* loop =
-            factory()->NewForEachStatement(mode, labels, pos);
+            factory()->NewForEachStatement(mode, labels, stmt_pos);
         Target target(&this->target_stack_, loop);
 
         // The expression does not see the loop variable.
@@ -3176,7 +3310,8 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
         scope_ = for_scope;
         Expect(Token::RPAREN, CHECK_OK);
 
-        VariableProxy* each = scope_->NewUnresolved(factory(), name);
+        VariableProxy* each = scope_->NewUnresolved(
+            factory(), name, Interface::NewValue(), each_pos);
         Statement* body = ParseStatement(NULL, CHECK_OK);
         Block* body_block =
             factory()->NewBlock(NULL, 3, false, RelocInfo::kNoPosition);
@@ -3204,13 +3339,17 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
       Expression* expression = ParseExpression(false, CHECK_OK);
       ForEachStatement::VisitMode mode;
       bool accept_OF = expression->IsVariableProxy();
+      is_let_identifier_expression =
+        expression->IsVariableProxy() &&
+        expression->AsVariableProxy()->raw_name() ==
+            ast_value_factory()->let_string();
 
       if (CheckInOrOf(accept_OF, &mode)) {
         expression = this->CheckAndRewriteReferenceExpression(
             expression, lhs_location, "invalid_lhs_in_for", CHECK_OK);
 
         ForEachStatement* loop =
-            factory()->NewForEachStatement(mode, labels, pos);
+            factory()->NewForEachStatement(mode, labels, stmt_pos);
         Target target(&this->target_stack_, loop);
 
         Expression* enumerable = ParseExpression(true, CHECK_OK);
@@ -3226,17 +3365,23 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
         return loop;
 
       } else {
-        init = factory()->NewExpressionStatement(
-            expression, RelocInfo::kNoPosition);
+        init = factory()->NewExpressionStatement(expression, position());
       }
     }
   }
 
   // Standard 'for' loop
-  ForStatement* loop = factory()->NewForStatement(labels, pos);
+  ForStatement* loop = factory()->NewForStatement(labels, stmt_pos);
   Target target(&this->target_stack_, loop);
 
   // Parsed initializer at this point.
+  // Detect attempts at 'let' declarations in sloppy mode.
+  if (peek() == Token::IDENTIFIER && strict_mode() == SLOPPY &&
+      is_let_identifier_expression) {
+    ReportMessage("sloppy_lexical", NULL);
+    *ok = false;
+    return NULL;
+  }
   Expect(Token::SEMICOLON, CHECK_OK);
 
   // If there are let bindings, then condition and the next statement of the
@@ -3256,8 +3401,9 @@ Statement* Parser::ParseForStatement(ZoneList<const AstRawString*>* labels,
 
   Statement* next = NULL;
   if (peek() != Token::RPAREN) {
+    int next_pos = position();
     Expression* exp = ParseExpression(true, CHECK_OK);
-    next = factory()->NewExpressionStatement(exp, RelocInfo::kNoPosition);
+    next = factory()->NewExpressionStatement(exp, next_pos);
   }
   Expect(Token::RPAREN, CHECK_OK);
 
@@ -3495,12 +3641,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
   FunctionLiteral::IsParenthesizedFlag parenthesized = parenthesized_function_
       ? FunctionLiteral::kIsParenthesized
       : FunctionLiteral::kNotParenthesized;
-  AstProperties ast_properties;
-  BailoutReason dont_optimize_reason = kNoReason;
   // Parse function body.
   {
-    AstNodeFactory<AstConstructionVisitor> function_factory(
-        ast_value_factory());
+    AstNodeFactory function_factory(ast_value_factory());
     FunctionState function_state(&function_state_, &scope_, scope,
                                  &function_factory);
     scope_->SetScopeName(function_name);
@@ -3660,13 +3803,9 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
                                            CHECK_OK);
     }
     if (strict_mode() == STRICT) {
-      CheckOctalLiteral(scope->start_position(),
-                        scope->end_position(),
-                        CHECK_OK);
+      CheckStrictOctalLiteral(scope->start_position(), scope->end_position(),
+                              CHECK_OK);
     }
-    ast_properties = *factory()->visitor()->ast_properties();
-    dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
-
     if (allow_harmony_scoping() && strict_mode() == STRICT) {
       CheckConflictingVarDeclarations(scope, CHECK_OK);
     }
@@ -3678,8 +3817,6 @@ FunctionLiteral* Parser::ParseFunctionLiteral(
       num_parameters, duplicate_parameters, function_type,
       FunctionLiteral::kIsFunction, parenthesized, kind, pos);
   function_literal->set_function_token_position(function_token_pos);
-  function_literal->set_ast_properties(&ast_properties);
-  function_literal->set_dont_optimize_reason(dont_optimize_reason);
 
   if (fni_ != NULL && should_infer_name) fni_->AddFunction(function_literal);
   return function_literal;
@@ -3690,75 +3827,66 @@ void Parser::SkipLazyFunctionBody(const AstRawString* function_name,
                                   int* materialized_literal_count,
                                   int* expected_property_count,
                                   bool* ok) {
-  // Temporary debugging code for tracking down a mystery crash which should
-  // never happen. The crash happens on the line where we log the function in
-  // the preparse data: log_->LogFunction(...). TODO(marja): remove this once
-  // done.
-  CHECK(materialized_literal_count);
-  CHECK(expected_property_count);
-  CHECK(debug_saved_compile_options_ == compile_options());
-  if (compile_options() == ScriptCompiler::kProduceParserCache) {
-    CHECK(log_);
-  }
+  if (produce_cached_parse_data()) CHECK(log_);
 
   int function_block_pos = position();
-  if (compile_options() == ScriptCompiler::kConsumeParserCache) {
+  if (consume_cached_parse_data() && !cached_parse_data_->rejected()) {
     // If we have cached data, we use it to skip parsing the function body. The
     // data contains the information we need to construct the lazy function.
     FunctionEntry entry =
         cached_parse_data_->GetFunctionEntry(function_block_pos);
-    // Check that cached data is valid.
-    CHECK(entry.is_valid());
-    // End position greater than end of stream is safe, and hard to check.
-    CHECK(entry.end_pos() > function_block_pos);
-    scanner()->SeekForward(entry.end_pos() - 1);
-
-    scope_->set_end_position(entry.end_pos());
-    Expect(Token::RBRACE, ok);
-    if (!*ok) {
-      return;
-    }
-    total_preparse_skipped_ += scope_->end_position() - function_block_pos;
-    *materialized_literal_count = entry.literal_count();
-    *expected_property_count = entry.property_count();
-    scope_->SetStrictMode(entry.strict_mode());
-  } else {
-    // With no cached data, we partially parse the function, without building an
-    // AST. This gathers the data needed to build a lazy function.
-    SingletonLogger logger;
-    PreParser::PreParseResult result =
-        ParseLazyFunctionBodyWithPreParser(&logger);
-    if (result == PreParser::kPreParseStackOverflow) {
-      // Propagate stack overflow.
-      set_stack_overflow();
-      *ok = false;
-      return;
-    }
-    if (logger.has_error()) {
-      ParserTraits::ReportMessageAt(
-          Scanner::Location(logger.start(), logger.end()),
-          logger.message(), logger.argument_opt(), logger.is_reference_error());
-      *ok = false;
-      return;
-    }
-    scope_->set_end_position(logger.end());
-    Expect(Token::RBRACE, ok);
-    if (!*ok) {
+    // Check that cached data is valid. If not, mark it as rejected (the
+    // embedder handles it). Note that an end position greater than the end of
+    // stream is safe, and hard to check.
+    if (entry.is_valid() && entry.end_pos() > function_block_pos) {
+      scanner()->SeekForward(entry.end_pos() - 1);
+
+      scope_->set_end_position(entry.end_pos());
+      Expect(Token::RBRACE, ok);
+      if (!*ok) {
+        return;
+      }
+      total_preparse_skipped_ += scope_->end_position() - function_block_pos;
+      *materialized_literal_count = entry.literal_count();
+      *expected_property_count = entry.property_count();
+      scope_->SetStrictMode(entry.strict_mode());
       return;
     }
-    total_preparse_skipped_ += scope_->end_position() - function_block_pos;
-    *materialized_literal_count = logger.literals();
-    *expected_property_count = logger.properties();
-    scope_->SetStrictMode(logger.strict_mode());
-    if (compile_options() == ScriptCompiler::kProduceParserCache) {
-      DCHECK(log_);
-      // Position right after terminal '}'.
-      int body_end = scanner()->location().end_pos;
-      log_->LogFunction(function_block_pos, body_end,
-                        *materialized_literal_count,
-                        *expected_property_count,
-                        scope_->strict_mode());
-    }
+    cached_parse_data_->Reject();
+  }
+  // With no cached data, we partially parse the function, without building an
+  // AST. This gathers the data needed to build a lazy function.
+  SingletonLogger logger;
+  PreParser::PreParseResult result =
+      ParseLazyFunctionBodyWithPreParser(&logger);
+  if (result == PreParser::kPreParseStackOverflow) {
+    // Propagate stack overflow.
+    set_stack_overflow();
+    *ok = false;
+    return;
+  }
+  if (logger.has_error()) {
+    ParserTraits::ReportMessageAt(
+        Scanner::Location(logger.start(), logger.end()), logger.message(),
+        logger.argument_opt(), logger.is_reference_error());
+    *ok = false;
+    return;
+  }
+  scope_->set_end_position(logger.end());
+  Expect(Token::RBRACE, ok);
+  if (!*ok) {
+    return;
+  }
+  total_preparse_skipped_ += scope_->end_position() - function_block_pos;
+  *materialized_literal_count = logger.literals();
+  *expected_property_count = logger.properties();
+  scope_->SetStrictMode(logger.strict_mode());
+  if (produce_cached_parse_data()) {
+    DCHECK(log_);
+    // Position right after terminal '}'.
+    int body_end = scanner()->location().end_pos;
+    log_->LogFunction(function_block_pos, body_end, *materialized_literal_count,
+                      *expected_property_count, scope_->strict_mode());
   }
 }
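
The rewritten SkipLazyFunctionBody no longer CHECK-crashes on bad cached data: it rejects the cache and falls through to the preparser. A compilable sketch of that control flow; LookupCache, RejectCache, and PreParseBody are hypothetical stand-ins, not V8's real signatures:

#include <cstdio>

struct FunctionEntry {
  int end_pos = -1;
  bool valid = false;
};

static FunctionEntry LookupCache(int /*pos*/) { return FunctionEntry(); }
static void RejectCache() { std::puts("cache rejected"); }
static void PreParseBody() { std::puts("preparsed"); }

static void SkipBody(int function_block_pos, bool consume_cache) {
  if (consume_cache) {
    FunctionEntry entry = LookupCache(function_block_pos);
    if (entry.valid && entry.end_pos > function_block_pos) {
      return;  // fast path: jump straight to the cached end position
    }
    RejectCache();  // bad entry: reject instead of crashing ...
  }
  PreParseBody();  // ... and fall through to the slow path
}

int main() { SkipBody(0, true); }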
 
@@ -3833,16 +3961,20 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
 
   if (reusable_preparser_ == NULL) {
     reusable_preparser_ = new PreParser(&scanner_, NULL, stack_limit_);
-    reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping());
-    reusable_preparser_->set_allow_modules(allow_modules());
-    reusable_preparser_->set_allow_natives_syntax(allow_natives_syntax());
     reusable_preparser_->set_allow_lazy(true);
-    reusable_preparser_->set_allow_arrow_functions(allow_arrow_functions());
+    reusable_preparser_->set_allow_natives(allow_natives());
+    reusable_preparser_->set_allow_harmony_scoping(allow_harmony_scoping());
+    reusable_preparser_->set_allow_harmony_modules(allow_harmony_modules());
+    reusable_preparser_->set_allow_harmony_arrow_functions(
+        allow_harmony_arrow_functions());
     reusable_preparser_->set_allow_harmony_numeric_literals(
         allow_harmony_numeric_literals());
-    reusable_preparser_->set_allow_classes(allow_classes());
+    reusable_preparser_->set_allow_harmony_classes(allow_harmony_classes());
     reusable_preparser_->set_allow_harmony_object_literals(
         allow_harmony_object_literals());
+    reusable_preparser_->set_allow_harmony_templates(allow_harmony_templates());
+    reusable_preparser_->set_allow_harmony_sloppy(allow_harmony_sloppy());
+    reusable_preparser_->set_allow_harmony_unicode(allow_harmony_unicode());
   }
   PreParser::PreParseResult result =
       reusable_preparser_->PreParseLazyFunction(strict_mode(),
@@ -3855,6 +3987,90 @@ PreParser::PreParseResult Parser::ParseLazyFunctionBodyWithPreParser(
 }
 
 
+ClassLiteral* Parser::ParseClassLiteral(const AstRawString* name,
+                                        Scanner::Location class_name_location,
+                                        bool name_is_strict_reserved, int pos,
+                                        bool* ok) {
+  // All parts of a ClassDeclaration and ClassExpression are strict code.
+  if (name_is_strict_reserved) {
+    ReportMessageAt(class_name_location, "unexpected_strict_reserved");
+    *ok = false;
+    return NULL;
+  }
+  if (IsEvalOrArguments(name)) {
+    ReportMessageAt(class_name_location, "strict_eval_arguments");
+    *ok = false;
+    return NULL;
+  }
+
+  Scope* block_scope = NewScope(scope_, BLOCK_SCOPE);
+  BlockState block_state(&scope_, block_scope);
+  scope_->SetStrictMode(STRICT);
+  scope_->SetScopeName(name);
+
+  VariableProxy* proxy = NULL;
+  if (name != NULL) {
+    proxy = NewUnresolved(name, CONST, Interface::NewConst());
+    Declaration* declaration =
+        factory()->NewVariableDeclaration(proxy, CONST, block_scope, pos);
+    Declare(declaration, true, CHECK_OK);
+  }
+
+  Expression* extends = NULL;
+  if (Check(Token::EXTENDS)) {
+    block_scope->set_start_position(scanner()->location().end_pos);
+    extends = ParseLeftHandSideExpression(CHECK_OK);
+  } else {
+    block_scope->set_start_position(scanner()->location().end_pos);
+  }
+
+  ZoneList<ObjectLiteral::Property*>* properties = NewPropertyList(4, zone());
+  Expression* constructor = NULL;
+  bool has_seen_constructor = false;
+
+  Expect(Token::LBRACE, CHECK_OK);
+  while (peek() != Token::RBRACE) {
+    if (Check(Token::SEMICOLON)) continue;
+    if (fni_ != NULL) fni_->Enter();
+    const bool in_class = true;
+    const bool is_static = false;
+    ObjectLiteral::Property* property = ParsePropertyDefinition(
+        NULL, in_class, is_static, &has_seen_constructor, CHECK_OK);
+
+    if (has_seen_constructor && constructor == NULL) {
+      constructor = GetPropertyValue(property);
+    } else {
+      properties->Add(property, zone());
+    }
+
+    if (fni_ != NULL) {
+      fni_->Infer();
+      fni_->Leave();
+    }
+  }
+
+  Expect(Token::RBRACE, CHECK_OK);
+  int end_pos = scanner()->location().end_pos;
+
+  if (constructor == NULL) {
+    constructor =
+        DefaultConstructor(extends != NULL, block_scope, pos, end_pos);
+  }
+
+  block_scope->set_end_position(end_pos);
+  block_scope = block_scope->FinalizeBlockScope();
+
+  if (name != NULL) {
+    DCHECK_NOT_NULL(proxy);
+    DCHECK_NOT_NULL(block_scope);
+    proxy->var()->set_initializer_position(end_pos);
+  }
+
+  return factory()->NewClassLiteral(name, block_scope, proxy, extends,
+                                    constructor, properties, pos, end_pos);
+}
+
+
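A few illustrative class sources and how the checks above treat them. These
inputs are hypothetical (classes were still behind --harmony_classes at this
point); the outcomes follow the error paths in ParseClassLiteral:

const char* ok_class  = "class Point { constructor(x) { this.x = x; } }";
const char* no_ctor   = "class A extends B {}";  // DefaultConstructor path
const char* reserved  = "class static {}";       // unexpected_strict_reserved
const char* eval_name = "class eval {}";         // strict_eval_arguments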
 Expression* Parser::ParseV8Intrinsic(bool* ok) {
   // CallRuntime ::
   //   '%' Identifier Arguments
@@ -4106,6 +4322,9 @@ void RegExpParser::Advance() {
     }
   } else {
     current_ = kEndMarker;
+    // Advance so that position() points to 1-after-the-last-character. This is
+    // important so that Reset() to this position works correctly.
+    next_pos_ = in()->length() + 1;
     has_more_ = false;
   }
 }
@@ -4893,7 +5112,7 @@ bool Parser::Parse() {
   DCHECK(info()->function() == NULL);
   FunctionLiteral* result = NULL;
   pre_parse_timer_ = isolate()->counters()->pre_parse();
-  if (FLAG_trace_parse || allow_natives_syntax() || extension_ != NULL) {
+  if (FLAG_trace_parse || allow_natives() || extension_ != NULL) {
     // If intrinsics are allowed, the Parser cannot operate independent of the
     // V8 heap because of Runtime. Tell the string table to internalize strings
     // and values right after they're created.
@@ -4925,10 +5144,7 @@ void Parser::ParseOnBackground() {
   fni_ = new (zone()) FuncNameInferrer(ast_value_factory(), zone());
 
   CompleteParserRecorder recorder;
-  debug_saved_compile_options_ = compile_options();
-  if (compile_options() == ScriptCompiler::kProduceParserCache) {
-    log_ = &recorder;
-  }
+  if (produce_cached_parse_data()) log_ = &recorder;
 
   DCHECK(info()->source_stream() != NULL);
   ExternalStreamingStream stream(info()->source_stream(),
@@ -4956,9 +5172,131 @@ void Parser::ParseOnBackground() {
   // We cannot internalize on a background thread; a foreground task will take
   // care of calling Parser::Internalize just before compilation.
 
-  if (compile_options() == ScriptCompiler::kProduceParserCache) {
+  if (produce_cached_parse_data()) {
     if (result != NULL) *info_->cached_data() = recorder.GetScriptData();
     log_ = NULL;
   }
 }
+
+
+ParserTraits::TemplateLiteralState Parser::OpenTemplateLiteral(int pos) {
+  return new (zone()) ParserTraits::TemplateLiteral(zone(), pos);
+}
+
+
+void Parser::AddTemplateSpan(TemplateLiteralState* state, bool tail) {
+  int pos = scanner()->location().beg_pos;
+  int end = scanner()->location().end_pos - (tail ? 1 : 2);
+  const AstRawString* tv = scanner()->CurrentSymbol(ast_value_factory());
+  const AstRawString* trv = scanner()->CurrentRawSymbol(ast_value_factory());
+  Literal* cooked = factory()->NewStringLiteral(tv, pos);
+  Literal* raw = factory()->NewStringLiteral(trv, pos);
+  (*state)->AddTemplateSpan(cooked, raw, end, zone());
+}
+
+
+void Parser::AddTemplateExpression(TemplateLiteralState* state,
+                                   Expression* expression) {
+  (*state)->AddExpression(expression, zone());
+}
+
+
+Expression* Parser::CloseTemplateLiteral(TemplateLiteralState* state, int start,
+                                         Expression* tag) {
+  TemplateLiteral* lit = *state;
+  int pos = lit->position();
+  const ZoneList<Expression*>* cooked_strings = lit->cooked();
+  const ZoneList<Expression*>* raw_strings = lit->raw();
+  const ZoneList<Expression*>* expressions = lit->expressions();
+  DCHECK_EQ(cooked_strings->length(), raw_strings->length());
+  DCHECK_EQ(cooked_strings->length(), expressions->length() + 1);
+
+  if (!tag) {
+    // Build a tree of BinaryOps to simplify code generation.
+    Expression* expr = NULL;
+
+    if (expressions->length() == 0) {
+      // Simple case: treat as string literal
+      expr = cooked_strings->at(0);
+    } else {
+      int i;
+      Expression* cooked_str = cooked_strings->at(0);
+      expr = factory()->NewBinaryOperation(
+          Token::ADD, cooked_str, expressions->at(0), cooked_str->position());
+      for (i = 1; i < expressions->length(); ++i) {
+        cooked_str = cooked_strings->at(i);
+        expr = factory()->NewBinaryOperation(
+            Token::ADD, expr, factory()->NewBinaryOperation(
+                                  Token::ADD, cooked_str, expressions->at(i),
+                                  cooked_str->position()),
+            cooked_str->position());
+      }
+      cooked_str = cooked_strings->at(i);
+      expr = factory()->NewBinaryOperation(Token::ADD, expr, cooked_str,
+                                           cooked_str->position());
+    }
+    return expr;
+  } else {
+    uint32_t hash = ComputeTemplateLiteralHash(lit);
+
+    int cooked_idx = function_state_->NextMaterializedLiteralIndex();
+    int raw_idx = function_state_->NextMaterializedLiteralIndex();
+
+    // GetTemplateCallSite
+    ZoneList<Expression*>* args = new (zone()) ZoneList<Expression*>(4, zone());
+    args->Add(factory()->NewArrayLiteral(
+                  const_cast<ZoneList<Expression*>*>(cooked_strings),
+                  cooked_idx, pos),
+              zone());
+    args->Add(
+        factory()->NewArrayLiteral(
+            const_cast<ZoneList<Expression*>*>(raw_strings), raw_idx, pos),
+        zone());
+
+    // Ensure hash is suitable as a Smi value
+    Smi* hash_obj = Smi::cast(Internals::IntToSmi(static_cast<int>(hash)));
+    args->Add(factory()->NewSmiLiteral(hash_obj->value(), pos), zone());
+
+    this->CheckPossibleEvalCall(tag, scope_);
+    Expression* call_site = factory()->NewCallRuntime(
+        ast_value_factory()->get_template_callsite_string(), NULL, args, start);
+
+    // Call TagFn
+    ZoneList<Expression*>* call_args =
+        new (zone()) ZoneList<Expression*>(expressions->length() + 1, zone());
+    call_args->Add(call_site, zone());
+    call_args->AddAll(*expressions, zone());
+    return factory()->NewCall(tag, call_args, pos);
+  }
+}
+
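The untagged branch above folds spans and substitutions into a left-leaning
tree of Token::ADD nodes. A standalone sketch with plain strings standing in
for AST nodes (same fold shape; illustrative only, assuming cooked.size() ==
exprs.size() + 1 as the DCHECKs require):

#include <string>
#include <vector>

// `a${x}b${y}c` folds to ((("a" + x) + ("b" + y)) + "c").
std::string FoldUntagged(const std::vector<std::string>& cooked,
                         const std::vector<std::string>& exprs) {
  if (exprs.empty()) return cooked[0];     // simple case: string literal
  std::string expr = cooked[0] + exprs[0];
  for (size_t i = 1; i < exprs.size(); ++i) {
    expr = expr + (cooked[i] + exprs[i]);  // one BinaryOperation per pair
  }
  return expr + cooked[exprs.size()];      // trailing span
}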
+
+uint32_t Parser::ComputeTemplateLiteralHash(const TemplateLiteral* lit) {
+  const ZoneList<Expression*>* raw_strings = lit->raw();
+  int total = raw_strings->length();
+  DCHECK(total);
+
+  uint32_t running_hash = 0;
+
+  for (int index = 0; index < total; ++index) {
+    if (index) {
+      running_hash = StringHasher::ComputeRunningHashOneByte(
+          running_hash, "${}", 3);
+    }
+
+    const AstRawString* raw_string =
+        raw_strings->at(index)->AsLiteral()->raw_value()->AsString();
+    if (raw_string->is_one_byte()) {
+      const char* data = reinterpret_cast<const char*>(raw_string->raw_data());
+      running_hash = StringHasher::ComputeRunningHashOneByte(
+          running_hash, data, raw_string->length());
+    } else {
+      const uc16* data = reinterpret_cast<const uc16*>(raw_string->raw_data());
+      running_hash = StringHasher::ComputeRunningHash(running_hash, data,
+                                                      raw_string->length());
+    }
+  }
+
+  return running_hash;
+}
 } }  // namespace v8::internal
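ComputeTemplateLiteralHash keys tagged-template call sites on the raw spans,
feeding "${}" through the hash between spans so that span boundaries stay
unambiguous. A hedged standalone sketch of the scheme; the accumulation step
below is a generic one-at-a-time hash standing in for StringHasher, whose
exact function lives elsewhere in this tree:

#include <cstdint>
#include <string>
#include <vector>

uint32_t AddRunningHash(uint32_t h, const char* data, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    h += static_cast<uint8_t>(data[i]);
    h += h << 10;
    h ^= h >> 6;
  }
  return h;
}

uint32_t TemplateHash(const std::vector<std::string>& raw_spans) {
  uint32_t h = 0;
  for (size_t i = 0; i < raw_spans.size(); ++i) {
    if (i) h = AddRunningHash(h, "${}", 3);  // separator between spans
    h = AddRunningHash(h, raw_spans[i].data(), raw_spans[i].size());
  }
  return h;
}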
index db9071a..02dfe15 100644 (file)
@@ -23,8 +23,6 @@ class ParserLog;
 class PositionStack;
 class Target;
 
-template <typename T> class ZoneListWrapper;
-
 
 class FunctionEntry BASE_EMBEDDED {
  public:
@@ -62,10 +60,14 @@ class FunctionEntry BASE_EMBEDDED {
 // Wrapper around ScriptData to provide parser-specific functionality.
 class ParseData {
  public:
-  explicit ParseData(ScriptData* script_data) : script_data_(script_data) {
-    CHECK(IsAligned(script_data->length(), sizeof(unsigned)));
-    CHECK(IsSane());
+  static ParseData* FromCachedData(ScriptData* cached_data) {
+    ParseData* pd = new ParseData(cached_data);
+    if (pd->IsSane()) return pd;
+    cached_data->Reject();
+    delete pd;
+    return NULL;
   }
+
   void Initialize();
   FunctionEntry GetFunctionEntry(int start);
   int FunctionCount();
@@ -76,7 +78,13 @@ class ParseData {
     return reinterpret_cast<unsigned*>(const_cast<byte*>(script_data_->data()));
   }
 
+  void Reject() { script_data_->Reject(); }
+
+  bool rejected() const { return script_data_->rejected(); }
+
  private:
+  explicit ParseData(ScriptData* script_data) : script_data_(script_data) {}
+
   bool IsSane();
   unsigned Magic();
   unsigned Version();
@@ -373,7 +381,7 @@ class ParserTraits {
     typedef ZoneList<v8::internal::Statement*>* StatementList;
 
     // For constructing objects returned by the traversing functions.
-    typedef AstNodeFactory<AstConstructionVisitor> Factory;
+    typedef AstNodeFactory Factory;
   };
 
   explicit ParserTraits(Parser* parser) : parser_(parser) {}
@@ -427,7 +435,7 @@ class ParserTraits {
   static void CheckFunctionLiteralInsideTopLevelObjectLiteral(
       Scope* scope, ObjectLiteralProperty* property, bool* has_function) {
     Expression* value = property->value();
-    if (scope->DeclarationScope()->is_global_scope() &&
+    if (scope->DeclarationScope()->is_script_scope() &&
         value->AsFunctionLiteral() != NULL) {
       *has_function = true;
       value->AsFunctionLiteral()->set_pretenure();
@@ -451,9 +459,9 @@ class ParserTraits {
   // Returns true if we have a binary expression between two numeric
   // literals. In that case, *x will be changed to an expression which is the
   // computed value.
-  bool ShortcutNumericLiteralBinaryExpression(
-      Expression** x, Expression* y, Token::Value op, int pos,
-      AstNodeFactory<AstConstructionVisitor>* factory);
+  bool ShortcutNumericLiteralBinaryExpression(Expression** x, Expression* y,
+                                              Token::Value op, int pos,
+                                              AstNodeFactory* factory);
 
   // Rewrites the following types of unary expressions:
   // not <literal> -> true / false
@@ -466,9 +474,8 @@ class ParserTraits {
   // + foo -> foo * 1
   // - foo -> foo * (-1)
   // ~ foo -> foo ^(~0)
-  Expression* BuildUnaryExpression(
-      Expression* expression, Token::Value op, int pos,
-      AstNodeFactory<AstConstructionVisitor>* factory);
+  Expression* BuildUnaryExpression(Expression* expression, Token::Value op,
+                                   int pos, AstNodeFactory* factory);
 
   // Generate AST node that throws a ReferenceError with the given type.
   Expression* NewThrowReferenceError(const char* type, int pos);
@@ -528,37 +535,26 @@ class ParserTraits {
   V8_INLINE const AstRawString* EmptyIdentifierString();
 
   // Odd-ball literal creators.
-  Literal* GetLiteralTheHole(int position,
-                             AstNodeFactory<AstConstructionVisitor>* factory);
+  Literal* GetLiteralTheHole(int position, AstNodeFactory* factory);
 
   // Producing data during the recursive descent.
   const AstRawString* GetSymbol(Scanner* scanner);
   const AstRawString* GetNextSymbol(Scanner* scanner);
   const AstRawString* GetNumberAsSymbol(Scanner* scanner);
 
-  Expression* ThisExpression(Scope* scope,
-                             AstNodeFactory<AstConstructionVisitor>* factory,
+  Expression* ThisExpression(Scope* scope, AstNodeFactory* factory,
                              int pos = RelocInfo::kNoPosition);
-  Expression* SuperReference(Scope* scope,
-                             AstNodeFactory<AstConstructionVisitor>* factory,
+  Expression* SuperReference(Scope* scope, AstNodeFactory* factory,
                              int pos = RelocInfo::kNoPosition);
-  Expression* ClassExpression(const AstRawString* name, Expression* extends,
-                              Expression* constructor,
-                              ZoneList<ObjectLiteral::Property*>* properties,
-                              int start_position, int end_position,
-                              AstNodeFactory<AstConstructionVisitor>* factory);
-
-  Literal* ExpressionFromLiteral(
-      Token::Value token, int pos, Scanner* scanner,
-      AstNodeFactory<AstConstructionVisitor>* factory);
-  Expression* ExpressionFromIdentifier(
-      const AstRawString* name, int pos, Scope* scope,
-      AstNodeFactory<AstConstructionVisitor>* factory);
-  Expression* ExpressionFromString(
-      int pos, Scanner* scanner,
-      AstNodeFactory<AstConstructionVisitor>* factory);
-  Expression* GetIterator(Expression* iterable,
-                          AstNodeFactory<AstConstructionVisitor>* factory);
+  Expression* DefaultConstructor(bool call_super, Scope* scope, int pos,
+                                 int end_pos);
+  Literal* ExpressionFromLiteral(Token::Value token, int pos, Scanner* scanner,
+                                 AstNodeFactory* factory);
+  Expression* ExpressionFromIdentifier(const AstRawString* name, int pos,
+                                       Scope* scope, AstNodeFactory* factory);
+  Expression* ExpressionFromString(int pos, Scanner* scanner,
+                                   AstNodeFactory* factory);
+  Expression* GetIterator(Expression* iterable, AstNodeFactory* factory);
   ZoneList<v8::internal::Expression*>* NewExpressionList(int size, Zone* zone) {
     return new(zone) ZoneList<v8::internal::Expression*>(size, zone);
   }
@@ -589,9 +585,57 @@ class ParserTraits {
   V8_INLINE ZoneList<Statement*>* ParseEagerFunctionBody(
       const AstRawString* name, int pos, Variable* fvar,
       Token::Value fvar_init_op, bool is_generator, bool* ok);
+
+  ClassLiteral* ParseClassLiteral(const AstRawString* name,
+                                  Scanner::Location class_name_location,
+                                  bool name_is_strict_reserved, int pos,
+                                  bool* ok);
+
   V8_INLINE void CheckConflictingVarDeclarations(v8::internal::Scope* scope,
                                                  bool* ok);
 
+  class TemplateLiteral : public ZoneObject {
+   public:
+    TemplateLiteral(Zone* zone, int pos)
+        : cooked_(8, zone), raw_(8, zone), expressions_(8, zone), pos_(pos) {}
+
+    const ZoneList<Expression*>* cooked() const { return &cooked_; }
+    const ZoneList<Expression*>* raw() const { return &raw_; }
+    const ZoneList<Expression*>* expressions() const { return &expressions_; }
+    int position() const { return pos_; }
+
+    void AddTemplateSpan(Literal* cooked, Literal* raw, int end, Zone* zone) {
+      DCHECK_NOT_NULL(cooked);
+      DCHECK_NOT_NULL(raw);
+      cooked_.Add(cooked, zone);
+      raw_.Add(raw, zone);
+    }
+
+    void AddExpression(Expression* expression, Zone* zone) {
+      DCHECK_NOT_NULL(expression);
+      expressions_.Add(expression, zone);
+    }
+
+   private:
+    ZoneList<Expression*> cooked_;
+    ZoneList<Expression*> raw_;
+    ZoneList<Expression*> expressions_;
+    int pos_;
+  };
+
+  typedef TemplateLiteral* TemplateLiteralState;
+
+  V8_INLINE TemplateLiteralState OpenTemplateLiteral(int pos);
+  V8_INLINE void AddTemplateSpan(TemplateLiteralState* state, bool tail);
+  V8_INLINE void AddTemplateExpression(TemplateLiteralState* state,
+                                       Expression* expression);
+  V8_INLINE Expression* CloseTemplateLiteral(TemplateLiteralState* state,
+                                             int start, Expression* tag);
+  V8_INLINE Expression* NoTemplateTag() { return NULL; }
+  V8_INLINE static bool IsTaggedTemplate(const Expression* tag) {
+    return tag != NULL;
+  }
+
  private:
   Parser* parser_;
 };
@@ -687,6 +731,13 @@ class Parser : public ParserBase<ParserTraits> {
   ScriptCompiler::CompileOptions compile_options() const {
     return info_->compile_options();
   }
+  bool consume_cached_parse_data() const {
+    return compile_options() == ScriptCompiler::kConsumeParserCache &&
+           cached_parse_data_ != NULL;
+  }
+  bool produce_cached_parse_data() const {
+    return compile_options() == ScriptCompiler::kProduceParserCache;
+  }
   Scope* DeclarationScope(VariableMode mode) {
     return IsLexicalVariableMode(mode)
         ? scope_ : scope_->DeclarationScope();
@@ -769,6 +820,12 @@ class Parser : public ParserBase<ParserTraits> {
       int function_token_position, FunctionLiteral::FunctionType type,
       FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
 
+
+  ClassLiteral* ParseClassLiteral(const AstRawString* name,
+                                  Scanner::Location class_name_location,
+                                  bool name_is_strict_reserved, int pos,
+                                  bool* ok);
+
   // Magical syntax support.
   Expression* ParseV8Intrinsic(bool* ok);
 
@@ -804,6 +861,9 @@ class Parser : public ParserBase<ParserTraits> {
 
   Scope* NewScope(Scope* parent, ScopeType type);
 
+  FunctionLiteral* DefaultConstructor(bool call_super, Scope* scope, int pos,
+                                      int end_pos);
+
   // Skip over a lazy function, either using cached data if we have it, or
   // by parsing the function with PreParser. Consumes the ending }.
   void SkipLazyFunctionBody(const AstRawString* function_name,
@@ -823,6 +883,14 @@ class Parser : public ParserBase<ParserTraits> {
 
   void ThrowPendingError();
 
+  TemplateLiteralState OpenTemplateLiteral(int pos);
+  void AddTemplateSpan(TemplateLiteralState* state, bool tail);
+  void AddTemplateExpression(TemplateLiteralState* state,
+                             Expression* expression);
+  Expression* CloseTemplateLiteral(TemplateLiteralState* state, int start,
+                                   Expression* tag);
+  uint32_t ComputeTemplateLiteralHash(const TemplateLiteral* lit);
+
   Scanner scanner_;
   PreParser* reusable_preparser_;
   Scope* original_scope_;  // for ES5 function declarations in sloppy eval
@@ -844,10 +912,6 @@ class Parser : public ParserBase<ParserTraits> {
   int use_counts_[v8::Isolate::kUseCounterFeatureCount];
   int total_preparse_skipped_;
   HistogramTimer* pre_parse_timer_;
-
-  // Temporary; for debugging. See Parser::SkipLazyFunctionBody. TODO(marja):
-  // remove this once done.
-  ScriptCompiler::CompileOptions debug_saved_compile_options_;
 };
 
 
@@ -922,6 +986,27 @@ class CompileTimeValue: public AllStatic {
   DISALLOW_IMPLICIT_CONSTRUCTORS(CompileTimeValue);
 };
 
+
+ParserTraits::TemplateLiteralState ParserTraits::OpenTemplateLiteral(int pos) {
+  return parser_->OpenTemplateLiteral(pos);
+}
+
+
+void ParserTraits::AddTemplateSpan(TemplateLiteralState* state, bool tail) {
+  parser_->AddTemplateSpan(state, tail);
+}
+
+
+void ParserTraits::AddTemplateExpression(TemplateLiteralState* state,
+                                         Expression* expression) {
+  parser_->AddTemplateExpression(state, expression);
+}
+
+
+Expression* ParserTraits::CloseTemplateLiteral(TemplateLiteralState* state,
+                                               int start, Expression* tag) {
+  return parser_->CloseTemplateLiteral(state, start, tag);
+}
 } }  // namespace v8::internal
 
 #endif  // V8_PARSER_H_
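The four V8_INLINE template hooks declared above are driven by the shared
parser base in scanner order: Open, then alternating spans and expressions,
then Close. A toy model (not V8 code) of that order and of the
cooked/raw/expression invariant that Parser::CloseTemplateLiteral DCHECKs:

#include <string>
#include <vector>

struct ToyTemplateState {
  std::vector<std::string> cooked, raw, exprs;
};

int main() {
  ToyTemplateState state;              // OpenTemplateLiteral(pos)
  state.cooked.push_back("a");         // AddTemplateSpan(tail = false)
  state.raw.push_back("a");
  state.exprs.push_back("x");          // AddTemplateExpression(x)
  state.cooked.push_back("b");         // AddTemplateSpan(tail = true)
  state.raw.push_back("b");
  // CloseTemplateLiteral requires cooked.size() == exprs.size() + 1.
  return state.cooked.size() == state.exprs.size() + 1 ? 0 : 1;
}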
diff --git a/deps/v8/src/ppc/assembler-ppc-inl.h b/deps/v8/src/ppc/assembler-ppc-inl.h
new file mode 100644 (file)
index 0000000..6779ee3
--- /dev/null
@@ -0,0 +1,593 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been modified
+// significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#ifndef V8_PPC_ASSEMBLER_PPC_INL_H_
+#define V8_PPC_ASSEMBLER_PPC_INL_H_
+
+#include "src/ppc/assembler-ppc.h"
+
+#include "src/assembler.h"
+#include "src/debug.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+bool CpuFeatures::SupportsCrankshaft() { return true; }
+
+
+void RelocInfo::apply(intptr_t delta, ICacheFlushMode icache_flush_mode) {
+#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
+  if (RelocInfo::IsInternalReference(rmode_)) {
+    // absolute code pointer inside code object moves with the code object.
+    Assembler::RelocateInternalReference(pc_, delta, 0, icache_flush_mode);
+  }
+#endif
+  // We do not use pc relative addressing on PPC, so there is
+  // nothing else to do.
+}
+
+
+Address RelocInfo::target_address() {
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_address_address() {
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) ||
+         rmode_ == EMBEDDED_OBJECT || rmode_ == EXTERNAL_REFERENCE);
+
+#if V8_OOL_CONSTANT_POOL
+  if (Assembler::IsConstantPoolLoadStart(pc_)) {
+    // We return the PC for the ool constant pool since this function is used
+    // by the serializer, which expects the address to reside within the code
+    // object.
+    return reinterpret_cast<Address>(pc_);
+  }
+#endif
+
+  // Read the address of the word containing the target_address in an
+  // instruction stream.
+  // The only architecture-independent user of this function is the serializer.
+  // The serializer uses it to find out how many raw bytes of instruction to
+  // output before the next target.
+  // For an instruction like LIS/ORI where the target bits are mixed into the
+  // instruction bits, the size of the target will be zero, indicating that the
+  // serializer should not step forward in memory after a target is resolved
+  // and written.
+  return reinterpret_cast<Address>(pc_);
+}
+
+
+Address RelocInfo::constant_pool_entry_address() {
+#if V8_OOL_CONSTANT_POOL
+  return Assembler::target_constant_pool_address_at(pc_,
+                                                    host_->constant_pool());
+#else
+  UNREACHABLE();
+  return NULL;
+#endif
+}
+
+
+int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
+
+
+void RelocInfo::set_target_address(Address target,
+                                   WriteBarrierMode write_barrier_mode,
+                                   ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_));
+  Assembler::set_target_address_at(pc_, host_, target, icache_flush_mode);
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+      IsCodeTarget(rmode_)) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
+}
+
+
+Address Assembler::break_address_from_return_address(Address pc) {
+  return target_address_from_return_address(pc);
+}
+
+
+Address Assembler::target_address_from_return_address(Address pc) {
+// Returns the address of the call target from the return address that will
+// be returned to after a call.
+// The call sequence is:
+//  mov   ip, @ call address
+//  mtlr  ip
+//  blrl
+//                      @ return address
+#if V8_OOL_CONSTANT_POOL
+  if (IsConstantPoolLoadEnd(pc - 3 * kInstrSize)) {
+    return pc - (kMovInstructionsConstantPool + 2) * kInstrSize;
+  }
+#endif
+  return pc - (kMovInstructionsNoConstantPool + 2) * kInstrSize;
+}
+
+
+Address Assembler::return_address_from_call_start(Address pc) {
+#if V8_OOL_CONSTANT_POOL
+  Address load_address = pc + (kMovInstructionsConstantPool - 1) * kInstrSize;
+  if (IsConstantPoolLoadEnd(load_address))
+    return pc + (kMovInstructionsConstantPool + 2) * kInstrSize;
+#endif
+  return pc + (kMovInstructionsNoConstantPool + 2) * kInstrSize;
+}
+
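A quick arithmetic check of the two helpers above for the no-constant-pool
case. The value 2 for kMovInstructionsNoConstantPool (a lis/ori pair on
32-bit PPC) is an assumption here; the real constant is defined in
assembler-ppc.h:

#include <cassert>
#include <cstdint>

const int kInstrSize = 4;
const int kMovInstructionsNoConstantPool = 2;  // assumed: lis + ori

// mov ip,@target (2 instrs) + mtlr ip + blrl => the return address sits 4
// instructions past the call start, and each helper inverts the other.
uintptr_t ReturnFromCallStart(uintptr_t call_start) {
  return call_start + (kMovInstructionsNoConstantPool + 2) * kInstrSize;
}
uintptr_t CallStartFromReturn(uintptr_t return_address) {
  return return_address - (kMovInstructionsNoConstantPool + 2) * kInstrSize;
}

int main() {
  assert(CallStartFromReturn(ReturnFromCallStart(0x1000)) == 0x1000);
  return 0;
}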
+
+Object* RelocInfo::target_object() {
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return reinterpret_cast<Object*>(Assembler::target_address_at(pc_, host_));
+}
+
+
+Handle<Object> RelocInfo::target_object_handle(Assembler* origin) {
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  return Handle<Object>(
+      reinterpret_cast<Object**>(Assembler::target_address_at(pc_, host_)));
+}
+
+
+void RelocInfo::set_target_object(Object* target,
+                                  WriteBarrierMode write_barrier_mode,
+                                  ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
+  Assembler::set_target_address_at(
+      pc_, host_, reinterpret_cast<Address>(target), icache_flush_mode);
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL &&
+      target->IsHeapObject()) {
+    host()->GetHeap()->incremental_marking()->RecordWrite(
+        host(), &Memory::Object_at(pc_), HeapObject::cast(target));
+  }
+}
+
+
+Address RelocInfo::target_reference() {
+  DCHECK(rmode_ == EXTERNAL_REFERENCE);
+  return Assembler::target_address_at(pc_, host_);
+}
+
+
+Address RelocInfo::target_runtime_entry(Assembler* origin) {
+  DCHECK(IsRuntimeEntry(rmode_));
+  return target_address();
+}
+
+
+void RelocInfo::set_target_runtime_entry(Address target,
+                                         WriteBarrierMode write_barrier_mode,
+                                         ICacheFlushMode icache_flush_mode) {
+  DCHECK(IsRuntimeEntry(rmode_));
+  if (target_address() != target)
+    set_target_address(target, write_barrier_mode, icache_flush_mode);
+}
+
+
+Handle<Cell> RelocInfo::target_cell_handle() {
+  DCHECK(rmode_ == RelocInfo::CELL);
+  Address address = Memory::Address_at(pc_);
+  return Handle<Cell>(reinterpret_cast<Cell**>(address));
+}
+
+
+Cell* RelocInfo::target_cell() {
+  DCHECK(rmode_ == RelocInfo::CELL);
+  return Cell::FromValueAddress(Memory::Address_at(pc_));
+}
+
+
+void RelocInfo::set_target_cell(Cell* cell, WriteBarrierMode write_barrier_mode,
+                                ICacheFlushMode icache_flush_mode) {
+  DCHECK(rmode_ == RelocInfo::CELL);
+  Address address = cell->address() + Cell::kValueOffset;
+  Memory::Address_at(pc_) = address;
+  if (write_barrier_mode == UPDATE_WRITE_BARRIER && host() != NULL) {
+    // TODO(1550) We are passing NULL as a slot because a cell can never be
+    // on an evacuation candidate.
+    host()->GetHeap()->incremental_marking()->RecordWrite(host(), NULL, cell);
+  }
+}
+
+
+#if V8_OOL_CONSTANT_POOL
+static const int kNoCodeAgeInstructions = 7;
+#else
+static const int kNoCodeAgeInstructions = 6;
+#endif
+static const int kCodeAgingInstructions =
+    Assembler::kMovInstructionsNoConstantPool + 3;
+static const int kNoCodeAgeSequenceInstructions =
+    ((kNoCodeAgeInstructions >= kCodeAgingInstructions)
+         ? kNoCodeAgeInstructions
+         : kCodeAgingInstructions);
+static const int kNoCodeAgeSequenceNops =
+    (kNoCodeAgeSequenceInstructions - kNoCodeAgeInstructions);
+static const int kCodeAgingSequenceNops =
+    (kNoCodeAgeSequenceInstructions - kCodeAgingInstructions);
+static const int kCodeAgingTargetDelta = 1 * Assembler::kInstrSize;
+static const int kNoCodeAgeSequenceLength =
+    (kNoCodeAgeSequenceInstructions * Assembler::kInstrSize);
+
+
+Handle<Object> RelocInfo::code_age_stub_handle(Assembler* origin) {
+  UNREACHABLE();  // This should never be reached on PPC.
+  return Handle<Object>();
+}
+
+
+Code* RelocInfo::code_age_stub() {
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  return Code::GetCodeFromTargetAddress(
+      Assembler::target_address_at(pc_ + kCodeAgingTargetDelta, host_));
+}
+
+
+void RelocInfo::set_code_age_stub(Code* stub,
+                                  ICacheFlushMode icache_flush_mode) {
+  DCHECK(rmode_ == RelocInfo::CODE_AGE_SEQUENCE);
+  Assembler::set_target_address_at(pc_ + kCodeAgingTargetDelta, host_,
+                                   stub->instruction_start(),
+                                   icache_flush_mode);
+}
+
+
+Address RelocInfo::call_address() {
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  // The pc_ offset of 0 assumes patched return sequence per
+  // BreakLocationIterator::SetDebugBreakAtReturn(), or debug break
+  // slot per BreakLocationIterator::SetDebugBreakAtSlot().
+  return Assembler::target_address_at(pc_, host_);
+}
+
+
+void RelocInfo::set_call_address(Address target) {
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  Assembler::set_target_address_at(pc_, host_, target);
+  if (host() != NULL) {
+    Object* target_code = Code::GetCodeFromTargetAddress(target);
+    host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
+        host(), this, HeapObject::cast(target_code));
+  }
+}
+
+
+Object* RelocInfo::call_object() { return *call_object_address(); }
+
+
+void RelocInfo::set_call_object(Object* target) {
+  *call_object_address() = target;
+}
+
+
+Object** RelocInfo::call_object_address() {
+  DCHECK((IsJSReturn(rmode()) && IsPatchedReturnSequence()) ||
+         (IsDebugBreakSlot(rmode()) && IsPatchedDebugBreakSlotSequence()));
+  return reinterpret_cast<Object**>(pc_ + 2 * Assembler::kInstrSize);
+}
+
+
+void RelocInfo::WipeOut() {
+  DCHECK(IsEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
+         IsRuntimeEntry(rmode_) || IsExternalReference(rmode_));
+  Assembler::set_target_address_at(pc_, host_, NULL);
+}
+
+
+bool RelocInfo::IsPatchedReturnSequence() {
+  // The patched return sequence is defined by
+  // BreakLocationIterator::SetDebugBreakAtReturn() (FIXED_SEQUENCE).
+
+  Instr instr0 = Assembler::instr_at(pc_);
+  Instr instr1 = Assembler::instr_at(pc_ + 1 * Assembler::kInstrSize);
+#if V8_TARGET_ARCH_PPC64
+  Instr instr3 = Assembler::instr_at(pc_ + (3 * Assembler::kInstrSize));
+  Instr instr4 = Assembler::instr_at(pc_ + (4 * Assembler::kInstrSize));
+  Instr binstr = Assembler::instr_at(pc_ + (7 * Assembler::kInstrSize));
+#else
+  Instr binstr = Assembler::instr_at(pc_ + 4 * Assembler::kInstrSize);
+#endif
+  bool patched_return =
+      ((instr0 & kOpcodeMask) == ADDIS && (instr1 & kOpcodeMask) == ORI &&
+#if V8_TARGET_ARCH_PPC64
+       (instr3 & kOpcodeMask) == ORIS && (instr4 & kOpcodeMask) == ORI &&
+#endif
+       (binstr == 0x7d821008));  // twge r2, r2
+
+  return patched_return;
+}
+
+
+bool RelocInfo::IsPatchedDebugBreakSlotSequence() {
+  Instr current_instr = Assembler::instr_at(pc_);
+  return !Assembler::IsNop(current_instr, Assembler::DEBUG_BREAK_NOP);
+}
+
+
+void RelocInfo::Visit(Isolate* isolate, ObjectVisitor* visitor) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    visitor->VisitEmbeddedPointer(this);
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    visitor->VisitCodeTarget(this);
+  } else if (mode == RelocInfo::CELL) {
+    visitor->VisitCell(this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    visitor->VisitExternalReference(this);
+  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+    visitor->VisitCodeAgeSequence(this);
+  } else if (((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) ||
+              (RelocInfo::IsDebugBreakSlot(mode) &&
+               IsPatchedDebugBreakSlotSequence())) &&
+             isolate->debug()->has_break_points()) {
+    visitor->VisitDebugTarget(this);
+  } else if (IsRuntimeEntry(mode)) {
+    visitor->VisitRuntimeEntry(this);
+  }
+}
+
+
+template <typename StaticVisitor>
+void RelocInfo::Visit(Heap* heap) {
+  RelocInfo::Mode mode = rmode();
+  if (mode == RelocInfo::EMBEDDED_OBJECT) {
+    StaticVisitor::VisitEmbeddedPointer(heap, this);
+  } else if (RelocInfo::IsCodeTarget(mode)) {
+    StaticVisitor::VisitCodeTarget(heap, this);
+  } else if (mode == RelocInfo::CELL) {
+    StaticVisitor::VisitCell(heap, this);
+  } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
+    StaticVisitor::VisitExternalReference(this);
+  } else if (RelocInfo::IsCodeAgeSequence(mode)) {
+    StaticVisitor::VisitCodeAgeSequence(heap, this);
+  } else if (heap->isolate()->debug()->has_break_points() &&
+             ((RelocInfo::IsJSReturn(mode) && IsPatchedReturnSequence()) ||
+              (RelocInfo::IsDebugBreakSlot(mode) &&
+               IsPatchedDebugBreakSlotSequence()))) {
+    StaticVisitor::VisitDebugTarget(heap, this);
+  } else if (IsRuntimeEntry(mode)) {
+    StaticVisitor::VisitRuntimeEntry(this);
+  }
+}
+
+Operand::Operand(intptr_t immediate, RelocInfo::Mode rmode) {
+  rm_ = no_reg;
+  imm_ = immediate;
+  rmode_ = rmode;
+}
+
+Operand::Operand(const ExternalReference& f) {
+  rm_ = no_reg;
+  imm_ = reinterpret_cast<intptr_t>(f.address());
+  rmode_ = RelocInfo::EXTERNAL_REFERENCE;
+}
+
+Operand::Operand(Smi* value) {
+  rm_ = no_reg;
+  imm_ = reinterpret_cast<intptr_t>(value);
+  rmode_ = kRelocInfo_NONEPTR;
+}
+
+Operand::Operand(Register rm) {
+  rm_ = rm;
+  rmode_ = kRelocInfo_NONEPTR;  // PPC: why doesn't ARM do this?
+}
+
+void Assembler::CheckBuffer() {
+  if (buffer_space() <= kGap) {
+    GrowBuffer();
+  }
+}
+
+void Assembler::CheckTrampolinePoolQuick() {
+  if (pc_offset() >= next_buffer_check_) {
+    CheckTrampolinePool();
+  }
+}
+
+void Assembler::emit(Instr x) {
+  CheckBuffer();
+  *reinterpret_cast<Instr*>(pc_) = x;
+  pc_ += kInstrSize;
+  CheckTrampolinePoolQuick();
+}
+
+bool Operand::is_reg() const { return rm_.is_valid(); }
+
+
+// Fetch the 32-bit value from the FIXED_SEQUENCE lis/ori
+Address Assembler::target_address_at(Address pc,
+                                     ConstantPoolArray* constant_pool) {
+  Instr instr1 = instr_at(pc);
+  Instr instr2 = instr_at(pc + kInstrSize);
+  // Interpret 2 instructions generated by lis/ori
+  if (IsLis(instr1) && IsOri(instr2)) {
+#if V8_TARGET_ARCH_PPC64
+    Instr instr4 = instr_at(pc + (3 * kInstrSize));
+    Instr instr5 = instr_at(pc + (4 * kInstrSize));
+    // Assemble the 64-bit value.
+    uint64_t hi = (static_cast<uint32_t>((instr1 & kImm16Mask) << 16) |
+                   static_cast<uint32_t>(instr2 & kImm16Mask));
+    uint64_t lo = (static_cast<uint32_t>((instr4 & kImm16Mask) << 16) |
+                   static_cast<uint32_t>(instr5 & kImm16Mask));
+    return reinterpret_cast<Address>((hi << 32) | lo);
+#else
+    // Assemble the 32-bit value.
+    return reinterpret_cast<Address>(((instr1 & kImm16Mask) << 16) |
+                                     (instr2 & kImm16Mask));
+#endif
+  }
+#if V8_OOL_CONSTANT_POOL
+  return Memory::Address_at(target_constant_pool_address_at(pc, constant_pool));
+#else
+  DCHECK(false);
+  return (Address)0;
+#endif
+}
+
+
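The kImm16Mask extraction above can be checked standalone against the example
encoding that Is32BitLoadIntoR12 documents later in this patch:

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kImm16Mask = 0xFFFF;
  uint32_t lis_r12 = 0x3d802553;  // lis r12, 9555       (0x2553)
  uint32_t ori_r12 = 0x618c5000;  // ori r12, r12, 20480 (0x5000)
  uint32_t target = ((lis_r12 & kImm16Mask) << 16) | (ori_r12 & kImm16Mask);
  printf("0x%08x\n", target);     // prints 0x25535000
  return 0;
}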
+#if V8_OOL_CONSTANT_POOL
+bool Assembler::IsConstantPoolLoadStart(Address pc) {
+#if V8_TARGET_ARCH_PPC64
+  if (!IsLi(instr_at(pc))) return false;
+  pc += kInstrSize;
+#endif
+  return GetRA(instr_at(pc)).is(kConstantPoolRegister);
+}
+
+
+bool Assembler::IsConstantPoolLoadEnd(Address pc) {
+#if V8_TARGET_ARCH_PPC64
+  pc -= kInstrSize;
+#endif
+  return IsConstantPoolLoadStart(pc);
+}
+
+
+int Assembler::GetConstantPoolOffset(Address pc) {
+  DCHECK(IsConstantPoolLoadStart(pc));
+  Instr instr = instr_at(pc);
+  int offset = SIGN_EXT_IMM16((instr & kImm16Mask));
+  return offset;
+}
+
+
+void Assembler::SetConstantPoolOffset(Address pc, int offset) {
+  DCHECK(IsConstantPoolLoadStart(pc));
+  DCHECK(is_int16(offset));
+  Instr instr = instr_at(pc);
+  instr &= ~kImm16Mask;
+  instr |= (offset & kImm16Mask);
+  instr_at_put(pc, instr);
+}
+
+
+Address Assembler::target_constant_pool_address_at(
+    Address pc, ConstantPoolArray* constant_pool) {
+  Address addr = reinterpret_cast<Address>(constant_pool);
+  DCHECK(addr);
+  addr += GetConstantPoolOffset(pc);
+  return addr;
+}
+#endif
+
+
+// This sets the branch destination (which gets loaded at the call address).
+// This is for calls and branches within generated code.  The serializer
+// has already deserialized the mov instructions etc.
+// There is a FIXED_SEQUENCE assumption here.
+void Assembler::deserialization_set_special_target_at(
+    Address instruction_payload, Code* code, Address target) {
+  set_target_address_at(instruction_payload, code, target);
+}
+
+// This code assumes the FIXED_SEQUENCE of lis/ori
+void Assembler::set_target_address_at(Address pc,
+                                      ConstantPoolArray* constant_pool,
+                                      Address target,
+                                      ICacheFlushMode icache_flush_mode) {
+  Instr instr1 = instr_at(pc);
+  Instr instr2 = instr_at(pc + kInstrSize);
+  // Interpret 2 instructions generated by lis/ori
+  if (IsLis(instr1) && IsOri(instr2)) {
+#if V8_TARGET_ARCH_PPC64
+    Instr instr4 = instr_at(pc + (3 * kInstrSize));
+    Instr instr5 = instr_at(pc + (4 * kInstrSize));
+    // Needs to be fixed up when mov changes to handle 64-bit values.
+    uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+    uintptr_t itarget = reinterpret_cast<uintptr_t>(target);
+
+    instr5 &= ~kImm16Mask;
+    instr5 |= itarget & kImm16Mask;
+    itarget = itarget >> 16;
+
+    instr4 &= ~kImm16Mask;
+    instr4 |= itarget & kImm16Mask;
+    itarget = itarget >> 16;
+
+    instr2 &= ~kImm16Mask;
+    instr2 |= itarget & kImm16Mask;
+    itarget = itarget >> 16;
+
+    instr1 &= ~kImm16Mask;
+    instr1 |= itarget & kImm16Mask;
+    itarget = itarget >> 16;
+
+    *p = instr1;
+    *(p + 1) = instr2;
+    *(p + 3) = instr4;
+    *(p + 4) = instr5;
+    if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+      CpuFeatures::FlushICache(p, 5 * kInstrSize);
+    }
+#else
+    uint32_t* p = reinterpret_cast<uint32_t*>(pc);
+    uint32_t itarget = reinterpret_cast<uint32_t>(target);
+    int lo_word = itarget & kImm16Mask;
+    int hi_word = itarget >> 16;
+    instr1 &= ~kImm16Mask;
+    instr1 |= hi_word;
+    instr2 &= ~kImm16Mask;
+    instr2 |= lo_word;
+
+    *p = instr1;
+    *(p + 1) = instr2;
+    if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+      CpuFeatures::FlushICache(p, 2 * kInstrSize);
+    }
+#endif
+  } else {
+#if V8_OOL_CONSTANT_POOL
+    Memory::Address_at(target_constant_pool_address_at(pc, constant_pool)) =
+        target;
+#else
+    UNREACHABLE();
+#endif
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_PPC_ASSEMBLER_PPC_INL_H_
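And the inverse direction, mirroring how set_target_address_at above rewrites
the lis/ori immediates in place (sketch only; the real code also flushes the
instruction cache):

#include <cassert>
#include <cstdint>

void PatchLisOri(uint32_t* lis, uint32_t* ori, uint32_t target) {
  const uint32_t kImm16Mask = 0xFFFF;
  *lis = (*lis & ~kImm16Mask) | (target >> 16);         // high halfword
  *ori = (*ori & ~kImm16Mask) | (target & kImm16Mask);  // low halfword
}

int main() {
  uint32_t lis = 0x3d800000, ori = 0x618c0000;  // lis r12,0 / ori r12,r12,0
  PatchLisOri(&lis, &ori, 0x25535000);
  assert(lis == 0x3d802553 && ori == 0x618c5000);
  return 0;
}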
diff --git a/deps/v8/src/ppc/assembler-ppc.cc b/deps/v8/src/ppc/assembler-ppc.cc
new file mode 100644 (file)
index 0000000..4b8b165
--- /dev/null
@@ -0,0 +1,2493 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the above license has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/base/bits.h"
+#include "src/base/cpu.h"
+#include "src/macro-assembler.h"
+#include "src/ppc/assembler-ppc-inl.h"
+#include "src/serialize.h"
+
+namespace v8 {
+namespace internal {
+
+// Get the CPU features enabled by the build.
+static unsigned CpuFeaturesImpliedByCompiler() {
+  unsigned answer = 0;
+  return answer;
+}
+
+
+void CpuFeatures::ProbeImpl(bool cross_compile) {
+  supported_ |= CpuFeaturesImpliedByCompiler();
+  cache_line_size_ = 128;
+
+  // Only use statically determined features for cross compile (snapshot).
+  if (cross_compile) return;
+
+// Detect whether the frim instruction is supported (POWER5+).
+// For now we just check for processors we know do not support it.
+#ifndef USE_SIMULATOR
+  // Probe for additional features at runtime.
+  base::CPU cpu;
+#if V8_TARGET_ARCH_PPC64
+  if (cpu.part() == base::CPU::PPC_POWER8) {
+    supported_ |= (1u << FPR_GPR_MOV);
+  }
+#endif
+  if (cpu.part() == base::CPU::PPC_POWER6 ||
+      cpu.part() == base::CPU::PPC_POWER7 ||
+      cpu.part() == base::CPU::PPC_POWER8) {
+    supported_ |= (1u << LWSYNC);
+  }
+#if V8_OS_LINUX
+  if (!(cpu.part() == base::CPU::PPC_G5 || cpu.part() == base::CPU::PPC_G4)) {
+    // Assume FPU support.
+    supported_ |= (1u << FPU);
+  }
+  if (cpu.cache_line_size() != 0) {
+    cache_line_size_ = cpu.cache_line_size();
+  }
+#elif V8_OS_AIX
+  // Assume FP support and the default cache line size.
+  supported_ |= (1u << FPU);
+#endif
+#else  // Simulator
+  supported_ |= (1u << FPU);
+  supported_ |= (1u << LWSYNC);
+#if V8_TARGET_ARCH_PPC64
+  supported_ |= (1u << FPR_GPR_MOV);
+#endif
+#endif
+}
+
+
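The probing above only sets bits in supported_. A toy version of the scheme
(not V8 code) shows the shape that CpuFeatures::IsSupported tests later when
deciding whether to emit optional instructions:

#include <cstdio>

enum Feature { FPU, LWSYNC, FPR_GPR_MOV };
static unsigned supported_ = (1u << FPU) | (1u << LWSYNC);
static bool IsSupported(Feature f) { return (supported_ & (1u << f)) != 0; }

int main() {
  printf("FPU=%d LWSYNC=%d FPR_GPR_MOV=%d\n", IsSupported(FPU),
         IsSupported(LWSYNC), IsSupported(FPR_GPR_MOV));
  return 0;
}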
+void CpuFeatures::PrintTarget() {
+  const char* ppc_arch = NULL;
+
+#if V8_TARGET_ARCH_PPC64
+  ppc_arch = "ppc64";
+#else
+  ppc_arch = "ppc";
+#endif
+
+  printf("target %s\n", ppc_arch);
+}
+
+
+void CpuFeatures::PrintFeatures() {
+  printf("FPU=%d\n", CpuFeatures::IsSupported(FPU));
+}
+
+
+Register ToRegister(int num) {
+  DCHECK(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] = {r0,  sp,  r2,  r3,  r4,  r5,  r6,  r7,
+                                 r8,  r9,  r10, r11, ip,  r13, r14, r15,
+                                 r16, r17, r18, r19, r20, r21, r22, r23,
+                                 r24, r25, r26, r27, r28, r29, r30, fp};
+  return kRegisters[num];
+}
+
+
+const char* DoubleRegister::AllocationIndexToString(int index) {
+  DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+  const char* const names[] = {
+      "d1",  "d2",  "d3",  "d4",  "d5",  "d6",  "d7",  "d8",  "d9",  "d10",
+      "d11", "d12", "d15", "d16", "d17", "d18", "d19", "d20", "d21", "d22",
+      "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+  return names[index];
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of RelocInfo
+
+const int RelocInfo::kApplyMask = 1 << RelocInfo::INTERNAL_REFERENCE;
+
+
+bool RelocInfo::IsCodedSpecially() {
+  // The deserializer needs to know whether a pointer is specially
+  // coded.  Being specially coded on PPC means that it is a lis/ori
+  // instruction sequence or is an out of line constant pool entry,
+  // and these are always the case inside code objects.
+  return true;
+}
+
+
+bool RelocInfo::IsInConstantPool() {
+#if V8_OOL_CONSTANT_POOL
+  return Assembler::IsConstantPoolLoadStart(pc_);
+#else
+  return false;
+#endif
+}
+
+
+void RelocInfo::PatchCode(byte* instructions, int instruction_count) {
+  // Patch the code at the current address with the supplied instructions.
+  Instr* pc = reinterpret_cast<Instr*>(pc_);
+  Instr* instr = reinterpret_cast<Instr*>(instructions);
+  for (int i = 0; i < instruction_count; i++) {
+    *(pc + i) = *(instr + i);
+  }
+
+  // Indicate that code has changed.
+  CpuFeatures::FlushICache(pc_, instruction_count * Assembler::kInstrSize);
+}
+
+
+// Patch the code at the current PC with a call to the target address.
+// Additional guard instructions can be added if required.
+void RelocInfo::PatchCodeWithCall(Address target, int guard_bytes) {
+  // Patch the code at the current address with a call to the target.
+  UNIMPLEMENTED();
+}
+
+
+// -----------------------------------------------------------------------------
+// Implementation of Operand and MemOperand
+// See assembler-ppc-inl.h for inlined constructors
+
+Operand::Operand(Handle<Object> handle) {
+  AllowDeferredHandleDereference using_raw_address;
+  rm_ = no_reg;
+  // Verify all Objects referred by code are NOT in new space.
+  Object* obj = *handle;
+  if (obj->IsHeapObject()) {
+    DCHECK(!HeapObject::cast(obj)->GetHeap()->InNewSpace(obj));
+    imm_ = reinterpret_cast<intptr_t>(handle.location());
+    rmode_ = RelocInfo::EMBEDDED_OBJECT;
+  } else {
+    // no relocation needed
+    imm_ = reinterpret_cast<intptr_t>(obj);
+    rmode_ = kRelocInfo_NONEPTR;
+  }
+}
+
+
+MemOperand::MemOperand(Register rn, int32_t offset) {
+  ra_ = rn;
+  rb_ = no_reg;
+  offset_ = offset;
+}
+
+
+MemOperand::MemOperand(Register ra, Register rb) {
+  ra_ = ra;
+  rb_ = rb;
+  offset_ = 0;
+}
+
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+
+// Spare buffer.
+static const int kMinimalBufferSize = 4 * KB;
+
+
+Assembler::Assembler(Isolate* isolate, void* buffer, int buffer_size)
+    : AssemblerBase(isolate, buffer, buffer_size),
+      recorded_ast_id_(TypeFeedbackId::None()),
+#if V8_OOL_CONSTANT_POOL
+      constant_pool_builder_(),
+#endif
+      positions_recorder_(this) {
+  reloc_info_writer.Reposition(buffer_ + buffer_size_, pc_);
+
+  no_trampoline_pool_before_ = 0;
+  trampoline_pool_blocked_nesting_ = 0;
+  // We leave space (kMaxBlockTrampolineSectionSize)
+  // for the BlockTrampolinePoolScope buffer.
+  next_buffer_check_ =
+      FLAG_force_long_branches ? kMaxInt : kMaxCondBranchReach -
+                                               kMaxBlockTrampolineSectionSize;
+  internal_trampoline_exception_ = false;
+  last_bound_pos_ = 0;
+  trampoline_emitted_ = FLAG_force_long_branches;
+  unbound_labels_count_ = 0;
+  ClearRecordedAstId();
+}
+
+
+void Assembler::GetCode(CodeDesc* desc) {
+  // Set up code descriptor.
+  desc->buffer = buffer_;
+  desc->buffer_size = buffer_size_;
+  desc->instr_size = pc_offset();
+  desc->reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+  desc->origin = this;
+}
+
+
+void Assembler::Align(int m) {
+#if V8_TARGET_ARCH_PPC64
+  DCHECK(m >= 4 && base::bits::IsPowerOfTwo64(m));
+#else
+  DCHECK(m >= 4 && base::bits::IsPowerOfTwo32(m));
+#endif
+  while ((pc_offset() & (m - 1)) != 0) {
+    nop();
+  }
+}
+
+
+void Assembler::CodeTargetAlign() { Align(8); }
+
+
+Condition Assembler::GetCondition(Instr instr) {
+  switch (instr & kCondMask) {
+    case BT:
+      return eq;
+    case BF:
+      return ne;
+    default:
+      UNIMPLEMENTED();
+  }
+  return al;
+}
+
+
+bool Assembler::IsLis(Instr instr) {
+  return ((instr & kOpcodeMask) == ADDIS) && GetRA(instr).is(r0);
+}
+
+
+bool Assembler::IsLi(Instr instr) {
+  return ((instr & kOpcodeMask) == ADDI) && GetRA(instr).is(r0);
+}
+
+
+bool Assembler::IsAddic(Instr instr) { return (instr & kOpcodeMask) == ADDIC; }
+
+
+bool Assembler::IsOri(Instr instr) { return (instr & kOpcodeMask) == ORI; }
+
+
+bool Assembler::IsBranch(Instr instr) { return ((instr & kOpcodeMask) == BCX); }
+
+
+Register Assembler::GetRA(Instr instr) {
+  Register reg;
+  reg.code_ = Instruction::RAValue(instr);
+  return reg;
+}
+
+
+Register Assembler::GetRB(Instr instr) {
+  Register reg;
+  reg.code_ = Instruction::RBValue(instr);
+  return reg;
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// This code assumes a FIXED_SEQUENCE for 64-bit loads (lis/ori)
+bool Assembler::Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
+                                   Instr instr4, Instr instr5) {
+  // Check the instructions are indeed a five-part load (into r12)
+  // 3d800000       lis     r12, 0
+  // 618c0000       ori     r12, r12, 0
+  // 798c07c6       rldicr  r12, r12, 32, 31
+  // 658c00c3       oris    r12, r12, 195
+  // 618ccd40       ori     r12, r12, 52544
+  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c) &&
+          (instr3 == 0x798c07c6) && ((instr4 >> 16) == 0x658c) &&
+          ((instr5 >> 16) == 0x618c));
+}
+#else
+// This code assumes a FIXED_SEQUENCE for 32-bit loads (lis/ori)
+bool Assembler::Is32BitLoadIntoR12(Instr instr1, Instr instr2) {
+  // Check the instruction is indeed a two-part load (into r12)
+  // 3d802553       lis     r12, 9555
+  // 618c5000       ori   r12, r12, 20480
+  return (((instr1 >> 16) == 0x3d80) && ((instr2 >> 16) == 0x618c));
+}
+#endif
+
+
+bool Assembler::IsCmpRegister(Instr instr) {
+  return (((instr & kOpcodeMask) == EXT2) &&
+          ((instr & kExt2OpcodeMask) == CMP));
+}
+
+
+bool Assembler::IsRlwinm(Instr instr) {
+  return ((instr & kOpcodeMask) == RLWINMX);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+bool Assembler::IsRldicl(Instr instr) {
+  return (((instr & kOpcodeMask) == EXT5) &&
+          ((instr & kExt5OpcodeMask) == RLDICL));
+}
+#endif
+
+
+bool Assembler::IsCmpImmediate(Instr instr) {
+  return ((instr & kOpcodeMask) == CMPI);
+}
+
+
+bool Assembler::IsCrSet(Instr instr) {
+  return (((instr & kOpcodeMask) == EXT1) &&
+          ((instr & kExt1OpcodeMask) == CREQV));
+}
+
+
+Register Assembler::GetCmpImmediateRegister(Instr instr) {
+  DCHECK(IsCmpImmediate(instr));
+  return GetRA(instr);
+}
+
+
+int Assembler::GetCmpImmediateRawImmediate(Instr instr) {
+  DCHECK(IsCmpImmediate(instr));
+  return instr & kOff16Mask;
+}
+
+
+// Labels refer to positions in the (to be) generated code.
+// There are bound, linked, and unused labels.
+//
+// Bound labels refer to known positions in the already
+// generated code. pos() is the position the label refers to.
+//
+// Linked labels refer to unknown positions in the code
+// to be generated; pos() is the position of the last
+// instruction using the label.
+
+
+// The link chain is terminated by a negative code position (must be aligned).
+const int kEndOfChain = -4;
+
+
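A runnable toy of the link chain described above (not V8 code): each unbound
use of a label stores the position of the previous use in its own slot, with
-1 standing in for kEndOfChain, and binding walks the chain patching every
slot, just as bind_to() and next() do below:

#include <cstdio>
#include <vector>

int main() {
  std::vector<int> slots(6, 0);
  int head = -1;                    // label->pos(): most recent linked use
  int uses[] = {0, 2, 5};           // three forward branches to one label
  for (int use : uses) {
    slots[use] = head;              // link to the previous use
    head = use;
  }
  int target = 4;                   // bind_to(label, 4)
  while (head != -1) {
    int next = slots[head];         // next(L)
    slots[head] = target;           // target_at_put(fixup_pos, pos)
    head = next;
  }
  for (int s : slots) printf("%d ", s);  // 4 0 4 0 0 4
  return 0;
}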
+int Assembler::target_at(int pos) {
+  Instr instr = instr_at(pos);
+  // Check which type of branch this is: 16- or 26-bit offset.
+  int opcode = instr & kOpcodeMask;
+  if (BX == opcode) {
+    int imm26 = ((instr & kImm26Mask) << 6) >> 6;
+    imm26 &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
+    if (imm26 == 0) return kEndOfChain;
+    return pos + imm26;
+  } else if (BCX == opcode) {
+    int imm16 = SIGN_EXT_IMM16((instr & kImm16Mask));
+    imm16 &= ~(kAAMask | kLKMask);  // discard AA|LK bits if present
+    if (imm16 == 0) return kEndOfChain;
+    return pos + imm16;
+  } else if ((instr & ~kImm26Mask) == 0) {
+    // Emitted link to a label, not part of a branch (regexp PushBacktrack).
+    if (instr == 0) {
+      return kEndOfChain;
+    } else {
+      int32_t imm26 = SIGN_EXT_IMM26(instr);
+      return (imm26 + pos);
+    }
+  }
+
+  PPCPORT_UNIMPLEMENTED();
+  DCHECK(false);
+  return -1;
+}
+
+
+void Assembler::target_at_put(int pos, int target_pos) {
+  Instr instr = instr_at(pos);
+  int opcode = instr & kOpcodeMask;
+
+  // Check which type of branch this is: 16- or 26-bit offset.
+  if (BX == opcode) {
+    int imm26 = target_pos - pos;
+    DCHECK((imm26 & (kAAMask | kLKMask)) == 0);
+    instr &= ((~kImm26Mask) | kAAMask | kLKMask);
+    DCHECK(is_int26(imm26));
+    instr_at_put(pos, instr | (imm26 & kImm26Mask));
+    return;
+  } else if (BCX == opcode) {
+    int imm16 = target_pos - pos;
+    DCHECK((imm16 & (kAAMask | kLKMask)) == 0);
+    instr &= ((~kImm16Mask) | kAAMask | kLKMask);
+    DCHECK(is_int16(imm16));
+    instr_at_put(pos, instr | (imm16 & kImm16Mask));
+    return;
+  } else if ((instr & ~kImm26Mask) == 0) {
+    DCHECK(target_pos == kEndOfChain || target_pos >= 0);
+    // Emitted link to a label, not part of a branch (regexp PushBacktrack).
+    // Load the position of the label relative to the generated code object
+    // pointer in a register.
+
+    Register dst = r3;  // we assume r3 for now
+    DCHECK(IsNop(instr_at(pos + kInstrSize)));
+    uint32_t target = target_pos + (Code::kHeaderSize - kHeapObjectTag);
+    CodePatcher patcher(reinterpret_cast<byte*>(buffer_ + pos), 2,
+                        CodePatcher::DONT_FLUSH);
+    int target_hi = static_cast<int>(target) >> 16;
+    int target_lo = static_cast<int>(target) & 0XFFFF;
+
+    patcher.masm()->lis(dst, Operand(SIGN_EXT_IMM16(target_hi)));
+    patcher.masm()->ori(dst, dst, Operand(target_lo));
+    return;
+  }
+
+  DCHECK(false);
+}
+
+
+int Assembler::max_reach_from(int pos) {
+  Instr instr = instr_at(pos);
+  int opcode = instr & kOpcodeMask;
+
+  // Check which type of branch this is: 16- or 26-bit offset.
+  if (BX == opcode) {
+    return 26;
+  } else if (BCX == opcode) {
+    return 16;
+  } else if ((instr & ~kImm26Mask) == 0) {
+    // Emitted label constant, not part of a branch (regexp PushBacktrack).
+    return 26;
+  }
+
+  DCHECK(false);
+  return 0;
+}
+
+
+void Assembler::bind_to(Label* L, int pos) {
+  DCHECK(0 <= pos && pos <= pc_offset());  // must have a valid binding position
+  int32_t trampoline_pos = kInvalidSlotPos;
+  if (L->is_linked() && !trampoline_emitted_) {
+    unbound_labels_count_--;
+    next_buffer_check_ += kTrampolineSlotsSize;
+  }
+
+  while (L->is_linked()) {
+    int fixup_pos = L->pos();
+    int32_t offset = pos - fixup_pos;
+    int maxReach = max_reach_from(fixup_pos);
+    next(L);  // call next before overwriting link with target at fixup_pos
+    if (is_intn(offset, maxReach) == false) {
+      if (trampoline_pos == kInvalidSlotPos) {
+        trampoline_pos = get_trampoline_entry();
+        CHECK(trampoline_pos != kInvalidSlotPos);
+        target_at_put(trampoline_pos, pos);
+      }
+      target_at_put(fixup_pos, trampoline_pos);
+    } else {
+      target_at_put(fixup_pos, pos);
+    }
+  }
+  L->bind_to(pos);
+
+  // Keep track of the last bound label so we don't eliminate any instructions
+  // before a bound label.
+  if (pos > last_bound_pos_) last_bound_pos_ = pos;
+}
+
+
+void Assembler::bind(Label* L) {
+  DCHECK(!L->is_bound());  // label can only be bound once
+  bind_to(L, pc_offset());
+}
+
+
+void Assembler::next(Label* L) {
+  DCHECK(L->is_linked());
+  int link = target_at(L->pos());
+  if (link == kEndOfChain) {
+    L->Unuse();
+  } else {
+    DCHECK(link >= 0);
+    L->link_to(link);
+  }
+}
+
+
+bool Assembler::is_near(Label* L, Condition cond) {
+  DCHECK(L->is_bound());
+  if (!L->is_bound()) return false;
+
+  int maxReach = ((cond == al) ? 26 : 16);
+  int offset = L->pos() - pc_offset();
+
+  return is_intn(offset, maxReach);
+}
+
+
+void Assembler::a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
+                       DoubleRegister frb, RCBit r) {
+  emit(instr | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 | r);
+}
+
+
+void Assembler::d_form(Instr instr, Register rt, Register ra,
+                       const intptr_t val, bool signed_disp) {
+  if (signed_disp) {
+    if (!is_int16(val)) {
+      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR "\n", val, val);
+    }
+    DCHECK(is_int16(val));
+  } else {
+    if (!is_uint16(val)) {
+      PrintF("val = %" V8PRIdPTR ", 0x%" V8PRIxPTR
+             ", is_unsigned_imm16(val)=%d, kImm16Mask=0x%x\n",
+             val, val, is_uint16(val), kImm16Mask);
+    }
+    DCHECK(is_uint16(val));
+  }
+  emit(instr | rt.code() * B21 | ra.code() * B16 | (kImm16Mask & val));
+}
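+
+// Packing example for d_form (a sketch; the operands are
+// illustrative): addi(r3, r4, Operand(100)) reaches here as
+// d_form(ADDI, r3, r4, 100, true) and emits
+//   ADDI | 3 * B21 | 4 * B16 | (kImm16Mask & 100)
+// i.e. opcode, RT, RA and a signed 16-bit displacement.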
+
+
+void Assembler::x_form(Instr instr, Register ra, Register rs, Register rb,
+                       RCBit r) {
+  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | r);
+}
+
+
+void Assembler::xo_form(Instr instr, Register rt, Register ra, Register rb,
+                        OEBit o, RCBit r) {
+  emit(instr | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 | o | r);
+}
+
+
+void Assembler::md_form(Instr instr, Register ra, Register rs, int shift,
+                        int maskbit, RCBit r) {
+  int sh0_4 = shift & 0x1f;
+  int sh5 = (shift >> 5) & 0x1;
+  int m0_4 = maskbit & 0x1f;
+  int m5 = (maskbit >> 5) & 0x1;
+
+  emit(instr | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 | m0_4 * B6 |
+       m5 * B5 | sh5 * B1 | r);
+}
+
+
+void Assembler::mds_form(Instr instr, Register ra, Register rs, Register rb,
+                         int maskbit, RCBit r) {
+  int m0_4 = maskbit & 0x1f;
+  int m5 = (maskbit >> 5) & 0x1;
+
+  emit(instr | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | m0_4 * B6 |
+       m5 * B5 | r);
+}
+
+
+// Returns the next free trampoline entry.
+int32_t Assembler::get_trampoline_entry() {
+  int32_t trampoline_entry = kInvalidSlotPos;
+
+  if (!internal_trampoline_exception_) {
+    trampoline_entry = trampoline_.take_slot();
+
+    if (kInvalidSlotPos == trampoline_entry) {
+      internal_trampoline_exception_ = true;
+    }
+  }
+  return trampoline_entry;
+}
+
+
+int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
+  int target_pos;
+  if (L->is_bound()) {
+    target_pos = L->pos();
+  } else {
+    if (L->is_linked()) {
+      target_pos = L->pos();  // L's link
+    } else {
+      // Was: target_pos = kEndOfChain;
+      // However, using a branch to self to mark the first reference
+      // should avoid most instances of branch offset overflow.  See
+      // target_at() for where this is converted back to kEndOfChain.
+      target_pos = pc_offset();
+      if (!trampoline_emitted_) {
+        unbound_labels_count_++;
+        next_buffer_check_ -= kTrampolineSlotsSize;
+      }
+    }
+    L->link_to(pc_offset());
+  }
+
+  return target_pos - pc_offset();
+}
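+
+// Sketch of the link chain this builds (positions are illustrative):
+// the first b(&L) at pc 0 emits offset 0, a branch to self; a second
+// b(&L) at pc 8 emits offset -8, pointing back at the first
+// reference.  bind_to() later walks the chain via target_at() and
+// patches every entry with the real target.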
+
+
+// Branch instructions.
+
+
+void Assembler::bclr(BOfield bo, LKBit lk) {
+  positions_recorder()->WriteRecordedPositions();
+  emit(EXT1 | bo | BCLRX | lk);
+}
+
+
+void Assembler::bcctr(BOfield bo, LKBit lk) {
+  positions_recorder()->WriteRecordedPositions();
+  emit(EXT1 | bo | BCCTRX | lk);
+}
+
+
+// Pseudo op - branch to link register
+void Assembler::blr() { bclr(BA, LeaveLK); }
+
+
+// Pseudo op - branch to count register -- used for "jump"
+void Assembler::bctr() { bcctr(BA, LeaveLK); }
+
+
+void Assembler::bctrl() { bcctr(BA, SetLK); }
+
+
+void Assembler::bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk) {
+  if (lk == SetLK) {
+    positions_recorder()->WriteRecordedPositions();
+  }
+  DCHECK(is_int16(branch_offset));
+  emit(BCX | bo | condition_bit * B16 | (kImm16Mask & branch_offset) | lk);
+}
+
+
+void Assembler::b(int branch_offset, LKBit lk) {
+  if (lk == SetLK) {
+    positions_recorder()->WriteRecordedPositions();
+  }
+  DCHECK((branch_offset & 3) == 0);
+  int imm26 = branch_offset;
+  DCHECK(is_int26(imm26));
+  // TODO: add AA and LK bits.
+  emit(BX | (imm26 & kImm26Mask) | lk);
+}
+
+
+void Assembler::xori(Register dst, Register src, const Operand& imm) {
+  d_form(XORI, src, dst, imm.imm_, false);
+}
+
+
+void Assembler::xoris(Register ra, Register rs, const Operand& imm) {
+  d_form(XORIS, rs, ra, imm.imm_, false);
+}
+
+
+void Assembler::xor_(Register dst, Register src1, Register src2, RCBit rc) {
+  x_form(EXT2 | XORX, dst, src1, src2, rc);
+}
+
+
+void Assembler::cntlzw_(Register ra, Register rs, RCBit rc) {
+  x_form(EXT2 | CNTLZWX, ra, rs, r0, rc);
+}
+
+
+void Assembler::and_(Register ra, Register rs, Register rb, RCBit rc) {
+  x_form(EXT2 | ANDX, ra, rs, rb, rc);
+}
+
+
+void Assembler::rlwinm(Register ra, Register rs, int sh, int mb, int me,
+                       RCBit rc) {
+  sh &= 0x1f;
+  mb &= 0x1f;
+  me &= 0x1f;
+  emit(RLWINMX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
+       me << 1 | rc);
+}
+
+
+void Assembler::rlwnm(Register ra, Register rs, Register rb, int mb, int me,
+                      RCBit rc) {
+  mb &= 0x1f;
+  me &= 0x1f;
+  emit(RLWNMX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 | mb * B6 |
+       me << 1 | rc);
+}
+
+
+void Assembler::rlwimi(Register ra, Register rs, int sh, int mb, int me,
+                       RCBit rc) {
+  sh &= 0x1f;
+  mb &= 0x1f;
+  me &= 0x1f;
+  emit(RLWIMIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | mb * B6 |
+       me << 1 | rc);
+}
+
+
+void Assembler::slwi(Register dst, Register src, const Operand& val, RCBit rc) {
+  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
+  rlwinm(dst, src, val.imm_, 0, 31 - val.imm_, rc);
+}
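+
+// For example (registers illustrative), slwi(r4, r5, Operand(3))
+// expands to rlwinm(r4, r5, 3, 0, 28): rotate left by 3 and clear the
+// three low bits, i.e. a 32-bit left shift.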
+
+
+void Assembler::srwi(Register dst, Register src, const Operand& val, RCBit rc) {
+  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
+  rlwinm(dst, src, 32 - val.imm_, val.imm_, 31, rc);
+}
+
+
+void Assembler::clrrwi(Register dst, Register src, const Operand& val,
+                       RCBit rc) {
+  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
+  rlwinm(dst, src, 0, 0, 31 - val.imm_, rc);
+}
+
+
+void Assembler::clrlwi(Register dst, Register src, const Operand& val,
+                       RCBit rc) {
+  DCHECK((32 > val.imm_) && (val.imm_ >= 0));
+  rlwinm(dst, src, 0, val.imm_, 31, rc);
+}
+
+
+void Assembler::srawi(Register ra, Register rs, int sh, RCBit r) {
+  emit(EXT2 | SRAWIX | rs.code() * B21 | ra.code() * B16 | sh * B11 | r);
+}
+
+
+void Assembler::srw(Register dst, Register src1, Register src2, RCBit r) {
+  x_form(EXT2 | SRWX, dst, src1, src2, r);
+}
+
+
+void Assembler::slw(Register dst, Register src1, Register src2, RCBit r) {
+  x_form(EXT2 | SLWX, dst, src1, src2, r);
+}
+
+
+void Assembler::sraw(Register ra, Register rs, Register rb, RCBit r) {
+  x_form(EXT2 | SRAW, ra, rs, rb, r);
+}
+
+
+void Assembler::rotlw(Register ra, Register rs, Register rb, RCBit r) {
+  rlwnm(ra, rs, rb, 0, 31, r);
+}
+
+
+void Assembler::rotlwi(Register ra, Register rs, int sh, RCBit r) {
+  rlwinm(ra, rs, sh, 0, 31, r);
+}
+
+
+void Assembler::rotrwi(Register ra, Register rs, int sh, RCBit r) {
+  rlwinm(ra, rs, 32 - sh, 0, 31, r);
+}
+
+
+void Assembler::subi(Register dst, Register src, const Operand& imm) {
+  addi(dst, src, Operand(-(imm.imm_)));
+}
+
+void Assembler::addc(Register dst, Register src1, Register src2, OEBit o,
+                     RCBit r) {
+  xo_form(EXT2 | ADDCX, dst, src1, src2, o, r);
+}
+
+
+void Assembler::addze(Register dst, Register src1, OEBit o, RCBit r) {
+  // a special xo_form
+  emit(EXT2 | ADDZEX | dst.code() * B21 | src1.code() * B16 | o | r);
+}
+
+
+void Assembler::sub(Register dst, Register src1, Register src2, OEBit o,
+                    RCBit r) {
+  xo_form(EXT2 | SUBFX, dst, src2, src1, o, r);
+}
+
+
+void Assembler::subfc(Register dst, Register src1, Register src2, OEBit o,
+                      RCBit r) {
+  xo_form(EXT2 | SUBFCX, dst, src2, src1, o, r);
+}
+
+
+void Assembler::subfic(Register dst, Register src, const Operand& imm) {
+  d_form(SUBFIC, dst, src, imm.imm_, true);
+}
+
+
+void Assembler::add(Register dst, Register src1, Register src2, OEBit o,
+                    RCBit r) {
+  xo_form(EXT2 | ADDX, dst, src1, src2, o, r);
+}
+
+
+// Multiply low word
+void Assembler::mullw(Register dst, Register src1, Register src2, OEBit o,
+                      RCBit r) {
+  xo_form(EXT2 | MULLW, dst, src1, src2, o, r);
+}
+
+
+// Multiply hi word
+void Assembler::mulhw(Register dst, Register src1, Register src2, OEBit o,
+                      RCBit r) {
+  xo_form(EXT2 | MULHWX, dst, src1, src2, o, r);
+}
+
+
+// Divide word
+void Assembler::divw(Register dst, Register src1, Register src2, OEBit o,
+                     RCBit r) {
+  xo_form(EXT2 | DIVW, dst, src1, src2, o, r);
+}
+
+
+void Assembler::addi(Register dst, Register src, const Operand& imm) {
+  DCHECK(!src.is(r0));  // use li instead to show intent
+  d_form(ADDI, dst, src, imm.imm_, true);
+}
+
+
+void Assembler::addis(Register dst, Register src, const Operand& imm) {
+  DCHECK(!src.is(r0));  // use lis instead to show intent
+  d_form(ADDIS, dst, src, imm.imm_, true);
+}
+
+
+void Assembler::addic(Register dst, Register src, const Operand& imm) {
+  d_form(ADDIC, dst, src, imm.imm_, true);
+}
+
+
+void Assembler::andi(Register ra, Register rs, const Operand& imm) {
+  d_form(ANDIx, rs, ra, imm.imm_, false);
+}
+
+
+void Assembler::andis(Register ra, Register rs, const Operand& imm) {
+  d_form(ANDISx, rs, ra, imm.imm_, false);
+}
+
+
+void Assembler::nor(Register dst, Register src1, Register src2, RCBit r) {
+  x_form(EXT2 | NORX, dst, src1, src2, r);
+}
+
+
+void Assembler::notx(Register dst, Register src, RCBit r) {
+  x_form(EXT2 | NORX, dst, src, src, r);
+}
+
+
+void Assembler::ori(Register ra, Register rs, const Operand& imm) {
+  d_form(ORI, rs, ra, imm.imm_, false);
+}
+
+
+void Assembler::oris(Register dst, Register src, const Operand& imm) {
+  d_form(ORIS, src, dst, imm.imm_, false);
+}
+
+
+void Assembler::orx(Register dst, Register src1, Register src2, RCBit rc) {
+  x_form(EXT2 | ORX, dst, src1, src2, rc);
+}
+
+
+void Assembler::cmpi(Register src1, const Operand& src2, CRegister cr) {
+  intptr_t imm16 = src2.imm_;
+#if V8_TARGET_ARCH_PPC64
+  int L = 1;
+#else
+  int L = 0;
+#endif
+  DCHECK(is_int16(imm16));
+  DCHECK(cr.code() >= 0 && cr.code() <= 7);
+  imm16 &= kImm16Mask;
+  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
+}
+
+
+void Assembler::cmpli(Register src1, const Operand& src2, CRegister cr) {
+  uintptr_t uimm16 = src2.imm_;
+#if V8_TARGET_ARCH_PPC64
+  int L = 1;
+#else
+  int L = 0;
+#endif
+  DCHECK(is_uint16(uimm16));
+  DCHECK(cr.code() >= 0 && cr.code() <= 7);
+  uimm16 &= kImm16Mask;
+  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
+}
+
+
+void Assembler::cmp(Register src1, Register src2, CRegister cr) {
+#if V8_TARGET_ARCH_PPC64
+  int L = 1;
+#else
+  int L = 0;
+#endif
+  DCHECK(cr.code() >= 0 && cr.code() <= 7);
+  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
+       src2.code() * B11);
+}
+
+
+void Assembler::cmpl(Register src1, Register src2, CRegister cr) {
+#if V8_TARGET_ARCH_PPC64
+  int L = 1;
+#else
+  int L = 0;
+#endif
+  DCHECK(cr.code() >= 0 && cr.code() <= 7);
+  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
+       src2.code() * B11);
+}
+
+
+void Assembler::cmpwi(Register src1, const Operand& src2, CRegister cr) {
+  intptr_t imm16 = src2.imm_;
+  int L = 0;
+  DCHECK(is_int16(imm16));
+  DCHECK(cr.code() >= 0 && cr.code() <= 7);
+  imm16 &= kImm16Mask;
+  emit(CMPI | cr.code() * B23 | L * B21 | src1.code() * B16 | imm16);
+}
+
+
+void Assembler::cmplwi(Register src1, const Operand& src2, CRegister cr) {
+  uintptr_t uimm16 = src2.imm_;
+  int L = 0;
+  DCHECK(is_uint16(uimm16));
+  DCHECK(cr.code() >= 0 && cr.code() <= 7);
+  uimm16 &= kImm16Mask;
+  emit(CMPLI | cr.code() * B23 | L * B21 | src1.code() * B16 | uimm16);
+}
+
+
+void Assembler::cmpw(Register src1, Register src2, CRegister cr) {
+  int L = 0;
+  DCHECK(cr.code() >= 0 && cr.code() <= 7);
+  emit(EXT2 | CMP | cr.code() * B23 | L * B21 | src1.code() * B16 |
+       src2.code() * B11);
+}
+
+
+void Assembler::cmplw(Register src1, Register src2, CRegister cr) {
+  int L = 0;
+  DCHECK(cr.code() >= 0 && cr.code() <= 7);
+  emit(EXT2 | CMPL | cr.code() * B23 | L * B21 | src1.code() * B16 |
+       src2.code() * B11);
+}
+
+
+// Pseudo op - load immediate
+void Assembler::li(Register dst, const Operand& imm) {
+  d_form(ADDI, dst, r0, imm.imm_, true);
+}
+
+
+void Assembler::lis(Register dst, const Operand& imm) {
+  d_form(ADDIS, dst, r0, imm.imm_, true);
+}
+
+
+// Pseudo op - move register
+void Assembler::mr(Register dst, Register src) {
+  // actually or(dst, src, src)
+  orx(dst, src, src);
+}
+
+
+void Assembler::lbz(Register dst, const MemOperand& src) {
+  DCHECK(!src.ra_.is(r0));
+  d_form(LBZ, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::lbzx(Register rt, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LBZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::lbzux(Register rt, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LBZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::lhz(Register dst, const MemOperand& src) {
+  DCHECK(!src.ra_.is(r0));
+  d_form(LHZ, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::lhzx(Register rt, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LHZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::lhzux(Register rt, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LHZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::lwz(Register dst, const MemOperand& src) {
+  DCHECK(!src.ra_.is(r0));
+  d_form(LWZ, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::lwzu(Register dst, const MemOperand& src) {
+  DCHECK(!src.ra_.is(r0));
+  d_form(LWZU, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::lwzx(Register rt, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LWZX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::lwzux(Register rt, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LWZUX | rt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::lwa(Register dst, const MemOperand& src) {
+#if V8_TARGET_ARCH_PPC64
+  int offset = src.offset();
+  DCHECK(!src.ra_.is(r0));
+  DCHECK(!(offset & 3) && is_int16(offset));
+  offset = kImm16Mask & offset;
+  emit(LD | dst.code() * B21 | src.ra().code() * B16 | offset | 2);
+#else
+  lwz(dst, src);
+#endif
+}
+
+
+void Assembler::stb(Register dst, const MemOperand& src) {
+  DCHECK(!src.ra_.is(r0));
+  d_form(STB, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::stbx(Register rs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STBX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::stbux(Register rs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STBUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::sth(Register dst, const MemOperand& src) {
+  DCHECK(!src.ra_.is(r0));
+  d_form(STH, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::sthx(Register rs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STHX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::sthux(Register rs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STHUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::stw(Register dst, const MemOperand& src) {
+  DCHECK(!src.ra_.is(r0));
+  d_form(STW, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::stwu(Register dst, const MemOperand& src) {
+  DCHECK(!src.ra_.is(r0));
+  d_form(STWU, dst, src.ra(), src.offset(), true);
+}
+
+
+void Assembler::stwx(Register rs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STWX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::stwux(Register rs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STWUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::extsb(Register rs, Register ra, RCBit rc) {
+  emit(EXT2 | EXTSB | ra.code() * B21 | rs.code() * B16 | rc);
+}
+
+
+void Assembler::extsh(Register rs, Register ra, RCBit rc) {
+  emit(EXT2 | EXTSH | ra.code() * B21 | rs.code() * B16 | rc);
+}
+
+
+void Assembler::neg(Register rt, Register ra, OEBit o, RCBit r) {
+  emit(EXT2 | NEGX | rt.code() * B21 | ra.code() * B16 | o | r);
+}
+
+
+void Assembler::andc(Register dst, Register src1, Register src2, RCBit rc) {
+  x_form(EXT2 | ANDCX, dst, src1, src2, rc);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+// 64-bit specific instructions
+void Assembler::ld(Register rd, const MemOperand& src) {
+  int offset = src.offset();
+  DCHECK(!src.ra_.is(r0));
+  DCHECK(!(offset & 3) && is_int16(offset));
+  offset = kImm16Mask & offset;
+  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset);
+}
+
+
+void Assembler::ldx(Register rd, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LDX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::ldu(Register rd, const MemOperand& src) {
+  int offset = src.offset();
+  DCHECK(!src.ra_.is(r0));
+  DCHECK(!(offset & 3) && is_int16(offset));
+  offset = kImm16Mask & offset;
+  emit(LD | rd.code() * B21 | src.ra().code() * B16 | offset | 1);
+}
+
+
+void Assembler::ldux(Register rd, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LDUX | rd.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::std(Register rs, const MemOperand& src) {
+  int offset = src.offset();
+  DCHECK(!src.ra_.is(r0));
+  DCHECK(!(offset & 3) && is_int16(offset));
+  offset = kImm16Mask & offset;
+  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset);
+}
+
+
+void Assembler::stdx(Register rs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STDX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::stdu(Register rs, const MemOperand& src) {
+  int offset = src.offset();
+  DCHECK(!src.ra_.is(r0));
+  DCHECK(!(offset & 3) && is_int16(offset));
+  offset = kImm16Mask & offset;
+  emit(STD | rs.code() * B21 | src.ra().code() * B16 | offset | 1);
+}
+
+
+void Assembler::stdux(Register rs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STDUX | rs.code() * B21 | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::rldic(Register ra, Register rs, int sh, int mb, RCBit r) {
+  md_form(EXT5 | RLDIC, ra, rs, sh, mb, r);
+}
+
+
+void Assembler::rldicl(Register ra, Register rs, int sh, int mb, RCBit r) {
+  md_form(EXT5 | RLDICL, ra, rs, sh, mb, r);
+}
+
+
+void Assembler::rldcl(Register ra, Register rs, Register rb, int mb, RCBit r) {
+  mds_form(EXT5 | RLDCL, ra, rs, rb, mb, r);
+}
+
+
+void Assembler::rldicr(Register ra, Register rs, int sh, int me, RCBit r) {
+  md_form(EXT5 | RLDICR, ra, rs, sh, me, r);
+}
+
+
+void Assembler::sldi(Register dst, Register src, const Operand& val, RCBit rc) {
+  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
+  rldicr(dst, src, val.imm_, 63 - val.imm_, rc);
+}
+
+
+void Assembler::srdi(Register dst, Register src, const Operand& val, RCBit rc) {
+  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
+  rldicl(dst, src, 64 - val.imm_, val.imm_, rc);
+}
+
+
+void Assembler::clrrdi(Register dst, Register src, const Operand& val,
+                       RCBit rc) {
+  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
+  rldicr(dst, src, 0, 63 - val.imm_, rc);
+}
+
+
+void Assembler::clrldi(Register dst, Register src, const Operand& val,
+                       RCBit rc) {
+  DCHECK((64 > val.imm_) && (val.imm_ >= 0));
+  rldicl(dst, src, 0, val.imm_, rc);
+}
+
+
+void Assembler::rldimi(Register ra, Register rs, int sh, int mb, RCBit r) {
+  md_form(EXT5 | RLDIMI, ra, rs, sh, mb, r);
+}
+
+
+void Assembler::sradi(Register ra, Register rs, int sh, RCBit r) {
+  int sh0_4 = sh & 0x1f;
+  int sh5 = (sh >> 5) & 0x1;
+
+  emit(EXT2 | SRADIX | rs.code() * B21 | ra.code() * B16 | sh0_4 * B11 |
+       sh5 * B1 | r);
+}
+
+
+void Assembler::srd(Register dst, Register src1, Register src2, RCBit r) {
+  x_form(EXT2 | SRDX, dst, src1, src2, r);
+}
+
+
+void Assembler::sld(Register dst, Register src1, Register src2, RCBit r) {
+  x_form(EXT2 | SLDX, dst, src1, src2, r);
+}
+
+
+void Assembler::srad(Register ra, Register rs, Register rb, RCBit r) {
+  x_form(EXT2 | SRAD, ra, rs, rb, r);
+}
+
+
+void Assembler::rotld(Register ra, Register rs, Register rb, RCBit r) {
+  rldcl(ra, rs, rb, 0, r);
+}
+
+
+void Assembler::rotldi(Register ra, Register rs, int sh, RCBit r) {
+  rldicl(ra, rs, sh, 0, r);
+}
+
+
+void Assembler::rotrdi(Register ra, Register rs, int sh, RCBit r) {
+  rldicl(ra, rs, 64 - sh, 0, r);
+}
+
+
+void Assembler::cntlzd_(Register ra, Register rs, RCBit rc) {
+  x_form(EXT2 | CNTLZDX, ra, rs, r0, rc);
+}
+
+
+void Assembler::extsw(Register rs, Register ra, RCBit rc) {
+  emit(EXT2 | EXTSW | ra.code() * B21 | rs.code() * B16 | rc);
+}
+
+
+void Assembler::mulld(Register dst, Register src1, Register src2, OEBit o,
+                      RCBit r) {
+  xo_form(EXT2 | MULLD, dst, src1, src2, o, r);
+}
+
+
+void Assembler::divd(Register dst, Register src1, Register src2, OEBit o,
+                     RCBit r) {
+  xo_form(EXT2 | DIVD, dst, src1, src2, o, r);
+}
+#endif
+
+
+void Assembler::fake_asm(enum FAKE_OPCODE_T fopcode) {
+  DCHECK(fopcode < fLastFaker);
+  emit(FAKE_OPCODE | FAKER_SUBOPCODE | fopcode);
+}
+
+
+void Assembler::marker_asm(int mcode) {
+  if (::v8::internal::FLAG_trace_sim_stubs) {
+    DCHECK(mcode < F_NEXT_AVAILABLE_STUB_MARKER);
+    emit(FAKE_OPCODE | MARKER_SUBOPCODE | mcode);
+  }
+}
+
+
+// Function descriptor for AIX.
+// Code address skips the function descriptor "header".
+// TOC and static chain are ignored and set to 0.
+void Assembler::function_descriptor() {
+  DCHECK(pc_offset() == 0);
+  RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
+  emit_ptr(reinterpret_cast<uintptr_t>(pc_) + 3 * kPointerSize);
+  emit_ptr(0);
+  emit_ptr(0);
+}
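+
+// The words emitted above form the descriptor (a sketch):
+//   fd[0] = code entry point (pc_ + 3 * kPointerSize, past this header)
+//   fd[1] = TOC              (0, ignored)
+//   fd[2] = static chain     (0, ignored)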
+
+
+#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
+void Assembler::RelocateInternalReference(Address pc, intptr_t delta,
+                                          Address code_start,
+                                          ICacheFlushMode icache_flush_mode) {
+  DCHECK(delta || code_start);
+#if ABI_USES_FUNCTION_DESCRIPTORS
+  uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
+  if (fd[1] == 0 && fd[2] == 0) {
+    // Function descriptor
+    if (delta) {
+      fd[0] += delta;
+    } else {
+      fd[0] = reinterpret_cast<uintptr_t>(code_start) + 3 * kPointerSize;
+    }
+    return;
+  }
+#endif
+#if V8_OOL_CONSTANT_POOL
+  // mov for LoadConstantPoolPointerRegister
+  ConstantPoolArray* constant_pool = NULL;
+  if (delta) {
+    code_start = target_address_at(pc, constant_pool) + delta;
+  }
+  set_target_address_at(pc, constant_pool, code_start, icache_flush_mode);
+#endif
+}
+
+
+int Assembler::DecodeInternalReference(Vector<char> buffer, Address pc) {
+#if ABI_USES_FUNCTION_DESCRIPTORS
+  uintptr_t* fd = reinterpret_cast<uintptr_t*>(pc);
+  if (fd[1] == 0 && fd[2] == 0) {
+    // Function descriptor
+    SNPrintF(buffer, "[%08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+                     "]"
+                     "   function descriptor",
+             fd[0], fd[1], fd[2]);
+    return kPointerSize * 3;
+  }
+#endif
+  return 0;
+}
+#endif
+
+
+int Assembler::instructions_required_for_mov(const Operand& x) const {
+#if V8_OOL_CONSTANT_POOL || DEBUG
+  bool canOptimize =
+      !(x.must_output_reloc_info(this) || is_trampoline_pool_blocked());
+#endif
+#if V8_OOL_CONSTANT_POOL
+  if (use_constant_pool_for_mov(x, canOptimize)) {
+    // Current usage guarantees that all constant pool references can
+    // use the same sequence.
+    return kMovInstructionsConstantPool;
+  }
+#endif
+  DCHECK(!canOptimize);
+  return kMovInstructionsNoConstantPool;
+}
+
+
+#if V8_OOL_CONSTANT_POOL
+bool Assembler::use_constant_pool_for_mov(const Operand& x,
+                                          bool canOptimize) const {
+  if (!is_ool_constant_pool_available() || is_constant_pool_full()) {
+    // If there is no constant pool available, we must use a mov
+    // immediate sequence.
+    return false;
+  }
+
+  intptr_t value = x.immediate();
+  if (canOptimize && is_int16(value)) {
+    // Prefer a single-instruction load-immediate.
+    return false;
+  }
+
+  return true;
+}
+
+
+void Assembler::EnsureSpaceFor(int space_needed) {
+  if (buffer_space() <= (kGap + space_needed)) {
+    GrowBuffer();
+  }
+}
+#endif
+
+
+bool Operand::must_output_reloc_info(const Assembler* assembler) const {
+  if (rmode_ == RelocInfo::EXTERNAL_REFERENCE) {
+    if (assembler != NULL && assembler->predictable_code_size()) return true;
+    return assembler->serializer_enabled();
+  } else if (RelocInfo::IsNone(rmode_)) {
+    return false;
+  }
+  return true;
+}
+
+
+// Primarily used for loading constants.  This should really move to
+// the macro-assembler, as it is really a pseudo instruction.  Some
+// usages of this intend for a FIXED_SEQUENCE to be used.
+// TODO: break this dependency so we can optimize mov() in general
+// and only use the generic version when we require a fixed sequence.
+void Assembler::mov(Register dst, const Operand& src) {
+  intptr_t value = src.immediate();
+  bool canOptimize;
+  RelocInfo rinfo(pc_, src.rmode_, value, NULL);
+
+  if (src.must_output_reloc_info(this)) {
+    RecordRelocInfo(rinfo);
+  }
+
+  canOptimize =
+      !(src.must_output_reloc_info(this) || is_trampoline_pool_blocked());
+
+#if V8_OOL_CONSTANT_POOL
+  if (use_constant_pool_for_mov(src, canOptimize)) {
+    DCHECK(is_ool_constant_pool_available());
+    ConstantPoolAddEntry(rinfo);
+#if V8_TARGET_ARCH_PPC64
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    // We are forced to use 2 instruction sequence since the constant
+    // pool pointer is tagged.
+    li(dst, Operand::Zero());
+    ldx(dst, MemOperand(kConstantPoolRegister, dst));
+#else
+    lwz(dst, MemOperand(kConstantPoolRegister, 0));
+#endif
+    return;
+  }
+#endif
+
+  if (canOptimize) {
+    if (is_int16(value)) {
+      li(dst, Operand(value));
+    } else {
+      uint16_t u16;
+#if V8_TARGET_ARCH_PPC64
+      if (is_int32(value)) {
+#endif
+        lis(dst, Operand(value >> 16));
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        if (is_int48(value)) {
+          li(dst, Operand(value >> 32));
+        } else {
+          lis(dst, Operand(value >> 48));
+          u16 = ((value >> 32) & 0xffff);
+          if (u16) {
+            ori(dst, dst, Operand(u16));
+          }
+        }
+        sldi(dst, dst, Operand(32));
+        u16 = ((value >> 16) & 0xffff);
+        if (u16) {
+          oris(dst, dst, Operand(u16));
+        }
+      }
+#endif
+      u16 = (value & 0xffff);
+      if (u16) {
+        ori(dst, dst, Operand(u16));
+      }
+    }
+    return;
+  }
+
+  DCHECK(!canOptimize);
+
+  {
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+#if V8_TARGET_ARCH_PPC64
+    int32_t hi_32 = static_cast<int32_t>(value >> 32);
+    int32_t lo_32 = static_cast<int32_t>(value);
+    int hi_word = static_cast<int>(hi_32 >> 16);
+    int lo_word = static_cast<int>(hi_32 & 0xffff);
+    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
+    ori(dst, dst, Operand(lo_word));
+    sldi(dst, dst, Operand(32));
+    hi_word = static_cast<int>(((lo_32 >> 16) & 0xffff));
+    lo_word = static_cast<int>(lo_32 & 0xffff);
+    oris(dst, dst, Operand(hi_word));
+    ori(dst, dst, Operand(lo_word));
+#else
+    int hi_word = static_cast<int>(value >> 16);
+    int lo_word = static_cast<int>(value & 0xffff);
+    lis(dst, Operand(SIGN_EXT_IMM16(hi_word)));
+    ori(dst, dst, Operand(lo_word));
+#endif
+  }
+}
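+
+// Worked example of the fixed 5-instruction 64-bit sequence above
+// (the constant is illustrative): mov(dst, Operand(0x123456789ABCDEF0))
+// emits
+//   lis  dst, 0x1234
+//   ori  dst, dst, 0x5678
+//   sldi dst, dst, 32
+//   oris dst, dst, 0x9ABC
+//   ori  dst, dst, 0xDEF0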
+
+
+void Assembler::mov_label_offset(Register dst, Label* label) {
+  if (label->is_bound()) {
+    int target = label->pos();
+    mov(dst, Operand(target + Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    bool is_linked = label->is_linked();
+    // Emit the link to the label in the code stream followed by extra
+    // nop instructions.
+    DCHECK(dst.is(r3));  // target_at_put assumes r3 for now
+    int link = is_linked ? label->pos() - pc_offset() : 0;
+    label->link_to(pc_offset());
+
+    if (!is_linked && !trampoline_emitted_) {
+      unbound_labels_count_++;
+      next_buffer_check_ -= kTrampolineSlotsSize;
+    }
+
+    // When the label is bound, these instructions will be patched
+    // with a 2 instruction mov sequence that will load the
+    // destination register with the position of the label from the
+    // beginning of the code.
+    //
+    // When the label gets bound: target_at extracts the link and
+    // target_at_put patches the instructions.
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    emit(link);
+    nop();
+  }
+}
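+
+// Patch sketch for the unbound case above: the raw link word and the
+// following nop are later rewritten by target_at_put() into
+//   lis r3, target_hi
+//   ori r3, r3, target_lo
+// where target is the label position plus Code::kHeaderSize minus
+// kHeapObjectTag.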
+
+
+// Special register instructions
+void Assembler::crxor(int bt, int ba, int bb) {
+  emit(EXT1 | CRXOR | bt * B21 | ba * B16 | bb * B11);
+}
+
+
+void Assembler::creqv(int bt, int ba, int bb) {
+  emit(EXT1 | CREQV | bt * B21 | ba * B16 | bb * B11);
+}
+
+
+void Assembler::mflr(Register dst) {
+  emit(EXT2 | MFSPR | dst.code() * B21 | 256 << 11);  // Ignore RC bit
+}
+
+
+void Assembler::mtlr(Register src) {
+  emit(EXT2 | MTSPR | src.code() * B21 | 256 << 11);  // Ignore RC bit
+}
+
+
+void Assembler::mtctr(Register src) {
+  emit(EXT2 | MTSPR | src.code() * B21 | 288 << 11);  // Ignore RC bit
+}
+
+
+void Assembler::mtxer(Register src) {
+  emit(EXT2 | MTSPR | src.code() * B21 | 32 << 11);
+}
+
+
+void Assembler::mcrfs(int bf, int bfa) {
+  emit(EXT4 | MCRFS | bf * B23 | bfa * B18);
+}
+
+
+void Assembler::mfcr(Register dst) { emit(EXT2 | MFCR | dst.code() * B21); }
+
+
+#if V8_TARGET_ARCH_PPC64
+void Assembler::mffprd(Register dst, DoubleRegister src) {
+  emit(EXT2 | MFVSRD | src.code() * B21 | dst.code() * B16);
+}
+
+
+void Assembler::mffprwz(Register dst, DoubleRegister src) {
+  emit(EXT2 | MFVSRWZ | src.code() * B21 | dst.code() * B16);
+}
+
+
+void Assembler::mtfprd(DoubleRegister dst, Register src) {
+  emit(EXT2 | MTVSRD | dst.code() * B21 | src.code() * B16);
+}
+
+
+void Assembler::mtfprwz(DoubleRegister dst, Register src) {
+  emit(EXT2 | MTVSRWZ | dst.code() * B21 | src.code() * B16);
+}
+
+
+void Assembler::mtfprwa(DoubleRegister dst, Register src) {
+  emit(EXT2 | MTVSRWA | dst.code() * B21 | src.code() * B16);
+}
+#endif
+
+
+// Exception-generating instructions and debugging support.
+// Stops with a non-negative code less than kNumOfWatchedStops support
+// enabling/disabling and a counter feature.  See simulator-ppc.h.
+void Assembler::stop(const char* msg, Condition cond, int32_t code,
+                     CRegister cr) {
+  if (cond != al) {
+    Label skip;
+    b(NegateCondition(cond), &skip, cr);
+    bkpt(0);
+    bind(&skip);
+  } else {
+    bkpt(0);
+  }
+}
+
+
+void Assembler::bkpt(uint32_t imm16) { emit(0x7d821008); }
+
+
+void Assembler::info(const char* msg, Condition cond, int32_t code,
+                     CRegister cr) {
+  if (::v8::internal::FLAG_trace_sim_stubs) {
+    emit(0x7d9ff808);
+#if V8_TARGET_ARCH_PPC64
+    uint64_t value = reinterpret_cast<uint64_t>(msg);
+    emit(static_cast<uint32_t>(value >> 32));
+    emit(static_cast<uint32_t>(value & 0xFFFFFFFF));
+#else
+    emit(reinterpret_cast<Instr>(msg));
+#endif
+  }
+}
+
+
+void Assembler::dcbf(Register ra, Register rb) {
+  emit(EXT2 | DCBF | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::sync() { emit(EXT2 | SYNC); }
+
+
+void Assembler::lwsync() { emit(EXT2 | SYNC | 1 * B21); }
+
+
+void Assembler::icbi(Register ra, Register rb) {
+  emit(EXT2 | ICBI | ra.code() * B16 | rb.code() * B11);
+}
+
+
+void Assembler::isync() { emit(EXT1 | ISYNC); }
+
+
+// Floating point support
+
+void Assembler::lfd(const DoubleRegister frt, const MemOperand& src) {
+  int offset = src.offset();
+  Register ra = src.ra();
+  DCHECK(is_int16(offset));
+  int imm16 = offset & kImm16Mask;
+  // could be x_form instruction with some casting magic
+  emit(LFD | frt.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::lfdu(const DoubleRegister frt, const MemOperand& src) {
+  int offset = src.offset();
+  Register ra = src.ra();
+  DCHECK(is_int16(offset));
+  int imm16 = offset & kImm16Mask;
+  // could be x_form instruction with some casting magic
+  emit(LFDU | frt.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::lfdx(const DoubleRegister frt, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LFDX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::lfdux(const DoubleRegister frt, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LFDUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::lfs(const DoubleRegister frt, const MemOperand& src) {
+  int offset = src.offset();
+  Register ra = src.ra();
+  DCHECK(is_int16(offset));
+  DCHECK(!ra.is(r0));
+  int imm16 = offset & kImm16Mask;
+  // could be x_form instruction with some casting magic
+  emit(LFS | frt.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::lfsu(const DoubleRegister frt, const MemOperand& src) {
+  int offset = src.offset();
+  Register ra = src.ra();
+  DCHECK(is_int16(offset));
+  DCHECK(!ra.is(r0));
+  int imm16 = offset & kImm16Mask;
+  // could be x_form instruction with some casting magic
+  emit(LFSU | frt.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::lfsx(const DoubleRegister frt, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LFSX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::lfsux(const DoubleRegister frt, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | LFSUX | frt.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::stfd(const DoubleRegister frs, const MemOperand& src) {
+  int offset = src.offset();
+  Register ra = src.ra();
+  DCHECK(is_int16(offset));
+  DCHECK(!ra.is(r0));
+  int imm16 = offset & kImm16Mask;
+  // could be x_form instruction with some casting magic
+  emit(STFD | frs.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::stfdu(const DoubleRegister frs, const MemOperand& src) {
+  int offset = src.offset();
+  Register ra = src.ra();
+  DCHECK(is_int16(offset));
+  DCHECK(!ra.is(r0));
+  int imm16 = offset & kImm16Mask;
+  // could be x_form instruction with some casting magic
+  emit(STFDU | frs.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::stfdx(const DoubleRegister frs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STFDX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::stfdux(const DoubleRegister frs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STFDUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::stfs(const DoubleRegister frs, const MemOperand& src) {
+  int offset = src.offset();
+  Register ra = src.ra();
+  DCHECK(is_int16(offset));
+  DCHECK(!ra.is(r0));
+  int imm16 = offset & kImm16Mask;
+  // could be x_form instruction with some casting magic
+  emit(STFS | frs.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::stfsu(const DoubleRegister frs, const MemOperand& src) {
+  int offset = src.offset();
+  Register ra = src.ra();
+  DCHECK(is_int16(offset));
+  DCHECK(!ra.is(r0));
+  int imm16 = offset & kImm16Mask;
+  // could be x_form instruction with some casting magic
+  emit(STFSU | frs.code() * B21 | ra.code() * B16 | imm16);
+}
+
+
+void Assembler::stfsx(const DoubleRegister frs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STFSX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::stfsux(const DoubleRegister frs, const MemOperand& src) {
+  Register ra = src.ra();
+  Register rb = src.rb();
+  DCHECK(!ra.is(r0));
+  emit(EXT2 | STFSUX | frs.code() * B21 | ra.code() * B16 | rb.code() * B11 |
+       LeaveRC);
+}
+
+
+void Assembler::fsub(const DoubleRegister frt, const DoubleRegister fra,
+                     const DoubleRegister frb, RCBit rc) {
+  a_form(EXT4 | FSUB, frt, fra, frb, rc);
+}
+
+
+void Assembler::fadd(const DoubleRegister frt, const DoubleRegister fra,
+                     const DoubleRegister frb, RCBit rc) {
+  a_form(EXT4 | FADD, frt, fra, frb, rc);
+}
+
+
+void Assembler::fmul(const DoubleRegister frt, const DoubleRegister fra,
+                     const DoubleRegister frc, RCBit rc) {
+  emit(EXT4 | FMUL | frt.code() * B21 | fra.code() * B16 | frc.code() * B6 |
+       rc);
+}
+
+
+void Assembler::fdiv(const DoubleRegister frt, const DoubleRegister fra,
+                     const DoubleRegister frb, RCBit rc) {
+  a_form(EXT4 | FDIV, frt, fra, frb, rc);
+}
+
+
+void Assembler::fcmpu(const DoubleRegister fra, const DoubleRegister frb,
+                      CRegister cr) {
+  DCHECK(cr.code() >= 0 && cr.code() <= 7);
+  emit(EXT4 | FCMPU | cr.code() * B23 | fra.code() * B16 | frb.code() * B11);
+}
+
+
+void Assembler::fmr(const DoubleRegister frt, const DoubleRegister frb,
+                    RCBit rc) {
+  emit(EXT4 | FMR | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fctiwz(const DoubleRegister frt, const DoubleRegister frb) {
+  emit(EXT4 | FCTIWZ | frt.code() * B21 | frb.code() * B11);
+}
+
+
+void Assembler::fctiw(const DoubleRegister frt, const DoubleRegister frb) {
+  emit(EXT4 | FCTIW | frt.code() * B21 | frb.code() * B11);
+}
+
+
+void Assembler::frim(const DoubleRegister frt, const DoubleRegister frb) {
+  emit(EXT4 | FRIM | frt.code() * B21 | frb.code() * B11);
+}
+
+
+void Assembler::frsp(const DoubleRegister frt, const DoubleRegister frb,
+                     RCBit rc) {
+  emit(EXT4 | FRSP | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fcfid(const DoubleRegister frt, const DoubleRegister frb,
+                      RCBit rc) {
+  emit(EXT4 | FCFID | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fctid(const DoubleRegister frt, const DoubleRegister frb,
+                      RCBit rc) {
+  emit(EXT4 | FCTID | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fctidz(const DoubleRegister frt, const DoubleRegister frb,
+                       RCBit rc) {
+  emit(EXT4 | FCTIDZ | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fsel(const DoubleRegister frt, const DoubleRegister fra,
+                     const DoubleRegister frc, const DoubleRegister frb,
+                     RCBit rc) {
+  emit(EXT4 | FSEL | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
+       frc.code() * B6 | rc);
+}
+
+
+void Assembler::fneg(const DoubleRegister frt, const DoubleRegister frb,
+                     RCBit rc) {
+  emit(EXT4 | FNEG | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::mtfsfi(int bf, int immediate, RCBit rc) {
+  emit(EXT4 | MTFSFI | bf * B23 | immediate * B12 | rc);
+}
+
+
+void Assembler::mffs(const DoubleRegister frt, RCBit rc) {
+  emit(EXT4 | MFFS | frt.code() * B21 | rc);
+}
+
+
+void Assembler::mtfsf(const DoubleRegister frb, bool L, int FLM, bool W,
+                      RCBit rc) {
+  emit(EXT4 | MTFSF | frb.code() * B11 | W * B16 | FLM * B17 | L * B25 | rc);
+}
+
+
+void Assembler::fsqrt(const DoubleRegister frt, const DoubleRegister frb,
+                      RCBit rc) {
+  emit(EXT4 | FSQRT | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fabs(const DoubleRegister frt, const DoubleRegister frb,
+                     RCBit rc) {
+  emit(EXT4 | FABS | frt.code() * B21 | frb.code() * B11 | rc);
+}
+
+
+void Assembler::fmadd(const DoubleRegister frt, const DoubleRegister fra,
+                      const DoubleRegister frc, const DoubleRegister frb,
+                      RCBit rc) {
+  emit(EXT4 | FMADD | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
+       frc.code() * B6 | rc);
+}
+
+
+void Assembler::fmsub(const DoubleRegister frt, const DoubleRegister fra,
+                      const DoubleRegister frc, const DoubleRegister frb,
+                      RCBit rc) {
+  emit(EXT4 | FMSUB | frt.code() * B21 | fra.code() * B16 | frb.code() * B11 |
+       frc.code() * B6 | rc);
+}
+
+
+// Pseudo instructions.
+void Assembler::nop(int type) {
+  Register reg = r0;
+  switch (type) {
+    case NON_MARKING_NOP:
+      reg = r0;
+      break;
+    case GROUP_ENDING_NOP:
+      reg = r2;
+      break;
+    case DEBUG_BREAK_NOP:
+      reg = r3;
+      break;
+    default:
+      UNIMPLEMENTED();
+  }
+
+  ori(reg, reg, Operand::Zero());
+}
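+
+// Marker encodings produced above:
+//   NON_MARKING_NOP  -> ori r0, r0, 0
+//   GROUP_ENDING_NOP -> ori r2, r2, 0
+//   DEBUG_BREAK_NOP  -> ori r3, r3, 0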
+
+
+bool Assembler::IsNop(Instr instr, int type) {
+  int reg = 0;
+  switch (type) {
+    case NON_MARKING_NOP:
+      reg = 0;
+      break;
+    case GROUP_ENDING_NOP:
+      reg = 2;
+      break;
+    case DEBUG_BREAK_NOP:
+      reg = 3;
+      break;
+    default:
+      UNIMPLEMENTED();
+  }
+  return instr == (ORI | reg * B21 | reg * B16);
+}
+
+
+// Debugging.
+void Assembler::RecordJSReturn() {
+  positions_recorder()->WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::JS_RETURN);
+}
+
+
+void Assembler::RecordDebugBreakSlot() {
+  positions_recorder()->WriteRecordedPositions();
+  CheckBuffer();
+  RecordRelocInfo(RelocInfo::DEBUG_BREAK_SLOT);
+}
+
+
+void Assembler::RecordComment(const char* msg) {
+  if (FLAG_code_comments) {
+    CheckBuffer();
+    RecordRelocInfo(RelocInfo::COMMENT, reinterpret_cast<intptr_t>(msg));
+  }
+}
+
+
+void Assembler::GrowBuffer() {
+  if (!own_buffer_) FATAL("external code buffer is too small");
+
+  // Compute new buffer size.
+  CodeDesc desc;  // the new buffer
+  if (buffer_size_ < 4 * KB) {
+    desc.buffer_size = 4 * KB;
+  } else if (buffer_size_ < 1 * MB) {
+    desc.buffer_size = 2 * buffer_size_;
+  } else {
+    desc.buffer_size = buffer_size_ + 1 * MB;
+  }
+  CHECK_GT(desc.buffer_size, 0);  // no overflow
+
+  // Set up new buffer.
+  desc.buffer = NewArray<byte>(desc.buffer_size);
+
+  desc.instr_size = pc_offset();
+  desc.reloc_size = (buffer_ + buffer_size_) - reloc_info_writer.pos();
+
+  // Copy the data.
+  intptr_t pc_delta = desc.buffer - buffer_;
+  intptr_t rc_delta =
+      (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
+  memmove(desc.buffer, buffer_, desc.instr_size);
+  memmove(reloc_info_writer.pos() + rc_delta, reloc_info_writer.pos(),
+          desc.reloc_size);
+
+  // Switch buffers.
+  DeleteArray(buffer_);
+  buffer_ = desc.buffer;
+  buffer_size_ = desc.buffer_size;
+  pc_ += pc_delta;
+  reloc_info_writer.Reposition(reloc_info_writer.pos() + rc_delta,
+                               reloc_info_writer.last_pc() + pc_delta);
+
+// None of our relocation types are pc-relative pointing outside the code
+// buffer, nor pc-absolute pointing inside the code buffer, so there is no
+// need to relocate any emitted relocation entries.
+
+#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
+  // Relocate runtime entries.
+  for (RelocIterator it(desc); !it.done(); it.next()) {
+    RelocInfo::Mode rmode = it.rinfo()->rmode();
+    if (rmode == RelocInfo::INTERNAL_REFERENCE) {
+      RelocateInternalReference(it.rinfo()->pc(), pc_delta, 0);
+    }
+  }
+#if V8_OOL_CONSTANT_POOL
+  constant_pool_builder_.Relocate(pc_delta);
+#endif
+#endif
+}
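+
+// Growth policy sketch (sizes follow the branches above): anything
+// below 4KB is grown to 4KB, a 16KB buffer doubles to 32KB, and a 2MB
+// buffer grows linearly to 3MB.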
+
+
+void Assembler::db(uint8_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint8_t*>(pc_) = data;
+  pc_ += sizeof(uint8_t);
+}
+
+
+void Assembler::dd(uint32_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uint32_t*>(pc_) = data;
+  pc_ += sizeof(uint32_t);
+}
+
+
+void Assembler::emit_ptr(uintptr_t data) {
+  CheckBuffer();
+  *reinterpret_cast<uintptr_t*>(pc_) = data;
+  pc_ += sizeof(uintptr_t);
+}
+
+
+void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
+  RelocInfo rinfo(pc_, rmode, data, NULL);
+  RecordRelocInfo(rinfo);
+}
+
+
+void Assembler::RecordRelocInfo(const RelocInfo& rinfo) {
+  if (rinfo.rmode() >= RelocInfo::JS_RETURN &&
+      rinfo.rmode() <= RelocInfo::DEBUG_BREAK_SLOT) {
+    // Adjust code for new modes.
+    DCHECK(RelocInfo::IsDebugBreakSlot(rinfo.rmode()) ||
+           RelocInfo::IsJSReturn(rinfo.rmode()) ||
+           RelocInfo::IsComment(rinfo.rmode()) ||
+           RelocInfo::IsPosition(rinfo.rmode()));
+  }
+  if (!RelocInfo::IsNone(rinfo.rmode())) {
+    // Don't record external references unless the heap will be serialized.
+    if (rinfo.rmode() == RelocInfo::EXTERNAL_REFERENCE) {
+      if (!serializer_enabled() && !emit_debug_code()) {
+        return;
+      }
+    }
+    DCHECK(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
+    if (rinfo.rmode() == RelocInfo::CODE_TARGET_WITH_ID) {
+      RelocInfo reloc_info_with_ast_id(rinfo.pc(), rinfo.rmode(),
+                                       RecordedAstId().ToInt(), NULL);
+      ClearRecordedAstId();
+      reloc_info_writer.Write(&reloc_info_with_ast_id);
+    } else {
+      reloc_info_writer.Write(&rinfo);
+    }
+  }
+}
+
+
+void Assembler::BlockTrampolinePoolFor(int instructions) {
+  BlockTrampolinePoolBefore(pc_offset() + instructions * kInstrSize);
+}
+
+
+void Assembler::CheckTrampolinePool() {
+  // Some small sequences of instructions must not be broken up by the
+  // insertion of a trampoline pool; such sequences are protected by setting
+  // either trampoline_pool_blocked_nesting_ or no_trampoline_pool_before_,
+  // which are both checked here. Also, recursive calls to CheckTrampolinePool
+  // are blocked by trampoline_pool_blocked_nesting_.
+  if ((trampoline_pool_blocked_nesting_ > 0) ||
+      (pc_offset() < no_trampoline_pool_before_)) {
+    // Emission is currently blocked; make sure we try again as soon as
+    // possible.
+    if (trampoline_pool_blocked_nesting_ > 0) {
+      next_buffer_check_ = pc_offset() + kInstrSize;
+    } else {
+      next_buffer_check_ = no_trampoline_pool_before_;
+    }
+    return;
+  }
+
+  DCHECK(!trampoline_emitted_);
+  DCHECK(unbound_labels_count_ >= 0);
+  if (unbound_labels_count_ > 0) {
+    // First we emit jump, then we emit trampoline pool.
+    {
+      BlockTrampolinePoolScope block_trampoline_pool(this);
+      Label after_pool;
+      b(&after_pool);
+
+      int pool_start = pc_offset();
+      for (int i = 0; i < unbound_labels_count_; i++) {
+        b(&after_pool);
+      }
+      bind(&after_pool);
+      trampoline_ = Trampoline(pool_start, unbound_labels_count_);
+
+      trampoline_emitted_ = true;
+      // As we are only going to emit trampoline once, we need to prevent any
+      // further emission.
+      next_buffer_check_ = kMaxInt;
+    }
+  } else {
+    // Number of branches to unbound label at this point is zero, so we can
+    // move next buffer check to maximum.
+    next_buffer_check_ =
+        pc_offset() + kMaxCondBranchReach - kMaxBlockTrampolineSectionSize;
+  }
+  return;
+}
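+
+// Pool layout sketch for unbound_labels_count_ == 2 (illustrative):
+//   b after_pool   ; jump over the pool
+//   b after_pool   ; slot 0, patched later via target_at_put()
+//   b after_pool   ; slot 1, patched later via target_at_put()
+// after_pool: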
+
+
+Handle<ConstantPoolArray> Assembler::NewConstantPool(Isolate* isolate) {
+#if V8_OOL_CONSTANT_POOL
+  return constant_pool_builder_.New(isolate);
+#else
+  // No out-of-line constant pool support.
+  DCHECK(!FLAG_enable_ool_constant_pool);
+  return isolate->factory()->empty_constant_pool_array();
+#endif
+}
+
+
+void Assembler::PopulateConstantPool(ConstantPoolArray* constant_pool) {
+#if V8_OOL_CONSTANT_POOL
+  constant_pool_builder_.Populate(this, constant_pool);
+#else
+  // No out-of-line constant pool support.
+  DCHECK(!FLAG_enable_ool_constant_pool);
+#endif
+}
+
+
+#if V8_OOL_CONSTANT_POOL
+ConstantPoolBuilder::ConstantPoolBuilder()
+    : size_(0),
+      entries_(),
+      current_section_(ConstantPoolArray::SMALL_SECTION) {}
+
+
+bool ConstantPoolBuilder::IsEmpty() { return entries_.size() == 0; }
+
+
+ConstantPoolArray::Type ConstantPoolBuilder::GetConstantPoolType(
+    RelocInfo::Mode rmode) {
+#if V8_TARGET_ARCH_PPC64
+  // We don't support 32-bit entries at this time.
+  if (!RelocInfo::IsGCRelocMode(rmode)) {
+    return ConstantPoolArray::INT64;
+#else
+  if (rmode == RelocInfo::NONE64) {
+    return ConstantPoolArray::INT64;
+  } else if (!RelocInfo::IsGCRelocMode(rmode)) {
+    return ConstantPoolArray::INT32;
+#endif
+  } else if (RelocInfo::IsCodeTarget(rmode)) {
+    return ConstantPoolArray::CODE_PTR;
+  } else {
+    DCHECK(RelocInfo::IsGCRelocMode(rmode) && !RelocInfo::IsCodeTarget(rmode));
+    return ConstantPoolArray::HEAP_PTR;
+  }
+}
+
+
+ConstantPoolArray::LayoutSection ConstantPoolBuilder::AddEntry(
+    Assembler* assm, const RelocInfo& rinfo) {
+  RelocInfo::Mode rmode = rinfo.rmode();
+  DCHECK(rmode != RelocInfo::COMMENT && rmode != RelocInfo::POSITION &&
+         rmode != RelocInfo::STATEMENT_POSITION &&
+         rmode != RelocInfo::CONST_POOL);
+
+  // Try to merge entries which won't be patched.
+  int merged_index = -1;
+  ConstantPoolArray::LayoutSection entry_section = current_section_;
+  if (RelocInfo::IsNone(rmode) ||
+      (!assm->serializer_enabled() && (rmode >= RelocInfo::CELL))) {
+    size_t i;
+    std::vector<ConstantPoolEntry>::const_iterator it;
+    for (it = entries_.begin(), i = 0; it != entries_.end(); it++, i++) {
+      if (RelocInfo::IsEqual(rinfo, it->rinfo_)) {
+        // Merge with found entry.
+        merged_index = i;
+        entry_section = entries_[i].section_;
+        break;
+      }
+    }
+  }
+  DCHECK(entry_section <= current_section_);
+  entries_.push_back(ConstantPoolEntry(rinfo, entry_section, merged_index));
+
+  if (merged_index == -1) {
+    // Not merged, so update the appropriate count.
+    number_of_entries_[entry_section].increment(GetConstantPoolType(rmode));
+  }
+
+  // Check if we still have room for another entry in the small section
+  // given the limitations of the header's layout fields.
+  if (current_section_ == ConstantPoolArray::SMALL_SECTION) {
+    size_ = ConstantPoolArray::SizeFor(*small_entries());
+    if (!is_uint12(size_)) {
+      current_section_ = ConstantPoolArray::EXTENDED_SECTION;
+    }
+  } else {
+    size_ = ConstantPoolArray::SizeForExtended(*small_entries(),
+                                               *extended_entries());
+  }
+
+  return entry_section;
+}
+
+
+void ConstantPoolBuilder::Relocate(intptr_t pc_delta) {
+  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+       entry != entries_.end(); entry++) {
+    DCHECK(entry->rinfo_.rmode() != RelocInfo::JS_RETURN);
+    entry->rinfo_.set_pc(entry->rinfo_.pc() + pc_delta);
+  }
+}
+
+
+Handle<ConstantPoolArray> ConstantPoolBuilder::New(Isolate* isolate) {
+  if (IsEmpty()) {
+    return isolate->factory()->empty_constant_pool_array();
+  } else if (extended_entries()->is_empty()) {
+    return isolate->factory()->NewConstantPoolArray(*small_entries());
+  } else {
+    DCHECK(current_section_ == ConstantPoolArray::EXTENDED_SECTION);
+    return isolate->factory()->NewExtendedConstantPoolArray(
+        *small_entries(), *extended_entries());
+  }
+}
+
+
+void ConstantPoolBuilder::Populate(Assembler* assm,
+                                   ConstantPoolArray* constant_pool) {
+  DCHECK_EQ(extended_entries()->is_empty(),
+            !constant_pool->is_extended_layout());
+  DCHECK(small_entries()->equals(ConstantPoolArray::NumberOfEntries(
+      constant_pool, ConstantPoolArray::SMALL_SECTION)));
+  if (constant_pool->is_extended_layout()) {
+    DCHECK(extended_entries()->equals(ConstantPoolArray::NumberOfEntries(
+        constant_pool, ConstantPoolArray::EXTENDED_SECTION)));
+  }
+
+  // Set up initial offsets.
+  int offsets[ConstantPoolArray::NUMBER_OF_LAYOUT_SECTIONS]
+             [ConstantPoolArray::NUMBER_OF_TYPES];
+  for (int section = 0; section <= constant_pool->final_section(); section++) {
+    int section_start = (section == ConstantPoolArray::EXTENDED_SECTION)
+                            ? small_entries()->total_count()
+                            : 0;
+    for (int i = 0; i < ConstantPoolArray::NUMBER_OF_TYPES; i++) {
+      ConstantPoolArray::Type type = static_cast<ConstantPoolArray::Type>(i);
+      if (number_of_entries_[section].count_of(type) != 0) {
+        offsets[section][type] = constant_pool->OffsetOfElementAt(
+            number_of_entries_[section].base_of(type) + section_start);
+      }
+    }
+  }
+
+  for (std::vector<ConstantPoolEntry>::iterator entry = entries_.begin();
+       entry != entries_.end(); entry++) {
+    RelocInfo rinfo = entry->rinfo_;
+    RelocInfo::Mode rmode = entry->rinfo_.rmode();
+    ConstantPoolArray::Type type = GetConstantPoolType(rmode);
+
+    // Update constant pool if necessary and get the entry's offset.
+    int offset;
+    if (entry->merged_index_ == -1) {
+      offset = offsets[entry->section_][type];
+      offsets[entry->section_][type] += ConstantPoolArray::entry_size(type);
+      if (type == ConstantPoolArray::INT64) {
+#if V8_TARGET_ARCH_PPC64
+        constant_pool->set_at_offset(offset, rinfo.data());
+#else
+        constant_pool->set_at_offset(offset, rinfo.data64());
+      } else if (type == ConstantPoolArray::INT32) {
+        constant_pool->set_at_offset(offset,
+                                     static_cast<int32_t>(rinfo.data()));
+#endif
+      } else if (type == ConstantPoolArray::CODE_PTR) {
+        constant_pool->set_at_offset(offset,
+                                     reinterpret_cast<Address>(rinfo.data()));
+      } else {
+        DCHECK(type == ConstantPoolArray::HEAP_PTR);
+        constant_pool->set_at_offset(offset,
+                                     reinterpret_cast<Object*>(rinfo.data()));
+      }
+      offset -= kHeapObjectTag;
+      entry->merged_index_ = offset;  // Stash offset for merged entries.
+    } else {
+      DCHECK(entry->merged_index_ < (entry - entries_.begin()));
+      offset = entries_[entry->merged_index_].merged_index_;
+    }
+
+    // Patch load instruction with correct offset.
+    Assembler::SetConstantPoolOffset(rinfo.pc(), offset);
+  }
+}
+#endif
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/assembler-ppc.h b/deps/v8/src/ppc/assembler-ppc.h
new file mode 100644 (file)
index 0000000..2b112d6
--- /dev/null
@@ -0,0 +1,1493 @@
+// Copyright (c) 1994-2006 Sun Microsystems Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions
+// are met:
+//
+// - Redistributions of source code must retain the above copyright notice,
+// this list of conditions and the following disclaimer.
+//
+// - Redistribution in binary form must reproduce the above copyright
+// notice, this list of conditions and the following disclaimer in the
+// documentation and/or other materials provided with the
+// distribution.
+//
+// - Neither the name of Sun Microsystems or the names of contributors may
+// be used to endorse or promote products derived from this software without
+// specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+// INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+// SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+// HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+// STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
+// OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// The original source code covered by the license above has been
+// modified significantly by Google Inc.
+// Copyright 2014 the V8 project authors. All rights reserved.
+
+// A light-weight PPC Assembler.
+// Generates user mode instructions for the PPC architecture.
+
+#ifndef V8_PPC_ASSEMBLER_PPC_H_
+#define V8_PPC_ASSEMBLER_PPC_H_
+
+#include <stdio.h>
+#include <vector>
+
+#include "src/assembler.h"
+#include "src/ppc/constants-ppc.h"
+#include "src/serialize.h"
+
+#define ABI_USES_FUNCTION_DESCRIPTORS \
+  (V8_HOST_ARCH_PPC &&                \
+   (V8_OS_AIX || (V8_TARGET_ARCH_PPC64 && V8_TARGET_BIG_ENDIAN)))
+
+#define ABI_PASSES_HANDLES_IN_REGS \
+  (!V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64)
+
+#define ABI_RETURNS_HANDLES_IN_REGS \
+  (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN)
+
+#define ABI_RETURNS_OBJECT_PAIRS_IN_REGS \
+  (!V8_HOST_ARCH_PPC || V8_TARGET_LITTLE_ENDIAN)
+
+#define ABI_TOC_ADDRESSABILITY_VIA_IP \
+  (V8_HOST_ARCH_PPC && V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN)
+
+#if !V8_HOST_ARCH_PPC || V8_OS_AIX || V8_TARGET_ARCH_PPC64
+#define ABI_TOC_REGISTER kRegister_r2_Code
+#else
+#define ABI_TOC_REGISTER kRegister_r13_Code
+#endif
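+// Note on the choice above (stated as an assumption from the usual ABI
+// conventions): r2 holds the TOC pointer in the AIX and 64-bit ELF ABIs,
+// while the 32-bit SVR4 ABI has no TOC and uses r13 as its small-data
+// anchor, which serves the equivalent role here.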
+
+#define INSTR_AND_DATA_CACHE_COHERENCY LWSYNC
+
+namespace v8 {
+namespace internal {
+
+// CPU Registers.
+//
+// 1) We would prefer to use an enum, but enum values are assignment-
+// compatible with int, which has caused code-generation bugs.
+//
+// 2) We would prefer to use a class instead of a struct but we don't like
+// the register initialization to depend on the particular initialization
+// order (which appears to be different on OS X, Linux, and Windows for the
+// installed versions of C++ we tried). Using a struct permits C-style
+// "initialization". Also, the Register objects cannot be const as this
+// forces initialization stubs in MSVC, making us dependent on initialization
+// order.
+//
+// 3) By not using an enum, we are possibly preventing the compiler from
+// doing certain constant folds, which may significantly reduce the
+// code generated for some assembly instructions (because they boil down
+// to a few constants). If this is a problem, we could change the code
+// such that we use an enum in optimized mode, and the struct in debug
+// mode. This way we get the compile-time error checking in debug mode
+// and best performance in optimized code.
+
+// Core register
+struct Register {
+  static const int kNumRegisters = 32;
+  static const int kSizeInBytes = kPointerSize;
+
+#if V8_TARGET_LITTLE_ENDIAN
+  static const int kMantissaOffset = 0;
+  static const int kExponentOffset = 4;
+#else
+  static const int kMantissaOffset = 4;
+  static const int kExponentOffset = 0;
+#endif
+
+  static const int kAllocatableLowRangeBegin = 3;
+  static const int kAllocatableLowRangeEnd = 10;
+  static const int kAllocatableHighRangeBegin = 14;
+#if V8_OOL_CONSTANT_POOL
+  static const int kAllocatableHighRangeEnd = 27;
+#else
+  static const int kAllocatableHighRangeEnd = 28;
+#endif
+  static const int kAllocatableContext = 30;
+
+  static const int kNumAllocatableLow =
+      kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1;
+  static const int kNumAllocatableHigh =
+      kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1;
+  static const int kMaxNumAllocatableRegisters =
+      kNumAllocatableLow + kNumAllocatableHigh + 1;  // cp
+
+  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+  static int ToAllocationIndex(Register reg) {
+    int index;
+    int code = reg.code();
+    if (code == kAllocatableContext) {
+      // Context is the last index
+      index = NumAllocatableRegisters() - 1;
+    } else if (code <= kAllocatableLowRangeEnd) {
+      // low range
+      index = code - kAllocatableLowRangeBegin;
+    } else {
+      // high range
+      index = code - kAllocatableHighRangeBegin + kNumAllocatableLow;
+    }
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+    return index;
+  }
+
+  static Register FromAllocationIndex(int index) {
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+    // Last index is always the 'cp' register.
+    if (index == kMaxNumAllocatableRegisters - 1) {
+      return from_code(kAllocatableContext);
+    }
+    return (index < kNumAllocatableLow)
+               ? from_code(index + kAllocatableLowRangeBegin)
+               : from_code(index - kNumAllocatableLow +
+                           kAllocatableHighRangeBegin);
+  }
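+  // Worked example (with V8_OOL_CONSTANT_POOL, so the high range ends at
+  // r27): kNumAllocatableLow == 8 and kNumAllocatableHigh == 14, giving
+  //   ToAllocationIndex(r4)  == 1   // low range: 4 - 3
+  //   ToAllocationIndex(r14) == 8   // first entry of the high range
+  //   ToAllocationIndex(cp)  == 22  // kMaxNumAllocatableRegisters - 1
+  // and FromAllocationIndex() inverts each of these mappings.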
+
+  static const char* AllocationIndexToString(int index) {
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+    const char* const names[] = {
+      "r3",
+      "r4",
+      "r5",
+      "r6",
+      "r7",
+      "r8",
+      "r9",
+      "r10",
+      "r14",
+      "r15",
+      "r16",
+      "r17",
+      "r18",
+      "r19",
+      "r20",
+      "r21",
+      "r22",
+      "r23",
+      "r24",
+      "r25",
+      "r26",
+      "r27",
+#if !V8_OOL_CONSTANT_POOL
+      "r28",
+#endif
+      "cp",
+    };
+    return names[index];
+  }
+
+  static Register from_code(int code) {
+    Register r = {code};
+    return r;
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kNumRegisters; }
+  bool is(Register reg) const { return code_ == reg.code_; }
+  int code() const {
+    DCHECK(is_valid());
+    return code_;
+  }
+  int bit() const {
+    DCHECK(is_valid());
+    return 1 << code_;
+  }
+
+  void set_code(int code) {
+    code_ = code;
+    DCHECK(is_valid());
+  }
+
+  // Unfortunately we can't make this private in a struct.
+  int code_;
+};
+
+// These constants are used in several locations, including static initializers
+const int kRegister_no_reg_Code = -1;
+const int kRegister_r0_Code = 0;  // general scratch
+const int kRegister_sp_Code = 1;  // stack pointer
+const int kRegister_r2_Code = 2;  // special on PowerPC
+const int kRegister_r3_Code = 3;
+const int kRegister_r4_Code = 4;
+const int kRegister_r5_Code = 5;
+const int kRegister_r6_Code = 6;
+const int kRegister_r7_Code = 7;
+const int kRegister_r8_Code = 8;
+const int kRegister_r9_Code = 9;
+const int kRegister_r10_Code = 10;
+const int kRegister_r11_Code = 11;  // lithium scratch
+const int kRegister_ip_Code = 12;   // ip (general scratch)
+const int kRegister_r13_Code = 13;  // special on PowerPC
+const int kRegister_r14_Code = 14;
+const int kRegister_r15_Code = 15;
+
+const int kRegister_r16_Code = 16;
+const int kRegister_r17_Code = 17;
+const int kRegister_r18_Code = 18;
+const int kRegister_r19_Code = 19;
+const int kRegister_r20_Code = 20;
+const int kRegister_r21_Code = 21;
+const int kRegister_r22_Code = 22;
+const int kRegister_r23_Code = 23;
+const int kRegister_r24_Code = 24;
+const int kRegister_r25_Code = 25;
+const int kRegister_r26_Code = 26;
+const int kRegister_r27_Code = 27;
+const int kRegister_r28_Code = 28;  // constant pool pointer
+const int kRegister_r29_Code = 29;  // roots array pointer
+const int kRegister_r30_Code = 30;  // context pointer
+const int kRegister_fp_Code = 31;   // frame pointer
+
+const Register no_reg = {kRegister_no_reg_Code};
+
+const Register r0 = {kRegister_r0_Code};
+const Register sp = {kRegister_sp_Code};
+const Register r2 = {kRegister_r2_Code};
+const Register r3 = {kRegister_r3_Code};
+const Register r4 = {kRegister_r4_Code};
+const Register r5 = {kRegister_r5_Code};
+const Register r6 = {kRegister_r6_Code};
+const Register r7 = {kRegister_r7_Code};
+const Register r8 = {kRegister_r8_Code};
+const Register r9 = {kRegister_r9_Code};
+const Register r10 = {kRegister_r10_Code};
+const Register r11 = {kRegister_r11_Code};
+const Register ip = {kRegister_ip_Code};
+const Register r13 = {kRegister_r13_Code};
+const Register r14 = {kRegister_r14_Code};
+const Register r15 = {kRegister_r15_Code};
+
+const Register r16 = {kRegister_r16_Code};
+const Register r17 = {kRegister_r17_Code};
+const Register r18 = {kRegister_r18_Code};
+const Register r19 = {kRegister_r19_Code};
+const Register r20 = {kRegister_r20_Code};
+const Register r21 = {kRegister_r21_Code};
+const Register r22 = {kRegister_r22_Code};
+const Register r23 = {kRegister_r23_Code};
+const Register r24 = {kRegister_r24_Code};
+const Register r25 = {kRegister_r25_Code};
+const Register r26 = {kRegister_r26_Code};
+const Register r27 = {kRegister_r27_Code};
+const Register r28 = {kRegister_r28_Code};
+const Register r29 = {kRegister_r29_Code};
+const Register r30 = {kRegister_r30_Code};
+const Register fp = {kRegister_fp_Code};
+
+// Give alias names to registers
+const Register cp = {kRegister_r30_Code};  // JavaScript context pointer
+const Register kRootRegister = {kRegister_r29_Code};  // Roots array pointer.
+#if V8_OOL_CONSTANT_POOL
+const Register kConstantPoolRegister = {kRegister_r28_Code};  // Constant pool
+#endif
+
+// Double word FP register.
+struct DoubleRegister {
+  static const int kNumRegisters = 32;
+  static const int kMaxNumRegisters = kNumRegisters;
+  static const int kNumVolatileRegisters = 14;  // d0-d13
+  static const int kSizeInBytes = 8;
+
+  static const int kAllocatableLowRangeBegin = 1;
+  static const int kAllocatableLowRangeEnd = 12;
+  static const int kAllocatableHighRangeBegin = 15;
+  static const int kAllocatableHighRangeEnd = 31;
+
+  static const int kNumAllocatableLow =
+      kAllocatableLowRangeEnd - kAllocatableLowRangeBegin + 1;
+  static const int kNumAllocatableHigh =
+      kAllocatableHighRangeEnd - kAllocatableHighRangeBegin + 1;
+  static const int kMaxNumAllocatableRegisters =
+      kNumAllocatableLow + kNumAllocatableHigh;
+  static int NumAllocatableRegisters() { return kMaxNumAllocatableRegisters; }
+
+  // TODO(turbofan)
+  inline static int NumAllocatableAliasedRegisters() {
+    return NumAllocatableRegisters();
+  }
+
+  static int ToAllocationIndex(DoubleRegister reg) {
+    int code = reg.code();
+    int index = (code <= kAllocatableLowRangeEnd)
+                    ? code - kAllocatableLowRangeBegin
+                    : code - kAllocatableHighRangeBegin + kNumAllocatableLow;
+    DCHECK(index < kMaxNumAllocatableRegisters);
+    return index;
+  }
+
+  static DoubleRegister FromAllocationIndex(int index) {
+    DCHECK(index >= 0 && index < kMaxNumAllocatableRegisters);
+    return (index < kNumAllocatableLow)
+               ? from_code(index + kAllocatableLowRangeBegin)
+               : from_code(index - kNumAllocatableLow +
+                           kAllocatableHighRangeBegin);
+  }
+
+  static const char* AllocationIndexToString(int index);
+
+  static DoubleRegister from_code(int code) {
+    DoubleRegister r = {code};
+    return r;
+  }
+
+  bool is_valid() const { return 0 <= code_ && code_ < kMaxNumRegisters; }
+  bool is(DoubleRegister reg) const { return code_ == reg.code_; }
+
+  int code() const {
+    DCHECK(is_valid());
+    return code_;
+  }
+  int bit() const {
+    DCHECK(is_valid());
+    return 1 << code_;
+  }
+  void split_code(int* vm, int* m) const {
+    DCHECK(is_valid());
+    *m = (code_ & 0x10) >> 4;
+    *vm = code_ & 0x0F;
+  }
+
+  int code_;
+};
+
+
+const DoubleRegister no_dreg = {-1};
+const DoubleRegister d0 = {0};
+const DoubleRegister d1 = {1};
+const DoubleRegister d2 = {2};
+const DoubleRegister d3 = {3};
+const DoubleRegister d4 = {4};
+const DoubleRegister d5 = {5};
+const DoubleRegister d6 = {6};
+const DoubleRegister d7 = {7};
+const DoubleRegister d8 = {8};
+const DoubleRegister d9 = {9};
+const DoubleRegister d10 = {10};
+const DoubleRegister d11 = {11};
+const DoubleRegister d12 = {12};
+const DoubleRegister d13 = {13};
+const DoubleRegister d14 = {14};
+const DoubleRegister d15 = {15};
+const DoubleRegister d16 = {16};
+const DoubleRegister d17 = {17};
+const DoubleRegister d18 = {18};
+const DoubleRegister d19 = {19};
+const DoubleRegister d20 = {20};
+const DoubleRegister d21 = {21};
+const DoubleRegister d22 = {22};
+const DoubleRegister d23 = {23};
+const DoubleRegister d24 = {24};
+const DoubleRegister d25 = {25};
+const DoubleRegister d26 = {26};
+const DoubleRegister d27 = {27};
+const DoubleRegister d28 = {28};
+const DoubleRegister d29 = {29};
+const DoubleRegister d30 = {30};
+const DoubleRegister d31 = {31};
+
+// Aliases for double registers.  Defined using #define instead of
+// "static const DoubleRegister&" because Clang complains otherwise when a
+// compilation unit that includes this header doesn't use the variables.
+#define kFirstCalleeSavedDoubleReg d14
+#define kLastCalleeSavedDoubleReg d31
+#define kDoubleRegZero d14
+#define kScratchDoubleReg d13
+
+Register ToRegister(int num);
+
+// Condition register
+struct CRegister {
+  bool is_valid() const { return 0 <= code_ && code_ < 16; }
+  bool is(CRegister creg) const { return code_ == creg.code_; }
+  int code() const {
+    DCHECK(is_valid());
+    return code_;
+  }
+  int bit() const {
+    DCHECK(is_valid());
+    return 1 << code_;
+  }
+
+  // Unfortunately we can't make this private in a struct.
+  int code_;
+};
+
+
+const CRegister no_creg = {-1};
+
+const CRegister cr0 = {0};
+const CRegister cr1 = {1};
+const CRegister cr2 = {2};
+const CRegister cr3 = {3};
+const CRegister cr4 = {4};
+const CRegister cr5 = {5};
+const CRegister cr6 = {6};
+const CRegister cr7 = {7};
+const CRegister cr8 = {8};
+const CRegister cr9 = {9};
+const CRegister cr10 = {10};
+const CRegister cr11 = {11};
+const CRegister cr12 = {12};
+const CRegister cr13 = {13};
+const CRegister cr14 = {14};
+const CRegister cr15 = {15};
+
+// -----------------------------------------------------------------------------
+// Machine instruction Operands
+
+#if V8_TARGET_ARCH_PPC64
+const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE64;
+#else
+const RelocInfo::Mode kRelocInfo_NONEPTR = RelocInfo::NONE32;
+#endif
+
+// Class Operand represents a shifter operand in data processing instructions
+class Operand BASE_EMBEDDED {
+ public:
+  // immediate
+  INLINE(explicit Operand(intptr_t immediate,
+                          RelocInfo::Mode rmode = kRelocInfo_NONEPTR));
+  INLINE(static Operand Zero()) { return Operand(static_cast<intptr_t>(0)); }
+  INLINE(explicit Operand(const ExternalReference& f));
+  explicit Operand(Handle<Object> handle);
+  INLINE(explicit Operand(Smi* value));
+
+  // rm
+  INLINE(explicit Operand(Register rm));
+
+  // Return true if this is a register operand.
+  INLINE(bool is_reg() const);
+
+  // For mov.  Returns true if relocation information must be emitted when
+  // this operand is materialized, i.e. when its value cannot be recreated
+  // without it (embedded objects, external references, or when the
+  // serializer is enabled).
+  //
+  // The number of instructions needed to load an operand, anywhere from
+  // one (constant pool small section) to five (full 64-bit sequence), is
+  // reported by Assembler::instructions_required_for_mov() below; that
+  // count is only valid as long as no entries are added to the constant
+  // pool between the query and the actual instruction being emitted.
+  bool must_output_reloc_info(const Assembler* assembler) const;
+
+  inline intptr_t immediate() const {
+    DCHECK(!rm_.is_valid());
+    return imm_;
+  }
+
+  Register rm() const { return rm_; }
+
+ private:
+  Register rm_;
+  intptr_t imm_;  // valid if rm_ == no_reg
+  RelocInfo::Mode rmode_;
+
+  friend class Assembler;
+  friend class MacroAssembler;
+};
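+// Usage sketch (assuming an Assembler* assm in scope; mov() and cmpi()
+// are declared on Assembler below):
+//   assm->mov(r4, Operand(1234));     // load an immediate
+//   assm->mov(r4, Operand(r5));       // register operand
+//   assm->cmpi(r4, Operand::Zero());  // canned zero immediate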
+
+
+// Class MemOperand represents a memory operand in load and store instructions
+// On PowerPC we have base register + 16bit signed value
+// Alternatively we can have a 16bit signed value immediate
+class MemOperand BASE_EMBEDDED {
+ public:
+  explicit MemOperand(Register rn, int32_t offset = 0);
+
+  explicit MemOperand(Register ra, Register rb);
+
+  int32_t offset() const {
+    DCHECK(rb_.is(no_reg));
+    return offset_;
+  }
+
+  // PowerPC - base register
+  Register ra() const {
+    DCHECK(!ra_.is(no_reg));
+    return ra_;
+  }
+
+  Register rb() const {
+    DCHECK(offset_ == 0 && !rb_.is(no_reg));
+    return rb_;
+  }
+
+ private:
+  Register ra_;     // base
+  int32_t offset_;  // offset
+  Register rb_;     // index
+
+  friend class Assembler;
+};
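+// Usage sketch: the two constructors correspond to the two PPC addressing
+// forms used by the loads/stores declared below:
+//   MemOperand(sp, 8)   // base + signed 16-bit offset (D-form, e.g. lwz)
+//   MemOperand(r3, r4)  // base + index register (X-form, e.g. lwzx)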
+
+
+#if V8_OOL_CONSTANT_POOL
+// Class used to build a constant pool.
+class ConstantPoolBuilder BASE_EMBEDDED {
+ public:
+  ConstantPoolBuilder();
+  ConstantPoolArray::LayoutSection AddEntry(Assembler* assm,
+                                            const RelocInfo& rinfo);
+  void Relocate(intptr_t pc_delta);
+  bool IsEmpty();
+  Handle<ConstantPoolArray> New(Isolate* isolate);
+  void Populate(Assembler* assm, ConstantPoolArray* constant_pool);
+
+  inline ConstantPoolArray::LayoutSection current_section() const {
+    return current_section_;
+  }
+
+  // Rather than increasing the capacity of the ConstantPoolArray's
+  // small section to match the longer (16-bit) reach of PPC's load
+  // instruction (at the expense of a larger header to describe the
+  // layout), the PPC implementation utilizes the extended section to
+  // satisfy that reach.  I.e. all entries (regardless of their
+  // section) are reachable with a single load instruction.
+  //
+  // This implementation does not support an unlimited constant pool
+  // size (which would require a multi-instruction sequence).  [See
+  // ARM commit e27ab337 for a reference on the changes required to
+  // support the longer instruction sequence.]  Note, however, that
+  // going down that path will necessarily generate that longer
+  // sequence for all extended section accesses since the placement of
+  // a given entry within the section is not known at the time of
+  // code generation.
+  //
+  // TODO(mbrandy): Determine whether there is a benefit to supporting
+  // the longer sequence given that nops could be used for those
+  // entries which are reachable with a single instruction.
+  inline bool is_full() const { return !is_int16(size_); }
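+  // For illustration: an entry at byte offset ofs is reachable by one
+  // D-form load with a signed 16-bit displacement only while
+  // -32768 <= ofs <= 32767, so is_full() trips as soon as the running
+  // size_ can no longer be encoded as such a displacement.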
+
+  inline ConstantPoolArray::NumberOfEntries* number_of_entries(
+      ConstantPoolArray::LayoutSection section) {
+    return &number_of_entries_[section];
+  }
+
+  inline ConstantPoolArray::NumberOfEntries* small_entries() {
+    return number_of_entries(ConstantPoolArray::SMALL_SECTION);
+  }
+
+  inline ConstantPoolArray::NumberOfEntries* extended_entries() {
+    return number_of_entries(ConstantPoolArray::EXTENDED_SECTION);
+  }
+
+ private:
+  struct ConstantPoolEntry {
+    ConstantPoolEntry(RelocInfo rinfo, ConstantPoolArray::LayoutSection section,
+                      int merged_index)
+        : rinfo_(rinfo), section_(section), merged_index_(merged_index) {}
+
+    RelocInfo rinfo_;
+    ConstantPoolArray::LayoutSection section_;
+    int merged_index_;
+  };
+
+  ConstantPoolArray::Type GetConstantPoolType(RelocInfo::Mode rmode);
+
+  uint32_t size_;
+  std::vector<ConstantPoolEntry> entries_;
+  ConstantPoolArray::LayoutSection current_section_;
+  ConstantPoolArray::NumberOfEntries number_of_entries_[2];
+};
+#endif
+
+
+class Assembler : public AssemblerBase {
+ public:
+  // Create an assembler. Instructions and relocation information are emitted
+  // into a buffer, with the instructions starting from the beginning and the
+  // relocation information starting from the end of the buffer. See CodeDesc
+  // for a detailed comment on the layout (globals.h).
+  //
+  // If the provided buffer is NULL, the assembler allocates and grows its own
+  // buffer, and buffer_size determines the initial buffer size. The buffer is
+  // owned by the assembler and deallocated upon destruction of the assembler.
+  //
+  // If the provided buffer is not NULL, the assembler uses the provided buffer
+  // for code generation and assumes its size to be buffer_size. If the buffer
+  // is too small, a fatal error occurs. No deallocation of the buffer is done
+  // upon destruction of the assembler.
+  Assembler(Isolate* isolate, void* buffer, int buffer_size);
+  virtual ~Assembler() {}
+
+  // GetCode emits any pending (non-emitted) code and fills the descriptor
+  // desc. GetCode() is idempotent; it returns the same result if no other
+  // Assembler functions are invoked in between GetCode() calls.
+  void GetCode(CodeDesc* desc);
+
+  // Label operations & relative jumps (PPUM Appendix D)
+  //
+  // Takes a branch opcode (cc) and a label (L) and generates
+  // either a backward branch or a forward branch and links it
+  // to the label fixup chain. Usage:
+  //
+  // Label L;    // unbound label
+  // j(cc, &L);  // forward branch to unbound label
+  // bind(&L);   // bind label to the current pc
+  // j(cc, &L);  // backward branch to bound label
+  // bind(&L);   // illegal: a label may be bound only once
+  //
+  // Note: The same Label can be used for forward and backward branches
+  // but it may be bound only once.
+
+  void bind(Label* L);  // binds an unbound label L to the current code position
+  // Determines if Label is bound and near enough so that a single
+  // branch instruction can be used to reach it.
+  bool is_near(Label* L, Condition cond);
+
+  // Returns the branch offset to the given label from the current code position
+  // Links the label to the current position if it is still unbound
+  // Manages the jump elimination optimization if the second parameter is true.
+  int branch_offset(Label* L, bool jump_elimination_allowed);
+
+  // Puts a label's target address at the given position.
+  // The high 8 bits are set to zero.
+  void label_at_put(Label* L, int at_offset);
+
+#if V8_OOL_CONSTANT_POOL
+  INLINE(static bool IsConstantPoolLoadStart(Address pc));
+  INLINE(static bool IsConstantPoolLoadEnd(Address pc));
+  INLINE(static int GetConstantPoolOffset(Address pc));
+  INLINE(static void SetConstantPoolOffset(Address pc, int offset));
+
+  // Return the address in the constant pool of the code target address used by
+  // the branch/call instruction at pc, or the object in a mov.
+  INLINE(static Address target_constant_pool_address_at(
+      Address pc, ConstantPoolArray* constant_pool));
+#endif
+
+  // Read/Modify the code target address in the branch/call instruction at pc.
+  INLINE(static Address target_address_at(Address pc,
+                                          ConstantPoolArray* constant_pool));
+  INLINE(static void set_target_address_at(
+      Address pc, ConstantPoolArray* constant_pool, Address target,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED));
+  INLINE(static Address target_address_at(Address pc, Code* code)) {
+    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+    return target_address_at(pc, constant_pool);
+  }
+  INLINE(static void set_target_address_at(
+      Address pc, Code* code, Address target,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED)) {
+    ConstantPoolArray* constant_pool = code ? code->constant_pool() : NULL;
+    set_target_address_at(pc, constant_pool, target, icache_flush_mode);
+  }
+
+  // Return the code target address at a call site from the return address
+  // of that call in the instruction stream.
+  inline static Address target_address_from_return_address(Address pc);
+
+  // Given the address of the beginning of a call, return the address
+  // in the instruction stream that the call will return to.
+  INLINE(static Address return_address_from_call_start(Address pc));
+
+  // Return the code target address of the patch debug break slot
+  INLINE(static Address break_address_from_return_address(Address pc));
+
+  // This sets the branch destination.
+  // This is for calls and branches within generated code.
+  inline static void deserialization_set_special_target_at(
+      Address instruction_payload, Code* code, Address target);
+
+  // Size of an instruction.
+  static const int kInstrSize = sizeof(Instr);
+
+  // Here we are patching the address in the lis/ori instruction sequence.
+  // These values are used in the serialization process and must be zero for
+  // the PPC platform, as Code, Embedded Object or External-reference
+  // pointers are split across multiple consecutive instructions and don't
+  // exist separately in the code, so the serializer should not step
+  // forwards in memory after a target is resolved and written.
+  static const int kSpecialTargetSize = 0;
+
+// Number of instructions to load an address via a mov sequence.
+#if V8_TARGET_ARCH_PPC64
+  static const int kMovInstructionsConstantPool = 2;
+  static const int kMovInstructionsNoConstantPool = 5;
+#else
+  static const int kMovInstructionsConstantPool = 1;
+  static const int kMovInstructionsNoConstantPool = 2;
+#endif
+#if V8_OOL_CONSTANT_POOL
+  static const int kMovInstructions = kMovInstructionsConstantPool;
+#else
+  static const int kMovInstructions = kMovInstructionsNoConstantPool;
+#endif
+
+  // Distance between the instruction referring to the address of the call
+  // target and the return address.
+
+  // Call sequence is a FIXED_SEQUENCE:
+  // mov     r8, @ call address
+  // mtlr    r8
+  // blrl
+  //                      @ return address
+  static const int kCallTargetAddressOffset =
+      (kMovInstructions + 2) * kInstrSize;
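+  // For example, on PPC64 without an out-of-line constant pool this is
+  // (5 + 2) * 4 == 28 bytes: five instructions to materialize the call
+  // address plus the mtlr and blrl shown above.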
+
+  // Distance between start of patched return sequence and the emitted address
+  // to jump to.
+  // Patched return sequence is a FIXED_SEQUENCE:
+  //   mov r0, <address>
+  //   mtlr r0
+  //   blrl
+  static const int kPatchReturnSequenceAddressOffset = 0 * kInstrSize;
+
+  // Distance between start of patched debug break slot and the emitted address
+  // to jump to.
+  // Patched debug break slot code is a FIXED_SEQUENCE:
+  //   mov r0, <address>
+  //   mtlr r0
+  //   blrl
+  static const int kPatchDebugBreakSlotAddressOffset = 0 * kInstrSize;
+
+  // This is the length of the BreakLocationIterator::SetDebugBreakAtReturn()
+  // code patch FIXED_SEQUENCE
+  static const int kJSReturnSequenceInstructions =
+      kMovInstructionsNoConstantPool + 3;
+
+  // This is the length of the code sequence from SetDebugBreakAtSlot()
+  // FIXED_SEQUENCE
+  static const int kDebugBreakSlotInstructions =
+      kMovInstructionsNoConstantPool + 2;
+  static const int kDebugBreakSlotLength =
+      kDebugBreakSlotInstructions * kInstrSize;
+
+  static inline int encode_crbit(const CRegister& cr, enum CRBit crbit) {
+    return ((cr.code() * CRWIDTH) + crbit);
+  }
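+  // Example (assuming the standard CR layout of four bits per field with
+  // LT == 0, GT == 1, EQ == 2, SO/FU == 3):
+  //   encode_crbit(cr7, CR_EQ) == 7 * CRWIDTH + 2 == 30
+  // which is the bit tested by beq() below when cr defaults to cr7.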
+
+  // ---------------------------------------------------------------------------
+  // Code generation
+
+  // Insert the smallest number of nop instructions
+  // possible to align the pc offset to a multiple
+  // of m. m must be a power of 2 (>= 4).
+  void Align(int m);
+  // Aligns code to something that's optimal for a jump target for the platform.
+  void CodeTargetAlign();
+
+  // Branch instructions
+  void bclr(BOfield bo, LKBit lk);
+  void blr();
+  void bc(int branch_offset, BOfield bo, int condition_bit, LKBit lk = LeaveLK);
+  void b(int branch_offset, LKBit lk);
+
+  void bcctr(BOfield bo, LKBit lk);
+  void bctr();
+  void bctrl();
+
+  // Convenience branch instructions using labels
+  void b(Label* L, LKBit lk = LeaveLK) { b(branch_offset(L, false), lk); }
+
+  void bc_short(Condition cond, Label* L, CRegister cr = cr7,
+                LKBit lk = LeaveLK) {
+    DCHECK(cond != al);
+    DCHECK(cr.code() >= 0 && cr.code() <= 7);
+
+    int b_offset = branch_offset(L, false);
+
+    switch (cond) {
+      case eq:
+        bc(b_offset, BT, encode_crbit(cr, CR_EQ), lk);
+        break;
+      case ne:
+        bc(b_offset, BF, encode_crbit(cr, CR_EQ), lk);
+        break;
+      case gt:
+        bc(b_offset, BT, encode_crbit(cr, CR_GT), lk);
+        break;
+      case le:
+        bc(b_offset, BF, encode_crbit(cr, CR_GT), lk);
+        break;
+      case lt:
+        bc(b_offset, BT, encode_crbit(cr, CR_LT), lk);
+        break;
+      case ge:
+        bc(b_offset, BF, encode_crbit(cr, CR_LT), lk);
+        break;
+      case unordered:
+        bc(b_offset, BT, encode_crbit(cr, CR_FU), lk);
+        break;
+      case ordered:
+        bc(b_offset, BF, encode_crbit(cr, CR_FU), lk);
+        break;
+      case overflow:
+        bc(b_offset, BT, encode_crbit(cr, CR_SO), lk);
+        break;
+      case nooverflow:
+        bc(b_offset, BF, encode_crbit(cr, CR_SO), lk);
+        break;
+      default:
+        UNIMPLEMENTED();
+    }
+  }
+
+  void b(Condition cond, Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+    if (cond == al) {
+      b(L, lk);
+      return;
+    }
+
+    if ((L->is_bound() && is_near(L, cond)) || !is_trampoline_emitted()) {
+      bc_short(cond, L, cr, lk);
+      return;
+    }
+
+    Label skip;
+    Condition neg_cond = NegateCondition(cond);
+    bc_short(neg_cond, &skip, cr);
+    b(L, lk);
+    bind(&skip);
+  }
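+  // When the target is too far for a short conditional branch, the code
+  // above emits (sketch):
+  //   bc<negated cond> skip   // short branch over the far jump
+  //   b L                     // unconditional branch with long reach
+  //  skip: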
+
+  void bne(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+    b(ne, L, cr, lk);
+  }
+  void beq(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+    b(eq, L, cr, lk);
+  }
+  void blt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+    b(lt, L, cr, lk);
+  }
+  void bge(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+    b(ge, L, cr, lk);
+  }
+  void ble(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+    b(le, L, cr, lk);
+  }
+  void bgt(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+    b(gt, L, cr, lk);
+  }
+  void bunordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+    b(unordered, L, cr, lk);
+  }
+  void bordered(Label* L, CRegister cr = cr7, LKBit lk = LeaveLK) {
+    b(ordered, L, cr, lk);
+  }
+  void boverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
+    b(overflow, L, cr, lk);
+  }
+  void bnooverflow(Label* L, CRegister cr = cr0, LKBit lk = LeaveLK) {
+    b(nooverflow, L, cr, lk);
+  }
+
+  // Decrement CTR; branch if CTR != 0
+  void bdnz(Label* L, LKBit lk = LeaveLK) {
+    bc(branch_offset(L, false), DCBNZ, 0, lk);
+  }
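+  // Typical counted-loop idiom (sketch; assumes r5 holds the trip count):
+  //   Label loop;
+  //   mtctr(r5);     // load the iteration count into CTR
+  //   bind(&loop);
+  //   ...            // loop body
+  //   bdnz(&loop);   // decrement CTR and branch while CTR != 0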
+
+  // Data-processing instructions
+
+  void sub(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+           RCBit r = LeaveRC);
+
+  void subfic(Register dst, Register src, const Operand& imm);
+
+  void subfc(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+             RCBit r = LeaveRC);
+
+  void add(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+           RCBit r = LeaveRC);
+
+  void addc(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+            RCBit r = LeaveRC);
+
+  void addze(Register dst, Register src1, OEBit o, RCBit r);
+
+  void mullw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+             RCBit r = LeaveRC);
+
+  void mulhw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+             RCBit r = LeaveRC);
+
+  void divw(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+            RCBit r = LeaveRC);
+
+  void addi(Register dst, Register src, const Operand& imm);
+  void addis(Register dst, Register src, const Operand& imm);
+  void addic(Register dst, Register src, const Operand& imm);
+
+  void and_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
+  void andc(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
+  void andi(Register ra, Register rs, const Operand& imm);
+  void andis(Register ra, Register rs, const Operand& imm);
+  void nor(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+  void notx(Register dst, Register src, RCBit r = LeaveRC);
+  void ori(Register dst, Register src, const Operand& imm);
+  void oris(Register dst, Register src, const Operand& imm);
+  void orx(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
+  void xori(Register dst, Register src, const Operand& imm);
+  void xoris(Register ra, Register rs, const Operand& imm);
+  void xor_(Register dst, Register src1, Register src2, RCBit rc = LeaveRC);
+  void cmpi(Register src1, const Operand& src2, CRegister cr = cr7);
+  void cmpli(Register src1, const Operand& src2, CRegister cr = cr7);
+  void cmpwi(Register src1, const Operand& src2, CRegister cr = cr7);
+  void cmplwi(Register src1, const Operand& src2, CRegister cr = cr7);
+  void li(Register dst, const Operand& src);
+  void lis(Register dst, const Operand& imm);
+  void mr(Register dst, Register src);
+
+  void lbz(Register dst, const MemOperand& src);
+  void lbzx(Register dst, const MemOperand& src);
+  void lbzux(Register dst, const MemOperand& src);
+  void lhz(Register dst, const MemOperand& src);
+  void lhzx(Register dst, const MemOperand& src);
+  void lhzux(Register dst, const MemOperand& src);
+  void lwz(Register dst, const MemOperand& src);
+  void lwzu(Register dst, const MemOperand& src);
+  void lwzx(Register dst, const MemOperand& src);
+  void lwzux(Register dst, const MemOperand& src);
+  void lwa(Register dst, const MemOperand& src);
+  void stb(Register dst, const MemOperand& src);
+  void stbx(Register dst, const MemOperand& src);
+  void stbux(Register dst, const MemOperand& src);
+  void sth(Register dst, const MemOperand& src);
+  void sthx(Register dst, const MemOperand& src);
+  void sthux(Register dst, const MemOperand& src);
+  void stw(Register dst, const MemOperand& src);
+  void stwu(Register dst, const MemOperand& src);
+  void stwx(Register rs, const MemOperand& src);
+  void stwux(Register rs, const MemOperand& src);
+
+  void extsb(Register rs, Register ra, RCBit r = LeaveRC);
+  void extsh(Register rs, Register ra, RCBit r = LeaveRC);
+
+  void neg(Register rt, Register ra, OEBit o = LeaveOE, RCBit c = LeaveRC);
+
+#if V8_TARGET_ARCH_PPC64
+  void ld(Register rd, const MemOperand& src);
+  void ldx(Register rd, const MemOperand& src);
+  void ldu(Register rd, const MemOperand& src);
+  void ldux(Register rd, const MemOperand& src);
+  void std(Register rs, const MemOperand& src);
+  void stdx(Register rs, const MemOperand& src);
+  void stdu(Register rs, const MemOperand& src);
+  void stdux(Register rs, const MemOperand& src);
+  void rldic(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
+  void rldicl(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
+  void rldcl(Register ra, Register rs, Register rb, int mb, RCBit r = LeaveRC);
+  void rldicr(Register dst, Register src, int sh, int me, RCBit r = LeaveRC);
+  void rldimi(Register dst, Register src, int sh, int mb, RCBit r = LeaveRC);
+  void sldi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
+  void srdi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
+  void clrrdi(Register dst, Register src, const Operand& val,
+              RCBit rc = LeaveRC);
+  void clrldi(Register dst, Register src, const Operand& val,
+              RCBit rc = LeaveRC);
+  void sradi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+  void srd(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+  void sld(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+  void srad(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+  void rotld(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
+  void rotldi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+  void rotrdi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+  void cntlzd_(Register dst, Register src, RCBit rc = LeaveRC);
+  void extsw(Register rs, Register ra, RCBit r = LeaveRC);
+  void mulld(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+             RCBit r = LeaveRC);
+  void divd(Register dst, Register src1, Register src2, OEBit o = LeaveOE,
+            RCBit r = LeaveRC);
+#endif
+
+  void rlwinm(Register ra, Register rs, int sh, int mb, int me,
+              RCBit rc = LeaveRC);
+  void rlwimi(Register ra, Register rs, int sh, int mb, int me,
+              RCBit rc = LeaveRC);
+  void rlwnm(Register ra, Register rs, Register rb, int mb, int me,
+             RCBit rc = LeaveRC);
+  void slwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
+  void srwi(Register dst, Register src, const Operand& val, RCBit rc = LeaveRC);
+  void clrrwi(Register dst, Register src, const Operand& val,
+              RCBit rc = LeaveRC);
+  void clrlwi(Register dst, Register src, const Operand& val,
+              RCBit rc = LeaveRC);
+  void srawi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+  void srw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+  void slw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+  void sraw(Register dst, Register src1, Register src2, RCBit r = LeaveRC);
+  void rotlw(Register ra, Register rs, Register rb, RCBit r = LeaveRC);
+  void rotlwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+  void rotrwi(Register ra, Register rs, int sh, RCBit r = LeaveRC);
+
+  void cntlzw_(Register dst, Register src, RCBit rc = LeaveRC);
+
+  void subi(Register dst, Register src1, const Operand& src2);
+
+  void cmp(Register src1, Register src2, CRegister cr = cr7);
+  void cmpl(Register src1, Register src2, CRegister cr = cr7);
+  void cmpw(Register src1, Register src2, CRegister cr = cr7);
+  void cmplw(Register src1, Register src2, CRegister cr = cr7);
+
+  void mov(Register dst, const Operand& src);
+
+  // Load the position of the label relative to the generated code object
+  // pointer in a register.
+  void mov_label_offset(Register dst, Label* label);
+
+  // Multiply instructions
+  void mul(Register dst, Register src1, Register src2, OEBit s = LeaveOE,
+           RCBit r = LeaveRC);
+
+  // Miscellaneous arithmetic instructions
+
+  // Special register access
+  void crxor(int bt, int ba, int bb);
+  void crclr(int bt) { crxor(bt, bt, bt); }
+  void creqv(int bt, int ba, int bb);
+  void crset(int bt) { creqv(bt, bt, bt); }
+  void mflr(Register dst);
+  void mtlr(Register src);
+  void mtctr(Register src);
+  void mtxer(Register src);
+  void mcrfs(int bf, int bfa);
+  void mfcr(Register dst);
+#if V8_TARGET_ARCH_PPC64
+  void mffprd(Register dst, DoubleRegister src);
+  void mffprwz(Register dst, DoubleRegister src);
+  void mtfprd(DoubleRegister dst, Register src);
+  void mtfprwz(DoubleRegister dst, Register src);
+  void mtfprwa(DoubleRegister dst, Register src);
+#endif
+
+  void fake_asm(enum FAKE_OPCODE_T fopcode);
+  void marker_asm(int mcode);
+  void function_descriptor();
+
+  // Exception-generating instructions and debugging support
+  void stop(const char* msg, Condition cond = al,
+            int32_t code = kDefaultStopCode, CRegister cr = cr7);
+
+  void bkpt(uint32_t imm16);  // v5 and above
+
+  // Informational messages when simulating
+  void info(const char* msg, Condition cond = al,
+            int32_t code = kDefaultStopCode, CRegister cr = cr7);
+
+  void dcbf(Register ra, Register rb);
+  void sync();
+  void lwsync();
+  void icbi(Register ra, Register rb);
+  void isync();
+
+  // Support for floating point
+  void lfd(const DoubleRegister frt, const MemOperand& src);
+  void lfdu(const DoubleRegister frt, const MemOperand& src);
+  void lfdx(const DoubleRegister frt, const MemOperand& src);
+  void lfdux(const DoubleRegister frt, const MemOperand& src);
+  void lfs(const DoubleRegister frt, const MemOperand& src);
+  void lfsu(const DoubleRegister frt, const MemOperand& src);
+  void lfsx(const DoubleRegister frt, const MemOperand& src);
+  void lfsux(const DoubleRegister frt, const MemOperand& src);
+  void stfd(const DoubleRegister frs, const MemOperand& src);
+  void stfdu(const DoubleRegister frs, const MemOperand& src);
+  void stfdx(const DoubleRegister frs, const MemOperand& src);
+  void stfdux(const DoubleRegister frs, const MemOperand& src);
+  void stfs(const DoubleRegister frs, const MemOperand& src);
+  void stfsu(const DoubleRegister frs, const MemOperand& src);
+  void stfsx(const DoubleRegister frs, const MemOperand& src);
+  void stfsux(const DoubleRegister frs, const MemOperand& src);
+
+  void fadd(const DoubleRegister frt, const DoubleRegister fra,
+            const DoubleRegister frb, RCBit rc = LeaveRC);
+  void fsub(const DoubleRegister frt, const DoubleRegister fra,
+            const DoubleRegister frb, RCBit rc = LeaveRC);
+  void fdiv(const DoubleRegister frt, const DoubleRegister fra,
+            const DoubleRegister frb, RCBit rc = LeaveRC);
+  void fmul(const DoubleRegister frt, const DoubleRegister fra,
+            const DoubleRegister frc, RCBit rc = LeaveRC);
+  void fcmpu(const DoubleRegister fra, const DoubleRegister frb,
+             CRegister cr = cr7);
+  void fmr(const DoubleRegister frt, const DoubleRegister frb,
+           RCBit rc = LeaveRC);
+  void fctiwz(const DoubleRegister frt, const DoubleRegister frb);
+  void fctiw(const DoubleRegister frt, const DoubleRegister frb);
+  void frim(const DoubleRegister frt, const DoubleRegister frb);
+  void frsp(const DoubleRegister frt, const DoubleRegister frb,
+            RCBit rc = LeaveRC);
+  void fcfid(const DoubleRegister frt, const DoubleRegister frb,
+             RCBit rc = LeaveRC);
+  void fctid(const DoubleRegister frt, const DoubleRegister frb,
+             RCBit rc = LeaveRC);
+  void fctidz(const DoubleRegister frt, const DoubleRegister frb,
+              RCBit rc = LeaveRC);
+  void fsel(const DoubleRegister frt, const DoubleRegister fra,
+            const DoubleRegister frc, const DoubleRegister frb,
+            RCBit rc = LeaveRC);
+  void fneg(const DoubleRegister frt, const DoubleRegister frb,
+            RCBit rc = LeaveRC);
+  void mtfsfi(int bf, int immediate, RCBit rc = LeaveRC);
+  void mffs(const DoubleRegister frt, RCBit rc = LeaveRC);
+  void mtfsf(const DoubleRegister frb, bool L = 1, int FLM = 0, bool W = 0,
+             RCBit rc = LeaveRC);
+  void fsqrt(const DoubleRegister frt, const DoubleRegister frb,
+             RCBit rc = LeaveRC);
+  void fabs(const DoubleRegister frt, const DoubleRegister frb,
+            RCBit rc = LeaveRC);
+  void fmadd(const DoubleRegister frt, const DoubleRegister fra,
+             const DoubleRegister frc, const DoubleRegister frb,
+             RCBit rc = LeaveRC);
+  void fmsub(const DoubleRegister frt, const DoubleRegister fra,
+             const DoubleRegister frc, const DoubleRegister frb,
+             RCBit rc = LeaveRC);
+
+  // Pseudo instructions
+
+  // Different nop operations are used by the code generator to detect certain
+  // states of the generated code.
+  enum NopMarkerTypes {
+    NON_MARKING_NOP = 0,
+    GROUP_ENDING_NOP,
+    DEBUG_BREAK_NOP,
+    // IC markers.
+    PROPERTY_ACCESS_INLINED,
+    PROPERTY_ACCESS_INLINED_CONTEXT,
+    PROPERTY_ACCESS_INLINED_CONTEXT_DONT_DELETE,
+    // Helper values.
+    LAST_CODE_MARKER,
+    FIRST_IC_MARKER = PROPERTY_ACCESS_INLINED
+  };
+
+  void nop(int type = 0);  // 0 is the default non-marking type.
+
+  void push(Register src) {
+#if V8_TARGET_ARCH_PPC64
+    stdu(src, MemOperand(sp, -kPointerSize));
+#else
+    stwu(src, MemOperand(sp, -kPointerSize));
+#endif
+  }
+
+  void pop(Register dst) {
+#if V8_TARGET_ARCH_PPC64
+    ld(dst, MemOperand(sp));
+#else
+    lwz(dst, MemOperand(sp));
+#endif
+    addi(sp, sp, Operand(kPointerSize));
+  }
+
+  void pop() { addi(sp, sp, Operand(kPointerSize)); }
+
+  // Jump unconditionally to given label.
+  void jmp(Label* L) { b(L); }
+
+  // Check the code size generated from label to here.
+  int SizeOfCodeGeneratedSince(Label* label) {
+    return pc_offset() - label->pos();
+  }
+
+  // Check the number of instructions generated from label to here.
+  int InstructionsGeneratedSince(Label* label) {
+    return SizeOfCodeGeneratedSince(label) / kInstrSize;
+  }
+
+  // Class for scoping postponing the trampoline pool generation.
+  class BlockTrampolinePoolScope {
+   public:
+    explicit BlockTrampolinePoolScope(Assembler* assem) : assem_(assem) {
+      assem_->StartBlockTrampolinePool();
+    }
+    ~BlockTrampolinePoolScope() { assem_->EndBlockTrampolinePool(); }
+
+   private:
+    Assembler* assem_;
+
+    DISALLOW_IMPLICIT_CONSTRUCTORS(BlockTrampolinePoolScope);
+  };
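+  // Usage sketch: wrap the emission of any FIXED_SEQUENCE so the
+  // trampoline pool cannot be emitted in the middle of it:
+  //   {
+  //     BlockTrampolinePoolScope block_trampoline_pool(this);
+  //     ...  // emit the contiguous instruction sequence
+  //   }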
+
+  // Debugging
+
+  // Mark address of the ExitJSFrame code.
+  void RecordJSReturn();
+
+  // Mark address of a debug break slot.
+  void RecordDebugBreakSlot();
+
+  // Record the AST id of the CallIC being compiled, so that it can be placed
+  // in the relocation information.
+  void SetRecordedAstId(TypeFeedbackId ast_id) {
+    // Disabled: enabling this check makes the build fail.
+    // DCHECK(recorded_ast_id_.IsNone());
+    recorded_ast_id_ = ast_id;
+  }
+
+  TypeFeedbackId RecordedAstId() {
+    // Disabled: enabling this check makes the build fail.
+    // DCHECK(!recorded_ast_id_.IsNone());
+    return recorded_ast_id_;
+  }
+
+  void ClearRecordedAstId() { recorded_ast_id_ = TypeFeedbackId::None(); }
+
+  // Record a comment relocation entry that can be used by a disassembler.
+  // Use --code-comments to enable.
+  void RecordComment(const char* msg);
+
+  // Writes a single byte or word of data in the code stream.  Used
+  // for inline tables, e.g., jump-tables.
+  void db(uint8_t data);
+  void dd(uint32_t data);
+  void emit_ptr(uintptr_t data);
+
+  PositionsRecorder* positions_recorder() { return &positions_recorder_; }
+
+  // Read/patch instructions
+  Instr instr_at(int pos) { return *reinterpret_cast<Instr*>(buffer_ + pos); }
+  void instr_at_put(int pos, Instr instr) {
+    *reinterpret_cast<Instr*>(buffer_ + pos) = instr;
+  }
+  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
+  static void instr_at_put(byte* pc, Instr instr) {
+    *reinterpret_cast<Instr*>(pc) = instr;
+  }
+  static Condition GetCondition(Instr instr);
+
+  static bool IsLis(Instr instr);
+  static bool IsLi(Instr instr);
+  static bool IsAddic(Instr instr);
+  static bool IsOri(Instr instr);
+
+  static bool IsBranch(Instr instr);
+  static Register GetRA(Instr instr);
+  static Register GetRB(Instr instr);
+#if V8_TARGET_ARCH_PPC64
+  static bool Is64BitLoadIntoR12(Instr instr1, Instr instr2, Instr instr3,
+                                 Instr instr4, Instr instr5);
+#else
+  static bool Is32BitLoadIntoR12(Instr instr1, Instr instr2);
+#endif
+
+  static bool IsCmpRegister(Instr instr);
+  static bool IsCmpImmediate(Instr instr);
+  static bool IsRlwinm(Instr instr);
+#if V8_TARGET_ARCH_PPC64
+  static bool IsRldicl(Instr instr);
+#endif
+  static bool IsCrSet(Instr instr);
+  static Register GetCmpImmediateRegister(Instr instr);
+  static int GetCmpImmediateRawImmediate(Instr instr);
+  static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
+
+  // Postpone the generation of the trampoline pool for the specified number of
+  // instructions.
+  void BlockTrampolinePoolFor(int instructions);
+  void CheckTrampolinePool();
+
+  int instructions_required_for_mov(const Operand& x) const;
+
+#if V8_OOL_CONSTANT_POOL
+  // Decide between using the constant pool vs. a mov immediate sequence.
+  bool use_constant_pool_for_mov(const Operand& x, bool canOptimize) const;
+
+  // The code currently calls CheckBuffer() too often. This has the side
+  // effect of randomly growing the buffer in the middle of multi-instruction
+  // sequences.
+  // MacroAssembler::LoadConstantPoolPointerRegister() includes a relocation
+  // and multiple instructions. We cannot grow the buffer until the
+  // relocation and all of the instructions are written.
+  //
+  // This function allows outside callers to check and grow the buffer
+  void EnsureSpaceFor(int space_needed);
+#endif
+
+  // Allocate a constant pool of the correct size for the generated code.
+  Handle<ConstantPoolArray> NewConstantPool(Isolate* isolate);
+
+  // Generate the constant pool for the generated code.
+  void PopulateConstantPool(ConstantPoolArray* constant_pool);
+
+#if V8_OOL_CONSTANT_POOL
+  bool is_constant_pool_full() const {
+    return constant_pool_builder_.is_full();
+  }
+
+  bool use_extended_constant_pool() const {
+    return constant_pool_builder_.current_section() ==
+           ConstantPoolArray::EXTENDED_SECTION;
+  }
+#endif
+
+#if ABI_USES_FUNCTION_DESCRIPTORS || V8_OOL_CONSTANT_POOL
+  static void RelocateInternalReference(
+      Address pc, intptr_t delta, Address code_start,
+      ICacheFlushMode icache_flush_mode = FLUSH_ICACHE_IF_NEEDED);
+  static int DecodeInternalReference(Vector<char> buffer, Address pc);
+#endif
+
+ protected:
+  // Relocation for a type-recording IC has the AST id added to it.  This
+  // member variable is a way to pass the information from the call site to
+  // the relocation info.
+  TypeFeedbackId recorded_ast_id_;
+
+  int buffer_space() const { return reloc_info_writer.pos() - pc_; }
+
+  // Decode branch instruction at pos and return branch target pos
+  int target_at(int pos);
+
+  // Patch branch instruction at pos to branch to given branch target pos
+  void target_at_put(int pos, int target_pos);
+
+  // Record reloc info for current pc_
+  void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
+  void RecordRelocInfo(const RelocInfo& rinfo);
+#if V8_OOL_CONSTANT_POOL
+  ConstantPoolArray::LayoutSection ConstantPoolAddEntry(
+      const RelocInfo& rinfo) {
+    return constant_pool_builder_.AddEntry(this, rinfo);
+  }
+#endif
+
+  // Block the emission of the trampoline pool before pc_offset.
+  void BlockTrampolinePoolBefore(int pc_offset) {
+    if (no_trampoline_pool_before_ < pc_offset)
+      no_trampoline_pool_before_ = pc_offset;
+  }
+
+  void StartBlockTrampolinePool() { trampoline_pool_blocked_nesting_++; }
+
+  void EndBlockTrampolinePool() { trampoline_pool_blocked_nesting_--; }
+
+  bool is_trampoline_pool_blocked() const {
+    return trampoline_pool_blocked_nesting_ > 0;
+  }
+
+  bool has_exception() const { return internal_trampoline_exception_; }
+
+  bool is_trampoline_emitted() const { return trampoline_emitted_; }
+
+#if V8_OOL_CONSTANT_POOL
+  void set_constant_pool_available(bool available) {
+    constant_pool_available_ = available;
+  }
+#endif
+
+ private:
+  // Code generation
+  // The relocation writer's position is at least kGap bytes below the end of
+  // the generated instructions. This is so that multi-instruction sequences do
+  // not have to check for overflow. The same is true for writes of large
+  // relocation info entries.
+  static const int kGap = 32;
+
+  // Repeated checking whether the trampoline pool should be emitted is rather
+  // expensive. By default we only check again once a number of instructions
+  // has been generated.
+  int next_buffer_check_;  // pc offset of next buffer check.
+
+  // Emission of the trampoline pool may be blocked in some code sequences.
+  int trampoline_pool_blocked_nesting_;  // Block emission if this is not zero.
+  int no_trampoline_pool_before_;  // Block emission before this pc offset.
+
+  // Relocation info generation
+  // Each relocation is encoded as a variable size value
+  static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
+  RelocInfoWriter reloc_info_writer;
+
+  // The bound position, before this we cannot do instruction elimination.
+  int last_bound_pos_;
+
+#if V8_OOL_CONSTANT_POOL
+  ConstantPoolBuilder constant_pool_builder_;
+#endif
+
+  // Code emission
+  inline void CheckBuffer();
+  void GrowBuffer();
+  inline void emit(Instr x);
+  inline void CheckTrampolinePoolQuick();
+
+  // Instruction generation
+  void a_form(Instr instr, DoubleRegister frt, DoubleRegister fra,
+              DoubleRegister frb, RCBit r);
+  void d_form(Instr instr, Register rt, Register ra, const intptr_t val,
+              bool signed_disp);
+  void x_form(Instr instr, Register ra, Register rs, Register rb, RCBit r);
+  void xo_form(Instr instr, Register rt, Register ra, Register rb, OEBit o,
+               RCBit r);
+  void md_form(Instr instr, Register ra, Register rs, int shift, int maskbit,
+               RCBit r);
+  void mds_form(Instr instr, Register ra, Register rs, Register rb, int maskbit,
+                RCBit r);
+
+  // Labels
+  void print(Label* L);
+  int max_reach_from(int pos);
+  void bind_to(Label* L, int pos);
+  void next(Label* L);
+
+  class Trampoline {
+   public:
+    Trampoline() {
+      next_slot_ = 0;
+      free_slot_count_ = 0;
+    }
+    Trampoline(int start, int slot_count) {
+      next_slot_ = start;
+      free_slot_count_ = slot_count;
+    }
+    int take_slot() {
+      int trampoline_slot = kInvalidSlotPos;
+      if (free_slot_count_ <= 0) {
+        // We have run out of space on trampolines.
+        // Make sure we fail in debug mode, so we become aware of each case
+        // when this happens.
+        DCHECK(0);
+        // Internal exception will be caught.
+      } else {
+        trampoline_slot = next_slot_;
+        free_slot_count_--;
+        next_slot_ += kTrampolineSlotsSize;
+      }
+      return trampoline_slot;
+    }
+
+   private:
+    int next_slot_;
+    int free_slot_count_;
+  };
+
+  int32_t get_trampoline_entry();
+  int unbound_labels_count_;
+  // If the trampoline is emitted, the generated code is becoming large.
+  // As this is already a slow case which can possibly break our code
+  // generation in the extreme case, we use this information to trigger
+  // a different mode of branch instruction generation, where we no
+  // longer use a single branch instruction.
+  bool trampoline_emitted_;
+  static const int kTrampolineSlotsSize = kInstrSize;
+  static const int kMaxCondBranchReach = (1 << (16 - 1)) - 1;
+  static const int kMaxBlockTrampolineSectionSize = 64 * kInstrSize;
+  static const int kInvalidSlotPos = -1;
+
+  Trampoline trampoline_;
+  bool internal_trampoline_exception_;
+
+  friend class RegExpMacroAssemblerPPC;
+  friend class RelocInfo;
+  friend class CodePatcher;
+  friend class BlockTrampolinePoolScope;
+  PositionsRecorder positions_recorder_;
+  friend class PositionsRecorder;
+  friend class EnsureSpace;
+};
+
+
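+// Constructed at the top of code-emitting functions so the buffer is
+// grown, if needed, before any bytes are written; a typical use is:
+//   EnsureSpace ensure_space(this);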
+class EnsureSpace BASE_EMBEDDED {
+ public:
+  explicit EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
+};
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_PPC_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/ppc/builtins-ppc.cc b/deps/v8/src/ppc/builtins-ppc.cc
new file mode 100644 (file)
index 0000000..7817fcd
--- /dev/null
@@ -0,0 +1,1615 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/debug.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ ACCESS_MASM(masm)
+
+
+void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id,
+                                BuiltinExtraArguments extra_args) {
+  // ----------- S t a t e -------------
+  //  -- r3                 : number of arguments excluding receiver
+  //  -- r4                 : called function (only guaranteed when
+  //                          extra_args requires it)
+  //  -- cp                 : context
+  //  -- sp[0]              : last argument
+  //  -- ...
+  //  -- sp[4 * (argc - 1)] : first argument (argc == r3)
+  //  -- sp[4 * argc]       : receiver
+  // -----------------------------------
+
+  // Insert extra arguments.
+  int num_extra_args = 0;
+  if (extra_args == NEEDS_CALLED_FUNCTION) {
+    num_extra_args = 1;
+    __ push(r4);
+  } else {
+    DCHECK(extra_args == NO_EXTRA_ARGUMENTS);
+  }
+
+  // JumpToExternalReference expects r3 to contain the number of arguments
+  // including the receiver and the extra arguments.
+  __ addi(r3, r3, Operand(num_extra_args + 1));
+  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+}
+
+
+// Load the built-in InternalArray function from the current context.
+static void GenerateLoadInternalArrayFunction(MacroAssembler* masm,
+                                              Register result) {
+  // Load the native context.
+  __ LoadP(result,
+           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the InternalArray function from the native context.
+  __ LoadP(result,
+           MemOperand(result, Context::SlotOffset(
+                                  Context::INTERNAL_ARRAY_FUNCTION_INDEX)));
+}
+
+
+// Load the built-in Array function from the current context.
+static void GenerateLoadArrayFunction(MacroAssembler* masm, Register result) {
+  // Load the native context.
+  __ LoadP(result,
+           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ LoadP(result, FieldMemOperand(result, GlobalObject::kNativeContextOffset));
+  // Load the Array function from the native context.
+  __ LoadP(
+      result,
+      MemOperand(result, Context::SlotOffset(Context::ARRAY_FUNCTION_INDEX)));
+}
+
+
+void Builtins::Generate_InternalArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the InternalArray function.
+  GenerateLoadInternalArrayFunction(masm, r4);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin InternalArray functions should be maps.
+    __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+    __ TestIfSmi(r5, r0);
+    __ Assert(ne, kUnexpectedInitialMapForInternalArrayFunction, cr0);
+    __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForInternalArrayFunction);
+  }
+
+  // Run the native code for the InternalArray function called as a normal
+  // function.
+  // Tail call a stub.
+  InternalArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3     : number of arguments
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+  Label generic_array_code, one_or_more_arguments, two_or_more_arguments;
+
+  // Get the Array function.
+  GenerateLoadArrayFunction(masm, r4);
+
+  if (FLAG_debug_code) {
+    // Initial map for the builtin Array functions should be maps.
+    __ LoadP(r5, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+    __ TestIfSmi(r5, r0);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+    __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  // Run the native code for the Array function called as a normal function.
+  // Tail call a stub.
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  ArrayConstructorStub stub(masm->isolate());
+  __ TailCallStub(&stub);
+}
+
+
+void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3                     : number of arguments
+  //  -- r4                     : constructor function
+  //  -- lr                     : return address
+  //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
+  //  -- sp[argc * 4]           : receiver
+  // -----------------------------------
+  Counters* counters = masm->isolate()->counters();
+  __ IncrementCounter(counters->string_ctor_calls(), 1, r5, r6);
+
+  Register function = r4;
+  if (FLAG_debug_code) {
+    __ LoadGlobalFunction(Context::STRING_FUNCTION_INDEX, r5);
+    __ cmp(function, r5);
+    __ Assert(eq, kUnexpectedStringFunction);
+  }
+
+  // Load the first argument into r3 and get rid of the rest.
+  Label no_arguments;
+  __ cmpi(r3, Operand::Zero());
+  __ beq(&no_arguments);
+  // First arg = sp[(argc - 1) * kPointerSize].
+  __ subi(r3, r3, Operand(1));
+  __ ShiftLeftImm(r3, r3, Operand(kPointerSizeLog2));
+  __ add(sp, sp, r3);
+  __ LoadP(r3, MemOperand(sp));
+  // sp now points to args[0]; drop args[0] and the receiver.
+  __ Drop(2);
+
+  Register argument = r5;
+  Label not_cached, argument_is_string;
+  __ LookupNumberStringCache(r3,        // Input.
+                             argument,  // Result.
+                             r6,        // Scratch.
+                             r7,        // Scratch.
+                             r8,        // Scratch.
+                             &not_cached);
+  __ IncrementCounter(counters->string_ctor_cached_number(), 1, r6, r7);
+  __ bind(&argument_is_string);
+
+  // ----------- S t a t e -------------
+  //  -- r5     : argument converted to string
+  //  -- r4     : constructor function
+  //  -- lr     : return address
+  // -----------------------------------
+
+  Label gc_required;
+  __ Allocate(JSValue::kSize,
+              r3,  // Result.
+              r6,  // Scratch.
+              r7,  // Scratch.
+              &gc_required, TAG_OBJECT);
+
+  // Initialize the String object.
+  Register map = r6;
+  __ LoadGlobalFunctionInitialMap(function, map, r7);
+  if (FLAG_debug_code) {
+    __ lbz(r7, FieldMemOperand(map, Map::kInstanceSizeOffset));
+    __ cmpi(r7, Operand(JSValue::kSize >> kPointerSizeLog2));
+    __ Assert(eq, kUnexpectedStringWrapperInstanceSize);
+    __ lbz(r7, FieldMemOperand(map, Map::kUnusedPropertyFieldsOffset));
+    __ cmpi(r7, Operand::Zero());
+    __ Assert(eq, kUnexpectedUnusedPropertiesOfStringWrapper);
+  }
+  __ StoreP(map, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
+
+  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+  __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+  __ StoreP(argument, FieldMemOperand(r3, JSValue::kValueOffset), r0);
+
+  // Ensure the object is fully initialized.
+  STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize);
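+  // (The map, properties, elements and value stores above account for all
+  // four pointer-sized fields that the assert covers.)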
+
+  __ Ret();
+
+  // The argument was not found in the number to string cache. Check
+  // if it's a string already before calling the conversion builtin.
+  Label convert_argument;
+  __ bind(&not_cached);
+  __ JumpIfSmi(r3, &convert_argument);
+
+  // Is it a String?
+  __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ lbz(r6, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ andi(r0, r6, Operand(kIsNotStringMask));
+  __ bne(&convert_argument, cr0);
+  __ mr(argument, r3);
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
+  __ b(&argument_is_string);
+
+  // Invoke the conversion builtin and put the result into r5.
+  __ bind(&convert_argument);
+  __ push(function);  // Preserve the function.
+  __ IncrementCounter(counters->string_ctor_conversions(), 1, r6, r7);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ push(r3);
+    __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  }
+  __ pop(function);
+  __ mr(argument, r3);
+  __ b(&argument_is_string);
+
+  // Load the empty string into r5, remove the receiver from the
+  // stack, and jump back to the case where the argument is a string.
+  __ bind(&no_arguments);
+  __ LoadRoot(argument, Heap::kempty_stringRootIndex);
+  __ Drop(1);
+  __ b(&argument_is_string);
+
+  // At this point the argument is already a string. Call runtime to
+  // create a string wrapper.
+  __ bind(&gc_required);
+  __ IncrementCounter(counters->string_ctor_gc_required(), 1, r6, r7);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ push(argument);
+    __ CallRuntime(Runtime::kNewStringWrapper, 1);
+  }
+  __ Ret();
+}
+
+
+static void CallRuntimePassFunction(MacroAssembler* masm,
+                                    Runtime::FunctionId function_id) {
+  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+  // Push the function twice: one copy is restored after the call, the
+  // other is consumed as the parameter to the runtime call.
+  __ Push(r4, r4);
+
+  __ CallRuntime(function_id, 1);
+  // Restore the function.
+  __ Pop(r4);
+}
+
+
+static void GenerateTailCallToSharedCode(MacroAssembler* masm) {
+  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
+
+
+static void GenerateTailCallToReturnedCode(MacroAssembler* masm) {
+  __ addi(ip, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+}
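+// Both tail-call helpers above compute the entry point as
+//   code object + Code::kHeaderSize - kHeapObjectTag,
+// i.e. they strip the heap-object tag and skip past the Code header to the
+// first instruction.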
+
+
+void Builtins::Generate_InOptimizationQueue(MacroAssembler* masm) {
+  // Checking whether the queued function is ready for install is optional,
+  // since we come across interrupts and stack checks elsewhere.  However,
+  // not checking may delay installing ready functions, and always checking
+  // would be quite expensive.  A good compromise is to first check against
+  // stack limit as a cue for an interrupt signal.
+  Label ok;
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  __ cmpl(sp, ip);
+  __ bge(&ok);
+
+  CallRuntimePassFunction(masm, Runtime::kTryInstallOptimizedCode);
+  GenerateTailCallToReturnedCode(masm);
+
+  __ bind(&ok);
+  GenerateTailCallToSharedCode(masm);
+}
+
+
+static void Generate_JSConstructStubHelper(MacroAssembler* masm,
+                                           bool is_api_function,
+                                           bool create_memento) {
+  // ----------- S t a t e -------------
+  //  -- r3     : number of arguments
+  //  -- r4     : constructor function
+  //  -- r5     : allocation site or undefined
+  //  -- lr     : return address
+  //  -- sp[...]: constructor arguments
+  // -----------------------------------
+
+  // Should never create mementos for api functions.
+  DCHECK(!is_api_function || !create_memento);
+
+  Isolate* isolate = masm->isolate();
+
+  // Enter a construct frame.
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::CONSTRUCT);
+
+    if (create_memento) {
+      __ AssertUndefinedOrAllocationSite(r5, r6);
+      __ push(r5);
+    }
+
+    // Preserve the two incoming parameters on the stack.
+    __ SmiTag(r3);
+    __ push(r3);  // Smi-tagged arguments count.
+    __ push(r4);  // Constructor function.
+
+    // Try to allocate the object without transitioning into C code. If any of
+    // the preconditions is not met, the code bails out to the runtime call.
+    Label rt_call, allocated;
+    if (FLAG_inline_new) {
+      Label undo_allocation;
+      ExternalReference debug_step_in_fp =
+          ExternalReference::debug_step_in_fp_address(isolate);
+      __ mov(r5, Operand(debug_step_in_fp));
+      __ LoadP(r5, MemOperand(r5));
+      __ cmpi(r5, Operand::Zero());
+      __ bne(&rt_call);
+
+      // Load the initial map and verify that it is in fact a map.
+      // r4: constructor function
+      __ LoadP(r5,
+               FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+      __ JumpIfSmi(r5, &rt_call);
+      __ CompareObjectType(r5, r6, r7, MAP_TYPE);
+      __ bne(&rt_call);
+
+      // Check that the constructor is not constructing a JSFunction (see
+      // comments in Runtime_NewObject in runtime.cc), in which case the
+      // initial map's instance type would be JS_FUNCTION_TYPE.
+      // r4: constructor function
+      // r5: initial map
+      __ CompareInstanceType(r5, r6, JS_FUNCTION_TYPE);
+      __ beq(&rt_call);
+
+      if (!is_api_function) {
+        Label allocate;
+        MemOperand bit_field3 = FieldMemOperand(r5, Map::kBitField3Offset);
+        // Check if slack tracking is enabled.
+        __ lwz(r7, bit_field3);
+        __ DecodeField<Map::ConstructionCount>(r11, r7);
+        STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
+        __ cmpi(r11, Operand::Zero());  // JSFunction::kNoSlackTracking
+        __ beq(&allocate);
+        // Decrease generous allocation count.
+        __ Add(r7, r7, -(1 << Map::ConstructionCount::kShift), r0);
+        __ stw(r7, bit_field3);
+        __ cmpi(r11, Operand(JSFunction::kFinishSlackTracking));
+        __ bne(&allocate);
+
+        __ push(r4);
+
+        __ Push(r5, r4);  // r4 = constructor
+        __ CallRuntime(Runtime::kFinalizeInstanceSize, 1);
+
+        __ Pop(r4, r5);
+
+        __ bind(&allocate);
+      }
+
+      // Now allocate the JSObject on the heap.
+      // r4: constructor function
+      // r5: initial map
+      __ lbz(r6, FieldMemOperand(r5, Map::kInstanceSizeOffset));
+      if (create_memento) {
+        __ addi(r6, r6, Operand(AllocationMemento::kSize / kPointerSize));
+      }
+
+      __ Allocate(r6, r7, r8, r9, &rt_call, SIZE_IN_WORDS);
+
+      // The JSObject has been allocated; now initialize the fields. The map
+      // is set to the initial map, and properties and elements are set to
+      // the empty fixed array.
+      // r4: constructor function
+      // r5: initial map
+      // r6: object size (not including memento if create_memento)
+      // r7: JSObject (not tagged)
+      __ LoadRoot(r9, Heap::kEmptyFixedArrayRootIndex);
+      __ mr(r8, r7);
+      __ StoreP(r5, MemOperand(r8, JSObject::kMapOffset));
+      __ StoreP(r9, MemOperand(r8, JSObject::kPropertiesOffset));
+      __ StoreP(r9, MemOperand(r8, JSObject::kElementsOffset));
+      __ addi(r8, r8, Operand(JSObject::kElementsOffset + kPointerSize));
+
+      __ ShiftLeftImm(r9, r6, Operand(kPointerSizeLog2));
+      __ add(r9, r7, r9);  // End of object.
+
+      // Fill all the in-object properties with the appropriate filler.
+      // r4: constructor function
+      // r5: initial map
+      // r6: object size (in words, including memento if create_memento)
+      // r7: JSObject (not tagged)
+      // r8: First in-object property of JSObject (not tagged)
+      // r9: End of object
+      DCHECK_EQ(3 * kPointerSize, JSObject::kHeaderSize);
+      __ LoadRoot(r10, Heap::kUndefinedValueRootIndex);
+
+      if (!is_api_function) {
+        Label no_inobject_slack_tracking;
+
+        // Check if slack tracking is enabled.
+        STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
+        __ cmpi(r11, Operand::Zero());  // JSFunction::kNoSlackTracking
+        __ beq(&no_inobject_slack_tracking);
+
+        // The object was allocated with slack; initialize only the
+        // pre-allocated fields with undefined.
+        __ lbz(r3, FieldMemOperand(r5, Map::kPreAllocatedPropertyFieldsOffset));
+        if (FLAG_debug_code) {
+          __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2));
+          __ add(r0, r8, r0);
+          // r0: offset of first field after pre-allocated fields
+          __ cmp(r0, r9);
+          __ Assert(le, kUnexpectedNumberOfPreAllocatedPropertyFields);
+        }
+        {
+          Label done;
+          __ cmpi(r3, Operand::Zero());
+          __ beq(&done);
+          __ InitializeNFieldsWithFiller(r8, r3, r10);
+          __ bind(&done);
+        }
+        // To allow for truncation.
+        __ LoadRoot(r10, Heap::kOnePointerFillerMapRootIndex);
+        // Fill the remaining fields with one pointer filler map.
+
+        __ bind(&no_inobject_slack_tracking);
+      }
+
+      if (create_memento) {
+        __ subi(r3, r9, Operand(AllocationMemento::kSize));
+        __ InitializeFieldsWithFiller(r8, r3, r10);
+
+        // Fill in memento fields.
+        // r8: points to the allocated but uninitialized memento.
+        __ LoadRoot(r10, Heap::kAllocationMementoMapRootIndex);
+        __ StoreP(r10, MemOperand(r8, AllocationMemento::kMapOffset));
+        // Load the AllocationSite
+        __ LoadP(r10, MemOperand(sp, 2 * kPointerSize));
+        __ StoreP(r10,
+                  MemOperand(r8, AllocationMemento::kAllocationSiteOffset));
+        __ addi(r8, r8, Operand(AllocationMemento::kAllocationSiteOffset +
+                                kPointerSize));
+      } else {
+        __ InitializeFieldsWithFiller(r8, r9, r10);
+      }
+
+      // Add the object tag to make the JSObject real, so that we can continue
+      // and jump into the continuation code at any time from now on. Any
+      // failures need to undo the allocation, so that the heap is in a
+      // consistent state and verifiable.
+      __ addi(r7, r7, Operand(kHeapObjectTag));
+
+      // Check if a non-empty properties array is needed. Continue with the
+      // allocated object if not; fall through to the runtime call if it is.
+      // r4: constructor function
+      // r7: JSObject
+      // r8: start of next object (not tagged)
+      __ lbz(r6, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset));
+      // The instance-size fields contain both pre-allocated property fields
+      // and in-object properties.
+      __ lbz(r0, FieldMemOperand(r5, Map::kPreAllocatedPropertyFieldsOffset));
+      __ add(r6, r6, r0);
+      __ lbz(r0, FieldMemOperand(r5, Map::kInObjectPropertiesOffset));
+      __ sub(r6, r6, r0, LeaveOE, SetRC);
+
+      // Done if no extra properties are to be allocated.
+      __ beq(&allocated, cr0);
+      __ Assert(ge, kPropertyAllocationCountFailed, cr0);
+
+      // Scale the number of elements by pointer size and add the header for
+      // FixedArrays to the start of the next object calculation from above.
+      // r4: constructor
+      // r6: number of elements in properties array
+      // r7: JSObject
+      // r8: start of next object
+      __ addi(r3, r6, Operand(FixedArray::kHeaderSize / kPointerSize));
+      __ Allocate(
+          r3, r8, r9, r5, &undo_allocation,
+          static_cast<AllocationFlags>(RESULT_CONTAINS_TOP | SIZE_IN_WORDS));
+
+      // Initialize the FixedArray.
+      // r4: constructor
+      // r6: number of elements in properties array
+      // r7: JSObject
+      // r8: FixedArray (not tagged)
+      __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex);
+      __ mr(r5, r8);
+      DCHECK_EQ(0 * kPointerSize, JSObject::kMapOffset);
+      __ StoreP(r9, MemOperand(r5));
+      DCHECK_EQ(1 * kPointerSize, FixedArray::kLengthOffset);
+      __ SmiTag(r3, r6);
+      __ StoreP(r3, MemOperand(r5, kPointerSize));
+      __ addi(r5, r5, Operand(2 * kPointerSize));
+
+      // Initialize the fields to undefined.
+      // r4: constructor function
+      // r5: First element of FixedArray (not tagged)
+      // r6: number of elements in properties array
+      // r7: JSObject
+      // r8: FixedArray (not tagged)
+      DCHECK_EQ(2 * kPointerSize, FixedArray::kHeaderSize);
+      {
+        Label done;
+        __ cmpi(r6, Operand::Zero());
+        __ beq(&done);
+        if (!is_api_function || create_memento) {
+          __ LoadRoot(r10, Heap::kUndefinedValueRootIndex);
+        } else if (FLAG_debug_code) {
+          __ LoadRoot(r11, Heap::kUndefinedValueRootIndex);
+          __ cmp(r10, r11);
+          __ Assert(eq, kUndefinedValueNotLoaded);
+        }
+        __ InitializeNFieldsWithFiller(r5, r6, r10);
+        __ bind(&done);
+      }
+
+      // Store the initialized FixedArray into the properties field of
+      // the JSObject
+      // r4: constructor function
+      // r7: JSObject
+      // r8: FixedArray (not tagged)
+      __ addi(r8, r8, Operand(kHeapObjectTag));  // Add the heap tag.
+      __ StoreP(r8, FieldMemOperand(r7, JSObject::kPropertiesOffset), r0);
+
+      // Continue with JSObject being successfully allocated
+      // r4: constructor function
+      // r7: JSObject
+      __ b(&allocated);
+
+      // Undo the setting of the new top so that the heap is verifiable. For
+      // example, the map's unused properties potentially do not match the
+      // allocated object's unused properties.
+      // r7: JSObject (previous new top)
+      __ bind(&undo_allocation);
+      __ UndoAllocationInNewSpace(r7, r8);
+    }
+
+    // Allocate the new receiver object using the runtime call.
+    // r4: constructor function
+    __ bind(&rt_call);
+    if (create_memento) {
+      // Get the cell or allocation site.
+      __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
+      __ push(r5);
+    }
+
+    __ push(r4);  // argument for Runtime_NewObject
+    if (create_memento) {
+      __ CallRuntime(Runtime::kNewObjectWithAllocationSite, 2);
+    } else {
+      __ CallRuntime(Runtime::kNewObject, 1);
+    }
+    __ mr(r7, r3);
+
+    // If we ended up using the runtime, and we want a memento, then the
+    // runtime call made it for us, and we shouldn't increment the create
+    // count ourselves.
+    Label count_incremented;
+    if (create_memento) {
+      __ b(&count_incremented);
+    }
+
+    // Receiver for constructor call allocated.
+    // r7: JSObject
+    __ bind(&allocated);
+
+    if (create_memento) {
+      __ LoadP(r5, MemOperand(sp, kPointerSize * 2));
+      __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+      __ cmp(r5, r8);
+      __ beq(&count_incremented);
+      // r5 is an AllocationSite. We are creating a memento from it, so we
+      // need to increment the memento create count.
+      __ LoadP(
+          r6, FieldMemOperand(r5, AllocationSite::kPretenureCreateCountOffset));
+      __ AddSmiLiteral(r6, r6, Smi::FromInt(1), r0);
+      __ StoreP(
+          r6, FieldMemOperand(r5, AllocationSite::kPretenureCreateCountOffset),
+          r0);
+      __ bind(&count_incremented);
+    }
+
+    __ Push(r7, r7);
+
+    // Reload the number of arguments and the constructor from the stack.
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: constructor function
+    // sp[3]: number of arguments (smi-tagged)
+    __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+    __ LoadP(r6, MemOperand(sp, 3 * kPointerSize));
+
+    // Set up pointer to last argument.
+    __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset));
+
+    // Set up number of arguments for function call below
+    __ SmiUntag(r3, r6);
+
+    // Copy arguments and receiver to the expression stack.
+    // r3: number of arguments
+    // r4: constructor function
+    // r5: address of last argument (caller sp)
+    // r6: number of arguments (smi-tagged)
+    // sp[0]: receiver
+    // sp[1]: receiver
+    // sp[2]: constructor function
+    // sp[3]: number of arguments (smi-tagged)
+    Label loop, no_args;
+    __ cmpi(r3, Operand::Zero());
+    __ beq(&no_args);
+    __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+    __ mtctr(r3);
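+    // ip holds the byte offset of the next argument to copy; the count
+    // register (loaded from r3 above) drives the loop via bdnz.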
+    __ bind(&loop);
+    __ subi(ip, ip, Operand(kPointerSize));
+    __ LoadPX(r0, MemOperand(r5, ip));
+    __ push(r0);
+    __ bdnz(&loop);
+    __ bind(&no_args);
+
+    // Call the function.
+    // r3: number of arguments
+    // r4: constructor function
+    if (is_api_function) {
+      __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+      Handle<Code> code = masm->isolate()->builtins()->HandleApiCallConstruct();
+      __ Call(code, RelocInfo::CODE_TARGET);
+    } else {
+      ParameterCount actual(r3);
+      __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+    }
+
+    // Store offset of return address for deoptimizer.
+    if (!is_api_function) {
+      masm->isolate()->heap()->SetConstructStubDeoptPCOffset(masm->pc_offset());
+    }
+
+    // Restore context from the frame.
+    // r3: result
+    // sp[0]: receiver
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+    // If the result is an object (in the ECMA sense), we should get rid
+    // of the receiver and use the result; see ECMA-262 section 13.2.2-7
+    // on page 74.
+    Label use_receiver, exit;
+
+    // If the result is a smi, it is *not* an object in the ECMA sense.
+    // r3: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ JumpIfSmi(r3, &use_receiver);
+
+    // If the type of the result (stored in its map) is less than
+    // FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
+    __ CompareObjectType(r3, r4, r6, FIRST_SPEC_OBJECT_TYPE);
+    __ bge(&exit);
+
+    // Throw away the result of the constructor invocation and use the
+    // on-stack receiver as the result.
+    __ bind(&use_receiver);
+    __ LoadP(r3, MemOperand(sp));
+
+    // Remove receiver from the stack, remove caller arguments, and
+    // return.
+    __ bind(&exit);
+    // r3: result
+    // sp[0]: receiver (newly allocated object)
+    // sp[1]: constructor function
+    // sp[2]: number of arguments (smi-tagged)
+    __ LoadP(r4, MemOperand(sp, 2 * kPointerSize));
+
+    // Leave construct frame.
+  }
+
+  __ SmiToPtrArrayOffset(r4, r4);
+  __ add(sp, sp, r4);
+  __ addi(sp, sp, Operand(kPointerSize));
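+  // r4 holds the smi-tagged argument count reloaded inside the frame; the
+  // two adjustments above drop the arguments and the receiver from the
+  // caller's stack.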
+  __ IncrementCounter(isolate->counters()->constructed_objects(), 1, r4, r5);
+  __ blr();
+}
+
+
+void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, false, FLAG_pretenuring_call_new);
+}
+
+
+void Builtins::Generate_JSConstructStubApi(MacroAssembler* masm) {
+  Generate_JSConstructStubHelper(masm, true, false);
+}
+
+
+static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
+                                             bool is_construct) {
+  // Called from Generate_JS_Entry
+  // r3: code entry
+  // r4: function
+  // r5: receiver
+  // r6: argc
+  // r7: argv
+  // r0, r8-r9, cp may be clobbered
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // Clear the context before we push it when entering the internal frame.
+  __ li(cp, Operand::Zero());
+
+  // Enter an internal frame.
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+
+    // Set up the context from the function argument.
+    __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+    __ InitializeRootRegister();
+
+    // Push the function and the receiver onto the stack.
+    __ push(r4);
+    __ push(r5);
+
+    // Copy arguments to the stack in a loop.
+    // r4: function
+    // r6: argc
+    // r7: argv, i.e. points to first arg
+    Label loop, entry;
+    __ ShiftLeftImm(r0, r6, Operand(kPointerSizeLog2));
+    __ add(r5, r7, r0);
+    // r5 points past last arg.
+    __ b(&entry);
+    __ bind(&loop);
+    __ LoadP(r8, MemOperand(r7));  // read next parameter
+    __ addi(r7, r7, Operand(kPointerSize));
+    __ LoadP(r0, MemOperand(r8));  // dereference handle
+    __ push(r0);                   // push parameter
+    __ bind(&entry);
+    __ cmp(r7, r5);
+    __ bne(&loop);
+
+    // Initialize all JavaScript callee-saved registers, since they will be seen
+    // by the garbage collector as part of handlers.
+    __ LoadRoot(r7, Heap::kUndefinedValueRootIndex);
+    __ mr(r14, r7);
+    __ mr(r15, r7);
+    __ mr(r16, r7);
+    __ mr(r17, r7);
+
+    // Invoke the code and pass argc as r3.
+    __ mr(r3, r6);
+    if (is_construct) {
+      // No type feedback cell is available
+      __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+      CallConstructStub stub(masm->isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+      __ CallStub(&stub);
+    } else {
+      ParameterCount actual(r3);
+      __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+    }
+    // Exit the JS frame and remove the parameters (except function), and
+    // return.
+  }
+  __ blr();
+
+  // r3: result
+}
+
+
+void Builtins::Generate_JSEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, false);
+}
+
+
+void Builtins::Generate_JSConstructEntryTrampoline(MacroAssembler* masm) {
+  Generate_JSEntryTrampolineHelper(masm, true);
+}
+
+
+void Builtins::Generate_CompileLazy(MacroAssembler* masm) {
+  CallRuntimePassFunction(masm, Runtime::kCompileLazy);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void CallCompileOptimized(MacroAssembler* masm, bool concurrent) {
+  FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+  // Push the function twice: one copy is restored after the call, the
+  // other is consumed as the parameter to the runtime call.
+  __ Push(r4, r4);
+  // Whether to compile in a background thread.
+  __ Push(masm->isolate()->factory()->ToBoolean(concurrent));
+
+  __ CallRuntime(Runtime::kCompileOptimized, 2);
+  // Restore the function.
+  __ pop(r4);
+}
+
+
+void Builtins::Generate_CompileOptimized(MacroAssembler* masm) {
+  CallCompileOptimized(masm, false);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
+void Builtins::Generate_CompileOptimizedConcurrent(MacroAssembler* masm) {
+  CallCompileOptimized(masm, true);
+  GenerateTailCallToReturnedCode(masm);
+}
+
+
+static void GenerateMakeCodeYoungAgainCommon(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Point r3 at the start of the PlatformCodeAge sequence.
+  __ mr(r3, ip);
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r3 - contains return address (beginning of patch sequence)
+  //   r4 - isolate
+  //   lr - return address
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ mflr(r0);
+  __ MultiPush(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+  __ PrepareCallCFunction(2, 0, r5);
+  __ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_make_code_young_function(masm->isolate()), 2);
+  __ MultiPop(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+  __ mtlr(r0);
+  __ mr(ip, r3);
+  __ Jump(ip);
+}
+
+#define DEFINE_CODE_AGE_BUILTIN_GENERATOR(C)                  \
+  void Builtins::Generate_Make##C##CodeYoungAgainEvenMarking( \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }                                                           \
+  void Builtins::Generate_Make##C##CodeYoungAgainOddMarking(  \
+      MacroAssembler* masm) {                                 \
+    GenerateMakeCodeYoungAgainCommon(masm);                   \
+  }
+CODE_AGE_LIST(DEFINE_CODE_AGE_BUILTIN_GENERATOR)
+#undef DEFINE_CODE_AGE_BUILTIN_GENERATOR
+
+
+void Builtins::Generate_MarkCodeAsExecutedOnce(MacroAssembler* masm) {
+  // For now, we are relying on the fact that make_code_young doesn't do any
+  // garbage collection which allows us to save/restore the registers without
+  // worrying about which of them contain pointers. We also don't build an
+  // internal frame to make the code faster, since we shouldn't have to do stack
+  // crawls in MakeCodeYoung. This seems a bit fragile.
+
+  // Point r3 at the start of the PlatformCodeAge sequence.
+  __ mr(r3, ip);
+
+  // The following registers must be saved and restored when calling through to
+  // the runtime:
+  //   r3 - contains return address (beginning of patch sequence)
+  //   r4 - isolate
+  //   lr - return address
+  FrameScope scope(masm, StackFrame::MANUAL);
+  __ mflr(r0);
+  __ MultiPush(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+  __ PrepareCallCFunction(2, 0, r5);
+  __ mov(r4, Operand(ExternalReference::isolate_address(masm->isolate())));
+  __ CallCFunction(
+      ExternalReference::get_mark_code_as_executed_function(masm->isolate()),
+      2);
+  __ MultiPop(r0.bit() | r3.bit() | r4.bit() | fp.bit());
+  __ mtlr(r0);
+  __ mr(ip, r3);
+
+  // Perform prologue operations usually performed by the young code stub.
+  __ PushFixedFrame(r4);
+  __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+  // Jump to point after the code-age stub.
+  __ addi(r3, ip, Operand(kNoCodeAgeSequenceLength));
+  __ Jump(r3);
+}
+
+
+void Builtins::Generate_MarkCodeAsExecutedTwice(MacroAssembler* masm) {
+  GenerateMakeCodeYoungAgainCommon(masm);
+}
+
+
+static void Generate_NotifyStubFailureHelper(MacroAssembler* masm,
+                                             SaveFPRegsMode save_doubles) {
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    // Preserve registers across notification, this is important for compiled
+    // stubs that tail call the runtime on deopts passing their parameters in
+    // registers.
+    __ MultiPush(kJSCallerSaved | kCalleeSaved);
+    // Pass the function and deoptimization type to the runtime system.
+    __ CallRuntime(Runtime::kNotifyStubFailure, 0, save_doubles);
+    __ MultiPop(kJSCallerSaved | kCalleeSaved);
+  }
+
+  __ addi(sp, sp, Operand(kPointerSize));  // Ignore state
+  __ blr();                                // Jump to miss handler
+}
+
+
+void Builtins::Generate_NotifyStubFailure(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kDontSaveFPRegs);
+}
+
+
+void Builtins::Generate_NotifyStubFailureSaveDoubles(MacroAssembler* masm) {
+  Generate_NotifyStubFailureHelper(masm, kSaveFPRegs);
+}
+
+
+static void Generate_NotifyDeoptimizedHelper(MacroAssembler* masm,
+                                             Deoptimizer::BailoutType type) {
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Pass the function and deoptimization type to the runtime system.
+    __ LoadSmiLiteral(r3, Smi::FromInt(static_cast<int>(type)));
+    __ push(r3);
+    __ CallRuntime(Runtime::kNotifyDeoptimized, 1);
+  }
+
+  // Get the full codegen state from the stack and untag it -> r9.
+  __ LoadP(r9, MemOperand(sp, 0 * kPointerSize));
+  __ SmiUntag(r9);
+  // Switch on the state.
+  Label with_tos_register, unknown_state;
+  __ cmpi(r9, Operand(FullCodeGenerator::NO_REGISTERS));
+  __ bne(&with_tos_register);
+  __ addi(sp, sp, Operand(1 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&with_tos_register);
+  __ LoadP(r3, MemOperand(sp, 1 * kPointerSize));
+  __ cmpi(r9, Operand(FullCodeGenerator::TOS_REG));
+  __ bne(&unknown_state);
+  __ addi(sp, sp, Operand(2 * kPointerSize));  // Remove state.
+  __ Ret();
+
+  __ bind(&unknown_state);
+  __ stop("no cases left");
+}
+
+
+void Builtins::Generate_NotifyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::EAGER);
+}
+
+
+void Builtins::Generate_NotifySoftDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::SOFT);
+}
+
+
+void Builtins::Generate_NotifyLazyDeoptimized(MacroAssembler* masm) {
+  Generate_NotifyDeoptimizedHelper(masm, Deoptimizer::LAZY);
+}
+
+
+void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
+  // Lookup the function in the JavaScript frame.
+  __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    // Pass function as argument.
+    __ push(r3);
+    __ CallRuntime(Runtime::kCompileForOnStackReplacement, 1);
+  }
+
+  // If the code object is null, just return to the unoptimized code.
+  Label skip;
+  __ CmpSmiLiteral(r3, Smi::FromInt(0), r0);
+  __ bne(&skip);
+  __ Ret();
+
+  __ bind(&skip);
+
+  // Load deoptimization data from the code object.
+  // <deopt_data> = <code>[#deoptimization_data_offset]
+  __ LoadP(r4, FieldMemOperand(r3, Code::kDeoptimizationDataOffset));
+
+#if V8_OOL_CONSTANT_POOL
+  {
+    ConstantPoolUnavailableScope constant_pool_unavailable(masm);
+    __ LoadP(kConstantPoolRegister,
+             FieldMemOperand(r3, Code::kConstantPoolOffset));
+#endif
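+    // Note: the brace opened in the V8_OOL_CONSTANT_POOL block above is
+    // closed by the matching block at the end of this function.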
+
+    // Load the OSR entrypoint offset from the deoptimization data.
+    // <osr_offset> = <deopt_data>[#header_size + #osr_pc_offset]
+    __ LoadP(r4, FieldMemOperand(
+                     r4, FixedArray::OffsetOfElementAt(
+                             DeoptimizationInputData::kOsrPcOffsetIndex)));
+    __ SmiUntag(r4);
+
+    // Compute the target address = code_obj + header_size + osr_offset
+    // <entry_addr> = <code_obj> + #header_size + <osr_offset>
+    __ add(r3, r3, r4);
+    __ addi(r0, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ mtlr(r0);
+
+    // And "return" to the OSR entry point of the function.
+    __ Ret();
+#if V8_OOL_CONSTANT_POOL
+  }
+#endif
+}
+
+
+void Builtins::Generate_OsrAfterStackCheck(MacroAssembler* masm) {
+  // We check the stack limit as indicator that recompilation might be done.
+  Label ok;
+  __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+  __ cmpl(sp, ip);
+  __ bge(&ok);
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ CallRuntime(Runtime::kStackGuard, 0);
+  }
+  __ Jump(masm->isolate()->builtins()->OnStackReplacement(),
+          RelocInfo::CODE_TARGET);
+
+  __ bind(&ok);
+  __ Ret();
+}
+
+
+void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
+  // 1. Make sure we have at least one argument.
+  // r3: actual number of arguments
+  {
+    Label done;
+    __ cmpi(r3, Operand::Zero());
+    __ bne(&done);
+    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+    __ push(r5);
+    __ addi(r3, r3, Operand(1));
+    __ bind(&done);
+  }
+
+  // 2. Get the function to call (passed as receiver) from the stack, check
+  //    if it is a function.
+  // r3: actual number of arguments
+  Label slow, non_function;
+  __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
+  __ add(r4, sp, r4);
+  __ LoadP(r4, MemOperand(r4));
+  __ JumpIfSmi(r4, &non_function);
+  __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
+  __ bne(&slow);
+
+  // 3a. Patch the first argument if necessary when calling a function.
+  // r3: actual number of arguments
+  // r4: function
+  Label shift_arguments;
+  __ li(r7, Operand::Zero());  // indicate regular JS_FUNCTION
+  {
+    Label convert_to_object, use_global_proxy, patch_receiver;
+    // Change context eagerly in case we need the global receiver.
+    __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+    // Do not transform the receiver for strict mode functions.
+    __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+    __ lwz(r6, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
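+    // On 32-bit targets the compiler hints field is a smi, so the bit
+    // positions tested below are shifted by kSmiTagSize.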
+    __ TestBit(r6,
+#if V8_TARGET_ARCH_PPC64
+               SharedFunctionInfo::kStrictModeFunction,
+#else
+               SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
+#endif
+               r0);
+    __ bne(&shift_arguments, cr0);
+
+    // Do not transform the receiver for native functions (compiler hints
+    // are already in r6).
+    __ TestBit(r6,
+#if V8_TARGET_ARCH_PPC64
+               SharedFunctionInfo::kNative,
+#else
+               SharedFunctionInfo::kNative + kSmiTagSize,
+#endif
+               r0);
+    __ bne(&shift_arguments, cr0);
+
+    // Compute the receiver in sloppy mode.
+    __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+    __ add(r5, sp, ip);
+    __ LoadP(r5, MemOperand(r5, -kPointerSize));
+    // r3: actual number of arguments
+    // r4: function
+    // r5: first argument
+    __ JumpIfSmi(r5, &convert_to_object);
+
+    __ LoadRoot(r6, Heap::kUndefinedValueRootIndex);
+    __ cmp(r5, r6);
+    __ beq(&use_global_proxy);
+    __ LoadRoot(r6, Heap::kNullValueRootIndex);
+    __ cmp(r5, r6);
+    __ beq(&use_global_proxy);
+
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CompareObjectType(r5, r6, r6, FIRST_SPEC_OBJECT_TYPE);
+    __ bge(&shift_arguments);
+
+    __ bind(&convert_to_object);
+
+    {
+      // Enter an internal frame in order to preserve argument count.
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ SmiTag(r3);
+      __ Push(r3, r5);
+      __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+      __ mr(r5, r3);
+
+      __ pop(r3);
+      __ SmiUntag(r3);
+
+      // Exit the internal frame.
+    }
+
+    // Restore the function to r4, and the flag to r7.
+    __ ShiftLeftImm(r7, r3, Operand(kPointerSizeLog2));
+    __ add(r7, sp, r7);
+    __ LoadP(r4, MemOperand(r7));
+    __ li(r7, Operand::Zero());
+    __ b(&patch_receiver);
+
+    __ bind(&use_global_proxy);
+    __ LoadP(r5, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+    __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
+
+    __ bind(&patch_receiver);
+    __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+    __ add(r6, sp, ip);
+    __ StoreP(r5, MemOperand(r6, -kPointerSize));
+
+    __ b(&shift_arguments);
+  }
+
+  // 3b. Check for function proxy.
+  __ bind(&slow);
+  __ li(r7, Operand(1, RelocInfo::NONE32));  // indicate function proxy
+  __ cmpi(r5, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ beq(&shift_arguments);
+  __ bind(&non_function);
+  __ li(r7, Operand(2, RelocInfo::NONE32));  // indicate non-function
+
+  // 3c. Patch the first argument when calling a non-function.  The
+  //     CALL_NON_FUNCTION builtin expects the non-function callee as
+  //     receiver, so overwrite the first argument which will ultimately
+  //     become the receiver.
+  // r3: actual number of arguments
+  // r4: function
+  // r7: call type (0: JS function, 1: function proxy, 2: non-function)
+  __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+  __ add(r5, sp, ip);
+  __ StoreP(r4, MemOperand(r5, -kPointerSize));
+
+  // 4. Shift arguments and return address one slot down on the stack
+  //    (overwriting the original receiver).  Adjust argument count to make
+  //    the original first argument the new receiver.
+  // r3: actual number of arguments
+  // r4: function
+  // r7: call type (0: JS function, 1: function proxy, 2: non-function)
+  __ bind(&shift_arguments);
+  {
+    Label loop;
+    // Calculate the copy start address (destination). Copy end address is sp.
+    __ ShiftLeftImm(ip, r3, Operand(kPointerSizeLog2));
+    __ add(r5, sp, ip);
+
+    __ bind(&loop);
+    __ LoadP(ip, MemOperand(r5, -kPointerSize));
+    __ StoreP(ip, MemOperand(r5));
+    __ subi(r5, r5, Operand(kPointerSize));
+    __ cmp(r5, sp);
+    __ bne(&loop);
+    // Adjust the actual number of arguments and remove the top element
+    // (which is a copy of the last argument).
+    __ subi(r3, r3, Operand(1));
+    __ pop();
+  }
+
+  // 5a. Call non-function via tail call to CALL_NON_FUNCTION builtin,
+  //     or a function proxy via CALL_FUNCTION_PROXY.
+  // r3: actual number of arguments
+  // r4: function
+  // r7: call type (0: JS function, 1: function proxy, 2: non-function)
+  {
+    Label function, non_proxy;
+    __ cmpi(r7, Operand::Zero());
+    __ beq(&function);
+    // Expected number of arguments is 0 for CALL_NON_FUNCTION.
+    __ li(r5, Operand::Zero());
+    __ cmpi(r7, Operand(1));
+    __ bne(&non_proxy);
+
+    __ push(r4);  // re-add proxy object as additional argument
+    __ addi(r3, r3, Operand(1));
+    __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+
+    __ bind(&non_proxy);
+    __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION);
+    __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+    __ bind(&function);
+  }
+
+  // 5b. Get the code to call from the function and check that the number of
+  //     expected arguments matches what we're providing.  If so, jump
+  //     (tail-call) to the code in register ip without checking arguments.
+  // r3: actual number of arguments
+  // r4: function
+  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadWordArith(
+      r5, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !V8_TARGET_ARCH_PPC64
+  __ SmiUntag(r5);
+#endif
+  __ cmp(r5, r3);  // Check formal and actual parameter counts.
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET, ne);
+
+  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+  ParameterCount expected(0);
+  __ InvokeCode(ip, expected, expected, JUMP_FUNCTION, NullCallWrapper());
+}
+
+
+void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
+  const int kIndexOffset =
+      StandardFrameConstants::kExpressionsOffset - (2 * kPointerSize);
+  const int kLimitOffset =
+      StandardFrameConstants::kExpressionsOffset - (1 * kPointerSize);
+  const int kArgsOffset = 2 * kPointerSize;
+  const int kRecvOffset = 3 * kPointerSize;
+  const int kFunctionOffset = 4 * kPointerSize;
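+  // Offsets are relative to fp: the function, receiver and arguments array
+  // sit above the frame (pushed by the caller), while the current index and
+  // the limit live in the frame's expression slots.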
+
+  {
+    FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+
+    __ LoadP(r3, MemOperand(fp, kFunctionOffset));  // get the function
+    __ push(r3);
+    __ LoadP(r3, MemOperand(fp, kArgsOffset));  // get the args array
+    __ push(r3);
+    __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+
+    // Check the stack for overflow. We are not trying to catch
+    // interruptions (e.g. debug break and preemption) here, so the "real stack
+    // limit" is checked.
+    Label okay;
+    __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+    // Make r5 the space we have left. The stack might already be overflowed
+    // here which will cause r5 to become negative.
+    __ sub(r5, sp, r5);
+    // Check if the arguments will overflow the stack.
+    __ SmiToPtrArrayOffset(r0, r3);
+    __ cmp(r5, r0);
+    __ bgt(&okay);  // Signed comparison.
+
+    // Out of stack space.
+    __ LoadP(r4, MemOperand(fp, kFunctionOffset));
+    __ Push(r4, r3);
+    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+    // End of stack check.
+
+    // Push current limit and index.
+    __ bind(&okay);
+    __ li(r4, Operand::Zero());
+    __ Push(r3, r4);  // limit and initial index.
+
+    // Get the receiver.
+    __ LoadP(r3, MemOperand(fp, kRecvOffset));
+
+    // Check that the function is a JS function (otherwise it must be a proxy).
+    Label push_receiver;
+    __ LoadP(r4, MemOperand(fp, kFunctionOffset));
+    __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
+    __ bne(&push_receiver);
+
+    // Change context eagerly to get the right global object if necessary.
+    __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+    // Load the shared function info while the function is still in r4.
+    __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+
+    // Compute the receiver.
+    // Do not transform the receiver for strict mode functions.
+    Label call_to_object, use_global_proxy;
+    __ lwz(r5, FieldMemOperand(r5, SharedFunctionInfo::kCompilerHintsOffset));
+    __ TestBit(r5,
+#if V8_TARGET_ARCH_PPC64
+               SharedFunctionInfo::kStrictModeFunction,
+#else
+               SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
+#endif
+               r0);
+    __ bne(&push_receiver, cr0);
+
+    // Do not transform the receiver for native functions.
+    __ TestBit(r5,
+#if V8_TARGET_ARCH_PPC64
+               SharedFunctionInfo::kNative,
+#else
+               SharedFunctionInfo::kNative + kSmiTagSize,
+#endif
+               r0);
+    __ bne(&push_receiver, cr0);
+
+    // Compute the receiver in sloppy mode.
+    __ JumpIfSmi(r3, &call_to_object);
+    __ LoadRoot(r4, Heap::kNullValueRootIndex);
+    __ cmp(r3, r4);
+    __ beq(&use_global_proxy);
+    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+    __ cmp(r3, r4);
+    __ beq(&use_global_proxy);
+
+    // Check if the receiver is already a JavaScript object.
+    // r3: receiver
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+    __ bge(&push_receiver);
+
+    // Convert the receiver to a regular object.
+    // r3: receiver
+    __ bind(&call_to_object);
+    __ push(r3);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ b(&push_receiver);
+
+    __ bind(&use_global_proxy);
+    __ LoadP(r3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+    __ LoadP(r3, FieldMemOperand(r3, GlobalObject::kGlobalProxyOffset));
+
+    // Push the receiver.
+    // r3: receiver
+    __ bind(&push_receiver);
+    __ push(r3);
+
+    // Copy all arguments from the array to the stack.
+    Label entry, loop;
+    __ LoadP(r3, MemOperand(fp, kIndexOffset));
+    __ b(&entry);
+
+    // Load the current argument from the arguments array and push it to the
+    // stack.
+    // r3: current argument index
+    __ bind(&loop);
+    __ LoadP(r4, MemOperand(fp, kArgsOffset));
+    __ Push(r4, r3);
+
+    // Call the runtime to access the property in the arguments array.
+    __ CallRuntime(Runtime::kGetProperty, 2);
+    __ push(r3);
+
+    // Advance the index and store it back on the stack.
+    __ LoadP(r3, MemOperand(fp, kIndexOffset));
+    __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
+    __ StoreP(r3, MemOperand(fp, kIndexOffset));
+
+    // Test if the copy loop has finished copying all the elements from the
+    // arguments object.
+    __ bind(&entry);
+    __ LoadP(r4, MemOperand(fp, kLimitOffset));
+    __ cmp(r3, r4);
+    __ bne(&loop);
+
+    // Call the function.
+    Label call_proxy;
+    ParameterCount actual(r3);
+    __ SmiUntag(r3);
+    __ LoadP(r4, MemOperand(fp, kFunctionOffset));
+    __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE);
+    __ bne(&call_proxy);
+    __ InvokeFunction(r4, actual, CALL_FUNCTION, NullCallWrapper());
+
+    __ LeaveFrame(StackFrame::INTERNAL, 3 * kPointerSize);
+    __ blr();
+
+    // Call the function proxy.
+    __ bind(&call_proxy);
+    __ push(r4);  // add function proxy as last argument
+    __ addi(r3, r3, Operand(1));
+    __ li(r5, Operand::Zero());
+    __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
+    __ Call(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+            RelocInfo::CODE_TARGET);
+
+    // Tear down the internal frame and remove function, receiver and args.
+  }
+  __ addi(sp, sp, Operand(3 * kPointerSize));
+  __ blr();
+}
+
+
+static void ArgumentAdaptorStackCheck(MacroAssembler* masm,
+                                      Label* stack_overflow) {
+  // ----------- S t a t e -------------
+  //  -- r3 : actual number of arguments
+  //  -- r4 : function (passed through to callee)
+  //  -- r5 : expected number of arguments
+  // -----------------------------------
+  // Check the stack for overflow. We are not trying to catch
+  // interruptions (e.g. debug break and preemption) here, so the "real stack
+  // limit" is checked.
+  __ LoadRoot(r8, Heap::kRealStackLimitRootIndex);
+  // Make r8 the space we have left. The stack might already be overflowed
+  // here which will cause r8 to become negative.
+  __ sub(r8, sp, r8);
+  // Check if the arguments will overflow the stack.
+  __ ShiftLeftImm(r0, r5, Operand(kPointerSizeLog2));
+  __ cmp(r8, r0);
+  __ ble(stack_overflow);  // Signed comparison.
+}
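+// Roughly: r8 = sp - real_stack_limit is the space left; if
+// r5 * kPointerSize is at least that, the pushes performed while adapting
+// would overflow the stack, so we branch to the bailout.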
+
+
+static void EnterArgumentsAdaptorFrame(MacroAssembler* masm) {
+  __ SmiTag(r3);
+  __ LoadSmiLiteral(r7, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR));
+  __ mflr(r0);
+  __ push(r0);
+#if V8_OOL_CONSTANT_POOL
+  __ Push(fp, kConstantPoolRegister, r7, r4, r3);
+#else
+  __ Push(fp, r7, r4, r3);
+#endif
+  __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                          kPointerSize));
+}
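+// The frame built above stores (from high to low addresses): lr, fp
+// [, constant pool], the ARGUMENTS_ADAPTOR marker, the function and the
+// smi-tagged argument count; LeaveArgumentsAdaptorFrame reads the count
+// back from that last slot.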
+
+
+static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : result being passed through
+  // -----------------------------------
+  // Get the number of arguments passed (as a smi), tear down the frame and
+  // then tear down the parameters.
+  __ LoadP(r4, MemOperand(fp, -(StandardFrameConstants::kFixedFrameSizeFromFp +
+                                kPointerSize)));
+  int stack_adjustment = kPointerSize;  // adjust for receiver
+  __ LeaveFrame(StackFrame::ARGUMENTS_ADAPTOR, stack_adjustment);
+  __ SmiToPtrArrayOffset(r0, r4);
+  __ add(sp, sp, r0);
+}
+
+
+void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : actual number of arguments
+  //  -- r4 : function (passed through to callee)
+  //  -- r5 : expected number of arguments
+  // -----------------------------------
+
+  Label stack_overflow;
+  ArgumentAdaptorStackCheck(masm, &stack_overflow);
+  Label invoke, dont_adapt_arguments;
+
+  Label enough, too_few;
+  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+  __ cmp(r3, r5);
+  __ blt(&too_few);
+  __ cmpi(r5, Operand(SharedFunctionInfo::kDontAdaptArgumentsSentinel));
+  __ beq(&dont_adapt_arguments);
+
+  {  // Enough parameters: actual >= expected
+    __ bind(&enough);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Calculate copy start address into r3 and copy end address into r5.
+    // r3: actual number of arguments as a smi
+    // r4: function
+    // r5: expected number of arguments
+    // ip: code entry to call
+    __ SmiToPtrArrayOffset(r3, r3);
+    __ add(r3, r3, fp);
+    // adjust for return address and receiver
+    __ addi(r3, r3, Operand(2 * kPointerSize));
+    __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2));
+    __ sub(r5, r3, r5);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r3: copy start address
+    // r4: function
+    // r5: copy end address
+    // ip: code entry to call
+
+    Label copy;
+    __ bind(&copy);
+    __ LoadP(r0, MemOperand(r3, 0));
+    __ push(r0);
+    __ cmp(r3, r5);  // Compare before moving to next argument.
+    __ subi(r3, r3, Operand(kPointerSize));
+    __ bne(&copy);
+
+    __ b(&invoke);
+  }
+
+  {  // Too few parameters: Actual < expected
+    __ bind(&too_few);
+    EnterArgumentsAdaptorFrame(masm);
+
+    // Calculate the copy start address into r3; the copy end address is fp.
+    // r3: actual number of arguments as a smi
+    // r4: function
+    // r5: expected number of arguments
+    // ip: code entry to call
+    __ SmiToPtrArrayOffset(r3, r3);
+    __ add(r3, r3, fp);
+
+    // Copy the arguments (including the receiver) to the new stack frame.
+    // r3: copy start address
+    // r4: function
+    // r5: expected number of arguments
+    // ip: code entry to call
+    Label copy;
+    __ bind(&copy);
+    // Adjust load for return address and receiver.
+    __ LoadP(r0, MemOperand(r3, 2 * kPointerSize));
+    __ push(r0);
+    __ cmp(r3, fp);  // Compare before moving to next argument.
+    __ subi(r3, r3, Operand(kPointerSize));
+    __ bne(&copy);
+
+    // Fill the remaining expected arguments with undefined.
+    // r4: function
+    // r5: expected number of arguments
+    // ip: code entry to call
+    __ LoadRoot(r0, Heap::kUndefinedValueRootIndex);
+    __ ShiftLeftImm(r5, r5, Operand(kPointerSizeLog2));
+    __ sub(r5, fp, r5);
+    // Adjust for frame.
+    __ subi(r5, r5, Operand(StandardFrameConstants::kFixedFrameSizeFromFp +
+                            2 * kPointerSize));
+
+    Label fill;
+    __ bind(&fill);
+    __ push(r0);
+    __ cmp(sp, r5);
+    __ bne(&fill);
+  }
+
+  // Call the entry point.
+  __ bind(&invoke);
+  __ CallJSEntry(ip);
+
+  // Store offset of return address for deoptimizer.
+  masm->isolate()->heap()->SetArgumentsAdaptorDeoptPCOffset(masm->pc_offset());
+
+  // Exit frame and return.
+  LeaveArgumentsAdaptorFrame(masm);
+  __ blr();
+
+
+  // -------------------------------------------
+  // Don't adapt arguments.
+  // -------------------------------------------
+  __ bind(&dont_adapt_arguments);
+  __ JumpToJSEntry(ip);
+
+  __ bind(&stack_overflow);
+  {
+    FrameScope frame(masm, StackFrame::MANUAL);
+    EnterArgumentsAdaptorFrame(masm);
+    __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+    __ bkpt(0);
+  }
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/code-stubs-ppc.cc b/deps/v8/src/ppc/code-stubs-ppc.cc
new file mode 100644 (file)
index 0000000..3e84a21
--- /dev/null
@@ -0,0 +1,4893 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/base/bits.h"
+#include "src/bootstrapper.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/ic/handler-compiler.h"
+#include "src/ic/ic.h"
+#include "src/isolate.h"
+#include "src/jsregexp.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+
+static void InitializeArrayConstructorDescriptor(
+    Isolate* isolate, CodeStubDescriptor* descriptor,
+    int constant_stack_parameter_count) {
+  Address deopt_handler =
+      Runtime::FunctionForId(Runtime::kArrayConstructor)->entry;
+
+  if (constant_stack_parameter_count == 0) {
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
+  } else {
+    descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+  }
+}
+
+
+static void InitializeInternalArrayConstructorDescriptor(
+    Isolate* isolate, CodeStubDescriptor* descriptor,
+    int constant_stack_parameter_count) {
+  Address deopt_handler =
+      Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry;
+
+  if (constant_stack_parameter_count == 0) {
+    descriptor->Initialize(deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE);
+  } else {
+    descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count,
+                           JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
+  }
+}
+
+
+void ArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+
+void ArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+
+void ArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+
+void InternalArrayNoArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0);
+}
+
+
+void InternalArraySingleArgumentConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1);
+}
+
+
+void InternalArrayNArgumentsConstructorStub::InitializeDescriptor(
+    CodeStubDescriptor* descriptor) {
+  InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+                                          Condition cond);
+static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
+                                    Register rhs, Label* lhs_not_nan,
+                                    Label* slow, bool strict);
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
+                                           Register rhs);
+
+
+void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm,
+                                               ExternalReference miss) {
+  // Update the static counter each time a new code stub is generated.
+  isolate()->counters()->code_stubs()->Increment();
+
+  CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor();
+  int param_count = descriptor.GetEnvironmentParameterCount();
+  {
+    // Call the runtime system in a fresh internal frame.
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    DCHECK(param_count == 0 ||
+           r3.is(descriptor.GetEnvironmentParameterRegister(param_count - 1)));
+    // Push arguments
+    for (int i = 0; i < param_count; ++i) {
+      __ push(descriptor.GetEnvironmentParameterRegister(i));
+    }
+    __ CallExternalReference(miss, param_count);
+  }
+
+  __ Ret();
+}
+
+
+void DoubleToIStub::Generate(MacroAssembler* masm) {
+  Label out_of_range, only_low, negate, done, fastpath_done;
+  Register input_reg = source();
+  Register result_reg = destination();
+  DCHECK(is_truncating());
+
+  int double_offset = offset();
+
+  // Immediate values for this stub fit in instructions, so it's safe to use ip.
+  Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
+  Register scratch_low =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
+  Register scratch_high =
+      GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
+  DoubleRegister double_scratch = kScratchDoubleReg;
+
+  __ push(scratch);
+  // Account for saved regs if input is sp.
+  if (input_reg.is(sp)) double_offset += kPointerSize;
+
+  if (!skip_fastpath()) {
+    // Load double input.
+    __ lfd(double_scratch, MemOperand(input_reg, double_offset));
+
+    // Do fast-path convert from double to int.
+    __ ConvertDoubleToInt64(double_scratch,
+#if !V8_TARGET_ARCH_PPC64
+                            scratch,
+#endif
+                            result_reg, d0);
+
+// Test for overflow
+#if V8_TARGET_ARCH_PPC64
+    __ TestIfInt32(result_reg, scratch, r0);
+#else
+    __ TestIfInt32(scratch, result_reg, r0);
+#endif
+    __ beq(&fastpath_done);
+  }
+
+  __ Push(scratch_high, scratch_low);
+  // Account for saved regs if input is sp.
+  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+
+  __ lwz(scratch_high,
+         MemOperand(input_reg, double_offset + Register::kExponentOffset));
+  __ lwz(scratch_low,
+         MemOperand(input_reg, double_offset + Register::kMantissaOffset));
+
+  __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
+  // Load scratch with exponent - 1.  This is faster than loading the
+  // exponent itself because HeapNumber::kExponentBias + 1 = 1024, which
+  // fits in a PPC immediate field.
+  STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
+  __ subi(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
+  // If the exponent is greater than or equal to 84, the 32 least
+  // significant bits of the integer value are all zero (the 53-bit mantissa
+  // scaled by 2^84 or more leaves nothing below bit 32), so the result is 0.
+  // Compare exponent with 84 (compare exponent - 1 with 83).
+  __ cmpi(scratch, Operand(83));
+  __ bge(&out_of_range);
+
+  // If we reach this code, 31 <= exponent <= 83.
+  // So, we don't have to handle cases where 0 <= exponent <= 20 for
+  // which we would need to shift right the high part of the mantissa.
+  // Scratch contains exponent - 1.
+  // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
+  __ subfic(scratch, scratch, Operand(51));
+  __ cmpi(scratch, Operand::Zero());
+  __ ble(&only_low);
+  // 21 <= exponent <= 51, shift scratch_low and scratch_high
+  // to generate the result.
+  __ srw(scratch_low, scratch_low, scratch);
+  // Scratch contains: 52 - exponent.
+  // We need: exponent - 20.
+  // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
+  __ subfic(scratch, scratch, Operand(32));
+  __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
+  // Set the implicit 1 before the mantissa part in scratch_high.
+  STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
+  __ oris(result_reg, result_reg,
+          Operand(1 << ((HeapNumber::kMantissaBitsInTopWord) - 16)));
+  __ slw(r0, result_reg, scratch);
+  __ orx(result_reg, scratch_low, r0);
+  __ b(&negate);
+
+  __ bind(&out_of_range);
+  __ mov(result_reg, Operand::Zero());
+  __ b(&done);
+
+  __ bind(&only_low);
+  // 52 <= exponent <= 83, shift only scratch_low.
+  // On entry, scratch contains: 52 - exponent.
+  __ neg(scratch, scratch);
+  __ slw(result_reg, scratch_low, scratch);
+
+  __ bind(&negate);
+  // If the input was positive, the arithmetic shift of scratch_high by 31
+  // is 0 and the logical shift is also 0:
+  // New result = (result xor 0) + 0 = result.
+  // If the input was negative, we have to negate the result: the arithmetic
+  // shift of scratch_high by 31 is 0xffffffff and the logical shift is 1.
+  // New result = (result xor 0xffffffff) + 1 = 0 - result.
+  __ srawi(r0, scratch_high, 31);
+#if V8_TARGET_ARCH_PPC64
+  __ srdi(r0, r0, Operand(32));
+#endif
+  __ xor_(result_reg, result_reg, r0);
+  __ srwi(r0, scratch_high, Operand(31));
+  __ add(result_reg, result_reg, r0);
+
+  __ bind(&done);
+  __ Pop(scratch_high, scratch_low);
+
+  __ bind(&fastpath_done);
+  __ pop(scratch);
+
+  __ Ret();
+}
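+
+
+// Reference-only sketch of the slow path above: the same truncation done in
+// C++ on the raw IEEE-754 bit pattern, covering the full exponent range
+// rather than just the 31..83 window left after the hardware fast path.
+// The helper is illustrative and not called by any stub.
+static int32_t TruncateDoubleBitsToInt32(uint64_t bits) {
+  int exponent =
+      static_cast<int>((bits >> 52) & 0x7FF) - HeapNumber::kExponentBias;
+  if (exponent < 0) return 0;    // |value| < 1 truncates to 0.
+  if (exponent >= 84) return 0;  // The low 32 integer bits are all zero.
+  uint64_t mantissa =
+      (bits & ((uint64_t(1) << 52) - 1)) | (uint64_t(1) << 52);
+  int shift = exponent - 52;  // Scale the 53-bit mantissa by 2^shift.
+  uint32_t result = (shift >= 0) ? static_cast<uint32_t>(mantissa << shift)
+                                 : static_cast<uint32_t>(mantissa >> -shift);
+  // Two's-complement negate when the sign bit is set; the srawi/xor/add
+  // sequence above computes the same thing branch-free.
+  if (bits >> 63) result = 0u - result;
+  return static_cast<int32_t>(result);
+}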
+
+
+// Handle the case where the lhs and rhs are the same object.
+// Equality is almost reflexive (everything but NaN), so this is a test
+// for "identity and not NaN".
+static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow,
+                                          Condition cond) {
+  Label not_identical;
+  Label heap_number, return_equal;
+  __ cmp(r3, r4);
+  __ bne(&not_identical);
+
+  // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
+  // so we do the second best thing - test it ourselves.
+  // The operands are identical but not both Smis, so neither of them is a
+  // Smi.  If the operand is not a heap number, then return equal.
+  if (cond == lt || cond == gt) {
+    __ CompareObjectType(r3, r7, r7, FIRST_SPEC_OBJECT_TYPE);
+    __ bge(slow);
+  } else {
+    __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
+    __ beq(&heap_number);
+    // Comparing JS objects with <=, >= is complicated.
+    if (cond != eq) {
+      __ cmpi(r7, Operand(FIRST_SPEC_OBJECT_TYPE));
+      __ bge(slow);
+      // Normally here we fall through to return_equal, but undefined is
+      // special: (undefined == undefined) == true, but
+      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
+      if (cond == le || cond == ge) {
+        __ cmpi(r7, Operand(ODDBALL_TYPE));
+        __ bne(&return_equal);
+        __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+        __ cmp(r3, r5);
+        __ bne(&return_equal);
+        if (cond == le) {
+          // undefined <= undefined should fail.
+          __ li(r3, Operand(GREATER));
+        } else {
+          // undefined >= undefined should fail.
+          __ li(r3, Operand(LESS));
+        }
+        __ Ret();
+      }
+    }
+  }
+
+  __ bind(&return_equal);
+  if (cond == lt) {
+    __ li(r3, Operand(GREATER));  // Things aren't less than themselves.
+  } else if (cond == gt) {
+    __ li(r3, Operand(LESS));  // Things aren't greater than themselves.
+  } else {
+    __ li(r3, Operand(EQUAL));  // Things are <=, >=, ==, === themselves.
+  }
+  __ Ret();
+
+  // For less and greater we don't have to check for NaN since the result of
+  // x < x is false regardless.  For the others, here is some code to check
+  // for NaN.
+  if (cond != lt && cond != gt) {
+    __ bind(&heap_number);
+    // It is a heap number, so return non-equal if it's NaN and equal if it's
+    // not NaN.
+
+    // The representation of NaN values has all exponent bits (52..62) set,
+    // and not all mantissa bits (0..51) clear.
+    // Read top bits of double representation (second word of value).
+    __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
+    // Test that exponent bits are all set.
+    STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
+    __ ExtractBitMask(r6, r5, HeapNumber::kExponentMask);
+    __ cmpli(r6, Operand(0x7ff));
+    __ bne(&return_equal);
+
+    // Shift out flag and all exponent bits, retaining only mantissa.
+    __ slwi(r5, r5, Operand(HeapNumber::kNonMantissaBitsInTopWord));
+    // Or with all low-bits of mantissa.
+    __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
+    __ orx(r3, r6, r5);
+    __ cmpi(r3, Operand::Zero());
+    // For equal we already have the right value in r3:  Return zero (equal)
+    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
+    // not (it's a NaN).  For <= and >= we need to load r3 with the failing
+    // value if it's a NaN.
+    if (cond != eq) {
+      Label not_equal;
+      __ bne(&not_equal);
+      // All-zero means Infinity means equal.
+      __ Ret();
+      __ bind(&not_equal);
+      if (cond == le) {
+        __ li(r3, Operand(GREATER));  // NaN <= NaN should fail.
+      } else {
+        __ li(r3, Operand(LESS));  // NaN >= NaN should fail.
+      }
+    }
+    __ Ret();
+  }
+  // No fall through here.
+
+  __ bind(&not_identical);
+}
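+
+
+// Reference-only sketch of the NaN test above (illustrative helper, not
+// called anywhere): a double is NaN iff all eleven exponent bits are set
+// and the mantissa is non-zero; an all-ones exponent with a zero mantissa
+// is an Infinity.
+static bool IsNaNBitPattern(uint64_t bits) {
+  return ((bits >> 52) & 0x7FF) == 0x7FF &&
+         (bits & ((uint64_t(1) << 52) - 1)) != 0;
+}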
+
+
+// See comment at call site.
+static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs,
+                                    Register rhs, Label* lhs_not_nan,
+                                    Label* slow, bool strict) {
+  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
+
+  Label rhs_is_smi;
+  __ JumpIfSmi(rhs, &rhs_is_smi);
+
+  // Lhs is a Smi.  Check whether the rhs is a heap number.
+  __ CompareObjectType(rhs, r6, r7, HEAP_NUMBER_TYPE);
+  if (strict) {
+    // If rhs is not a number and lhs is a Smi then strict equality cannot
+    // succeed.  Return non-equal.
+    // If rhs is r3 then there is already a non-zero value in it.
+    Label skip;
+    __ beq(&skip);
+    if (!rhs.is(r3)) {
+      __ mov(r3, Operand(NOT_EQUAL));
+    }
+    __ Ret();
+    __ bind(&skip);
+  } else {
+    // Smi compared non-strictly with a non-Smi non-heap-number.  Call
+    // the runtime.
+    __ bne(slow);
+  }
+
+  // Lhs is a smi, rhs is a number.
+  // Convert lhs to a double in d7.
+  __ SmiToDouble(d7, lhs);
+  // Load the double from rhs, tagged HeapNumber r3, to d6.
+  __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+
+  // We now have both loaded as doubles but we can skip the lhs nan check
+  // since it's a smi.
+  __ b(lhs_not_nan);
+
+  __ bind(&rhs_is_smi);
+  // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
+  __ CompareObjectType(lhs, r7, r7, HEAP_NUMBER_TYPE);
+  if (strict) {
+    // If lhs is not a number and rhs is a smi then strict equality cannot
+    // succeed.  Return non-equal.
+    // If lhs is r3 then there is already a non-zero value in it.
+    Label skip;
+    __ beq(&skip);
+    if (!lhs.is(r3)) {
+      __ mov(r3, Operand(NOT_EQUAL));
+    }
+    __ Ret();
+    __ bind(&skip);
+  } else {
+    // Smi compared non-strictly with a non-smi non-heap-number.  Call
+    // the runtime.
+    __ bne(slow);
+  }
+
+  // Rhs is a smi, lhs is a heap number.
+  // Load the double from lhs, tagged HeapNumber r4, to d7.
+  __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+  // Convert rhs to a double in d6.
+  __ SmiToDouble(d6, rhs);
+  // Fall through to both_loaded_as_doubles.
+}
+
+
+// See comment at call site.
+static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs,
+                                           Register rhs) {
+  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
+
+  // If either operand is a JS object or an oddball value, then they are
+  // not equal since their pointers are different.
+  // There is no test for undetectability in strict equality.
+  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
+  Label first_non_object;
+  // Get the type of the first operand into r5 and compare it with
+  // FIRST_SPEC_OBJECT_TYPE.
+  __ CompareObjectType(rhs, r5, r5, FIRST_SPEC_OBJECT_TYPE);
+  __ blt(&first_non_object);
+
+  // Return non-zero (r3 is not zero)
+  Label return_not_equal;
+  __ bind(&return_not_equal);
+  __ Ret();
+
+  __ bind(&first_non_object);
+  // Check for oddballs: true, false, null, undefined.
+  __ cmpi(r5, Operand(ODDBALL_TYPE));
+  __ beq(&return_not_equal);
+
+  __ CompareObjectType(lhs, r6, r6, FIRST_SPEC_OBJECT_TYPE);
+  __ bge(&return_not_equal);
+
+  // Check for oddballs: true, false, null, undefined.
+  __ cmpi(r6, Operand(ODDBALL_TYPE));
+  __ beq(&return_not_equal);
+
+  // Now that we have the types we might as well check for
+  // internalized-internalized.
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  __ orx(r5, r5, r6);
+  __ andi(r0, r5, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  __ beq(&return_not_equal, cr0);
+}
+
+
+// See comment at call site.
+static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs,
+                                       Register rhs,
+                                       Label* both_loaded_as_doubles,
+                                       Label* not_heap_numbers, Label* slow) {
+  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
+
+  __ CompareObjectType(rhs, r6, r5, HEAP_NUMBER_TYPE);
+  __ bne(not_heap_numbers);
+  __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ cmp(r5, r6);
+  __ bne(slow);  // First was a heap number, second wasn't.  Go slow case.
+
+  // Both are heap numbers.  Load them up then jump to the code we have
+  // for that.
+  __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+  __ b(both_loaded_as_doubles);
+}
+
+
+// Fast negative check for internalized-to-internalized equality.
+static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
+                                                     Register lhs, Register rhs,
+                                                     Label* possible_strings,
+                                                     Label* not_both_strings) {
+  DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3)));
+
+  // r5 is object type of rhs.
+  Label object_test;
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  __ andi(r0, r5, Operand(kIsNotStringMask));
+  __ bne(&object_test, cr0);
+  __ andi(r0, r5, Operand(kIsNotInternalizedMask));
+  __ bne(possible_strings, cr0);
+  __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
+  __ bge(not_both_strings);
+  __ andi(r0, r6, Operand(kIsNotInternalizedMask));
+  __ bne(possible_strings, cr0);
+
+  // Both are internalized.  We already checked they weren't the same pointer
+  // so they are not equal.
+  __ li(r3, Operand(NOT_EQUAL));
+  __ Ret();
+
+  __ bind(&object_test);
+  __ cmpi(r5, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ blt(not_both_strings);
+  __ CompareObjectType(lhs, r5, r6, FIRST_SPEC_OBJECT_TYPE);
+  __ blt(not_both_strings);
+  // If both objects are undetectable, they are equal. Otherwise, they
+  // are not equal, since they are different objects and an object is not
+  // equal to undefined.
+  __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  __ lbz(r5, FieldMemOperand(r5, Map::kBitFieldOffset));
+  __ lbz(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ and_(r3, r5, r6);
+  __ andi(r3, r3, Operand(1 << Map::kIsUndetectable));
+  __ xori(r3, r3, Operand(1 << Map::kIsUndetectable));
+  __ Ret();
+}
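+
+
+// Reference-only sketch (unused helper) of the branch-free tail above: the
+// value returned in r3 is 0 (EQUAL) iff both bit fields have the
+// undetectable bit set, and the non-zero bit value (NOT_EQUAL) otherwise.
+static int UndetectableObjectEquality(int lhs_bit_field, int rhs_bit_field) {
+  int bit = 1 << Map::kIsUndetectable;
+  return ((lhs_bit_field & rhs_bit_field) & bit) ^ bit;
+}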
+
+
+static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input,
+                                         Register scratch,
+                                         CompareICState::State expected,
+                                         Label* fail) {
+  Label ok;
+  if (expected == CompareICState::SMI) {
+    __ JumpIfNotSmi(input, fail);
+  } else if (expected == CompareICState::NUMBER) {
+    __ JumpIfSmi(input, &ok);
+    __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail,
+                DONT_DO_SMI_CHECK);
+  }
+  // We could be strict about internalized/non-internalized here, but as long as
+  // hydrogen doesn't care, the stub doesn't have to care either.
+  __ bind(&ok);
+}
+
+
+// On entry r4 and r5 are the values to be compared.
+// On exit r3 is 0, positive or negative to indicate the result of
+// the comparison.
+void CompareICStub::GenerateGeneric(MacroAssembler* masm) {
+  Register lhs = r4;
+  Register rhs = r3;
+  Condition cc = GetCondition();
+
+  Label miss;
+  CompareICStub_CheckInputType(masm, lhs, r5, left(), &miss);
+  CompareICStub_CheckInputType(masm, rhs, r6, right(), &miss);
+
+  Label slow;  // Call builtin.
+  Label not_smis, both_loaded_as_doubles, lhs_not_nan;
+
+  Label not_two_smis, smi_done;
+  __ orx(r5, r4, r3);
+  __ JumpIfNotSmi(r5, &not_two_smis);
+  __ SmiUntag(r4);
+  __ SmiUntag(r3);
+  __ sub(r3, r4, r3);
+  __ Ret();
+  __ bind(&not_two_smis);
+
+  // NOTICE! This code is only reached after a smi-fast-case check, so
+  // it is certain that at least one operand isn't a smi.
+
+  // Handle the case where the objects are identical.  Either returns the answer
+  // or goes to slow.  Only falls through if the objects were not identical.
+  EmitIdenticalObjectComparison(masm, &slow, cc);
+
+  // If either is a Smi (we know that not both are), then they can only
+  // be strictly equal if the other is a HeapNumber.
+  STATIC_ASSERT(kSmiTag == 0);
+  DCHECK_EQ(0, Smi::FromInt(0));
+  __ and_(r5, lhs, rhs);
+  __ JumpIfNotSmi(r5, &not_smis);
+  // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
+  // 1) Return the answer.
+  // 2) Go to slow.
+  // 3) Fall through to both_loaded_as_doubles.
+  // 4) Jump to lhs_not_nan.
+  // In cases 3 and 4 we have found out we were dealing with a number-number
+  // comparison.  The double values of the numbers have been loaded
+  // into d7 and d6.
+  EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
+
+  __ bind(&both_loaded_as_doubles);
+  // The arguments have been converted to doubles and stored in d6 and d7
+  __ bind(&lhs_not_nan);
+  __ fcmpu(d7, d6);
+
+  Label nan, equal, less_than;
+  __ bunordered(&nan);
+  __ beq(&equal);
+  __ blt(&less_than);
+  __ li(r3, Operand(GREATER));
+  __ Ret();
+  __ bind(&equal);
+  __ li(r3, Operand(EQUAL));
+  __ Ret();
+  __ bind(&less_than);
+  __ li(r3, Operand(LESS));
+  __ Ret();
+
+  __ bind(&nan);
+  // If one of the sides was a NaN then the comparison is unordered.  Load
+  // r3 with whatever it takes to make the comparison fail, since comparisons
+  // with NaN always fail.
+  if (cc == lt || cc == le) {
+    __ li(r3, Operand(GREATER));
+  } else {
+    __ li(r3, Operand(LESS));
+  }
+  __ Ret();
+
+  __ bind(&not_smis);
+  // At this point we know we are dealing with two different objects,
+  // and neither of them is a Smi.  The objects are in rhs_ and lhs_.
+  if (strict()) {
+    // This returns non-equal for some object types, or falls through if it
+    // was not lucky.
+    EmitStrictTwoHeapObjectCompare(masm, lhs, rhs);
+  }
+
+  Label check_for_internalized_strings;
+  Label flat_string_check;
+  // Check for heap-number-heap-number comparison.  Can jump to slow case,
+  // or load both doubles into d6 and d7 and jump to the code that handles
+  // that case.  If the inputs are not doubles then jumps to
+  // check_for_internalized_strings.
+  // In this case r5 will contain the type of rhs_.  Never falls through.
+  EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles,
+                             &check_for_internalized_strings,
+                             &flat_string_check);
+
+  __ bind(&check_for_internalized_strings);
+  // In the strict case the EmitStrictTwoHeapObjectCompare already took care of
+  // internalized strings.
+  if (cc == eq && !strict()) {
+    // Returns an answer for two internalized strings or two detectable objects.
+    // Otherwise jumps to string case or not both strings case.
+    // Assumes that r5 is the type of rhs_ on entry.
+    EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check,
+                                             &slow);
+  }
+
+  // Check for both being sequential one-byte strings,
+  // and inline if that is the case.
+  __ bind(&flat_string_check);
+
+  __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r5, r6, &slow);
+
+  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
+                      r6);
+  if (cc == eq) {
+    StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r5, r6);
+  } else {
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r5, r6, r7);
+  }
+  // Never falls through to here.
+
+  __ bind(&slow);
+
+  __ Push(lhs, rhs);
+  // Figure out which native to call and setup the arguments.
+  Builtins::JavaScript native;
+  if (cc == eq) {
+    native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS;
+  } else {
+    native = Builtins::COMPARE;
+    int ncr;  // NaN compare result
+    if (cc == lt || cc == le) {
+      ncr = GREATER;
+    } else {
+      DCHECK(cc == gt || cc == ge);  // remaining cases
+      ncr = LESS;
+    }
+    __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
+    __ push(r3);
+  }
+
+  // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ InvokeBuiltin(native, JUMP_FUNCTION);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
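+
+
+// Reference-only sketch (unused, assumes <cmath> for std::isnan) of the
+// result convention the generic compare produces in r3: a negative, zero or
+// positive value, with NaN operands forced to fail whichever ordered
+// comparison was requested.
+static int GenericCompareResult(double lhs, double rhs, Condition cc) {
+  if (std::isnan(lhs) || std::isnan(rhs)) {
+    // Force lt/le and gt/ge to fail, matching the nan block above.
+    return (cc == lt || cc == le) ? GREATER : LESS;
+  }
+  return (lhs < rhs) ? LESS : (lhs > rhs) ? GREATER : EQUAL;
+}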
+
+
+void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
+  // We don't allow a GC during a store buffer overflow so there is no need to
+  // store the registers in any particular way, but we do have to store and
+  // restore them.
+  __ mflr(r0);
+  __ MultiPush(kJSCallerSaved | r0.bit());
+  if (save_doubles()) {
+    __ SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
+  }
+  const int argument_count = 1;
+  const int fp_argument_count = 0;
+  const Register scratch = r4;
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
+  __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+  __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()),
+                   argument_count);
+  if (save_doubles()) {
+    __ RestoreFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
+  }
+  __ MultiPop(kJSCallerSaved | r0.bit());
+  __ mtlr(r0);
+  __ Ret();
+}
+
+
+void StoreRegistersStateStub::Generate(MacroAssembler* masm) {
+  __ PushSafepointRegisters();
+  __ blr();
+}
+
+
+void RestoreRegistersStateStub::Generate(MacroAssembler* masm) {
+  __ PopSafepointRegisters();
+  __ blr();
+}
+
+
+void MathPowStub::Generate(MacroAssembler* masm) {
+  const Register base = r4;
+  const Register exponent = MathPowTaggedDescriptor::exponent();
+  DCHECK(exponent.is(r5));
+  const Register heapnumbermap = r8;
+  const Register heapnumber = r3;
+  const DoubleRegister double_base = d1;
+  const DoubleRegister double_exponent = d2;
+  const DoubleRegister double_result = d3;
+  const DoubleRegister double_scratch = d0;
+  const Register scratch = r11;
+  const Register scratch2 = r10;
+
+  Label call_runtime, done, int_exponent;
+  if (exponent_type() == ON_STACK) {
+    Label base_is_smi, unpack_exponent;
+    // The exponent and base are supplied as arguments on the stack.
+    // This can only happen if the stub is called from non-optimized code.
+    // Load input parameters from stack to double registers.
+    __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
+    __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
+
+    __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
+
+    __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
+    __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+    __ cmp(scratch, heapnumbermap);
+    __ bne(&call_runtime);
+
+    __ lfd(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+    __ b(&unpack_exponent);
+
+    __ bind(&base_is_smi);
+    __ ConvertIntToDouble(scratch, double_base);
+    __ bind(&unpack_exponent);
+
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+    __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+    __ cmp(scratch, heapnumbermap);
+    __ bne(&call_runtime);
+
+    __ lfd(double_exponent,
+           FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  } else if (exponent_type() == TAGGED) {
+    // Base is already in double_base.
+    __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
+
+    __ lfd(double_exponent,
+           FieldMemOperand(exponent, HeapNumber::kValueOffset));
+  }
+
+  if (exponent_type() != INTEGER) {
+    // Detect integer exponents stored as double.
+    __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2,
+                             double_scratch);
+    __ beq(&int_exponent);
+
+    if (exponent_type() == ON_STACK) {
+      // Detect square root case.  Crankshaft detects constant +/-0.5 at
+      // compile time and uses DoMathPowHalf instead.  We then skip this check
+      // for non-constant cases of +/-0.5 as these hardly occur.
+      Label not_plus_half, not_minus_inf1, not_minus_inf2;
+
+      // Test for 0.5.
+      __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
+      __ fcmpu(double_exponent, double_scratch);
+      __ bne(&not_plus_half);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
+      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+      __ fcmpu(double_base, double_scratch);
+      __ bne(&not_minus_inf1);
+      __ fneg(double_result, double_scratch);
+      __ b(&done);
+      __ bind(&not_minus_inf1);
+
+      // Add +0 to convert -0 to +0.
+      __ fadd(double_scratch, double_base, kDoubleRegZero);
+      __ fsqrt(double_result, double_scratch);
+      __ b(&done);
+
+      __ bind(&not_plus_half);
+      __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
+      __ fcmpu(double_exponent, double_scratch);
+      __ bne(&call_runtime);
+
+      // Calculates square root of base.  Check for the special case of
+      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
+      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+      __ fcmpu(double_base, double_scratch);
+      __ bne(&not_minus_inf2);
+      __ fmr(double_result, kDoubleRegZero);
+      __ b(&done);
+      __ bind(&not_minus_inf2);
+
+      // Add +0 to convert -0 to +0.
+      __ fadd(double_scratch, double_base, kDoubleRegZero);
+      __ LoadDoubleLiteral(double_result, 1.0, scratch);
+      __ fsqrt(double_scratch, double_scratch);
+      __ fdiv(double_result, double_result, double_scratch);
+      __ b(&done);
+    }
+
+    __ mflr(r0);
+    __ push(r0);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ MovToFloatParameters(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(isolate()), 0, 2);
+    }
+    __ pop(r0);
+    __ mtlr(r0);
+    __ MovFromFloatResult(double_result);
+    __ b(&done);
+  }
+
+  // Calculate power with integer exponent.
+  __ bind(&int_exponent);
+
+  // Get two copies of exponent in the registers scratch and exponent.
+  if (exponent_type() == INTEGER) {
+    __ mr(scratch, exponent);
+  } else {
+    // Exponent has previously been stored into scratch as untagged integer.
+    __ mr(exponent, scratch);
+  }
+  __ fmr(double_scratch, double_base);  // Back up base.
+  __ li(scratch2, Operand(1));
+  __ ConvertIntToDouble(scratch2, double_result);
+
+  // Get absolute value of exponent.
+  Label positive_exponent;
+  __ cmpi(scratch, Operand::Zero());
+  __ bge(&positive_exponent);
+  __ neg(scratch, scratch);
+  __ bind(&positive_exponent);
+
+  Label while_true, no_carry, loop_end;
+  __ bind(&while_true);
+  __ andi(scratch2, scratch, Operand(1));
+  __ beq(&no_carry, cr0);
+  __ fmul(double_result, double_result, double_scratch);
+  __ bind(&no_carry);
+  __ ShiftRightArithImm(scratch, scratch, 1, SetRC);
+  __ beq(&loop_end, cr0);
+  __ fmul(double_scratch, double_scratch, double_scratch);
+  __ b(&while_true);
+  __ bind(&loop_end);
+
+  __ cmpi(exponent, Operand::Zero());
+  __ bge(&done);
+
+  __ li(scratch2, Operand(1));
+  __ ConvertIntToDouble(scratch2, double_scratch);
+  __ fdiv(double_result, double_scratch, double_result);
+  // Test whether result is zero.  Bail out to check for subnormal result.
+  // Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
+  __ fcmpu(double_result, kDoubleRegZero);
+  __ bne(&done);
+  // double_exponent may not contain the exponent value if the input was a
+  // smi.  We set it with the exponent value before bailing out.
+  __ ConvertIntToDouble(exponent, double_exponent);
+
+  // Returning or bailing out.
+  Counters* counters = isolate()->counters();
+  if (exponent_type() == ON_STACK) {
+    // The arguments are still on the stack.
+    __ bind(&call_runtime);
+    __ TailCallRuntime(Runtime::kMathPowRT, 2, 1);
+
+    // The stub is called from non-optimized code, which expects the result
+    // as heap number in exponent.
+    __ bind(&done);
+    __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap,
+                          &call_runtime);
+    __ stfd(double_result,
+            FieldMemOperand(heapnumber, HeapNumber::kValueOffset));
+    DCHECK(heapnumber.is(r3));
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ Ret(2);
+  } else {
+    __ mflr(r0);
+    __ push(r0);
+    {
+      AllowExternalCallThatCantCauseGC scope(masm);
+      __ PrepareCallCFunction(0, 2, scratch);
+      __ MovToFloatParameters(double_base, double_exponent);
+      __ CallCFunction(
+          ExternalReference::power_double_double_function(isolate()), 0, 2);
+    }
+    __ pop(r0);
+    __ mtlr(r0);
+    __ MovFromFloatResult(double_result);
+
+    __ bind(&done);
+    __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2);
+    __ Ret();
+  }
+}
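+
+
+// Reference-only sketch (unused helper) of the integer-exponent loop above:
+// exponentiation by squaring, consuming one exponent bit per iteration.  The
+// stub additionally bails out to the runtime when the result underflows to
+// zero, which this sketch omits.
+static double PowerWithIntExponent(double base, int exponent) {
+  unsigned n = (exponent < 0) ? 0u - static_cast<unsigned>(exponent)
+                              : static_cast<unsigned>(exponent);
+  double result = 1.0;
+  for (; n != 0; n >>= 1) {
+    if (n & 1) result *= base;  // Multiply in this bit's contribution.
+    base *= base;               // Square once per exponent bit.
+  }
+  return (exponent < 0) ? 1.0 / result : result;
+}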
+
+
+bool CEntryStub::NeedsImmovableCode() { return true; }
+
+
+void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
+  CEntryStub::GenerateAheadOfTime(isolate);
+  //  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate);
+  StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
+  StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
+  ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate);
+  CreateAllocationSiteStub::GenerateAheadOfTime(isolate);
+  BinaryOpICStub::GenerateAheadOfTime(isolate);
+  StoreRegistersStateStub::GenerateAheadOfTime(isolate);
+  RestoreRegistersStateStub::GenerateAheadOfTime(isolate);
+  BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate);
+}
+
+
+void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+  StoreRegistersStateStub stub(isolate);
+  stub.GetCode();
+}
+
+
+void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) {
+  RestoreRegistersStateStub stub(isolate);
+  stub.GetCode();
+}
+
+
+void CodeStub::GenerateFPStubs(Isolate* isolate) {
+  // Generate if not already in cache.
+  SaveFPRegsMode mode = kSaveFPRegs;
+  CEntryStub(isolate, 1, mode).GetCode();
+  StoreBufferOverflowStub(isolate, mode).GetCode();
+  isolate->set_fp_stubs_generated(true);
+}
+
+
+void CEntryStub::GenerateAheadOfTime(Isolate* isolate) {
+  CEntryStub stub(isolate, 1, kDontSaveFPRegs);
+  stub.GetCode();
+}
+
+
+void CEntryStub::Generate(MacroAssembler* masm) {
+  // Called from JavaScript; parameters are on stack as if calling JS function.
+  // r3: number of arguments including receiver
+  // r4: pointer to builtin function
+  // fp: frame pointer  (restored after C call)
+  // sp: stack pointer  (restored as callee's sp after C call)
+  // cp: current context  (C callee-saved)
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  __ mr(r15, r4);
+
+  // Compute the argv pointer.
+  __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2));
+  __ add(r4, r4, sp);
+  __ subi(r4, r4, Operand(kPointerSize));
+
+  // Enter the exit frame that transitions from JavaScript to C++.
+  FrameScope scope(masm, StackFrame::MANUAL);
+
+  // Need at least one extra slot for return address location.
+  int arg_stack_space = 1;
+
+// PPC LINUX ABI:
+#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+  // Pass buffer for return value on stack if necessary
+  if (result_size() > 1) {
+    DCHECK_EQ(2, result_size());
+    arg_stack_space += 2;
+  }
+#endif
+
+  __ EnterExitFrame(save_doubles(), arg_stack_space);
+
+  // Store a copy of argc in callee-saved registers for later.
+  __ mr(r14, r3);
+
+  // r3, r14: number of arguments including receiver  (C callee-saved)
+  // r4: pointer to the first argument
+  // r15: pointer to builtin function  (C callee-saved)
+
+  // Result returned in registers or stack, depending on result size and ABI.
+
+  Register isolate_reg = r5;
+#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+  if (result_size() > 1) {
+    // The return value is a 16-byte non-scalar value.
+    // Use frame storage reserved by calling function to pass return
+    // buffer as implicit first argument.
+    __ mr(r5, r4);
+    __ mr(r4, r3);
+    __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+    isolate_reg = r6;
+  }
+#endif
+
+  // Call C built-in.
+  __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate())));
+
+#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+  // Native AIX/PPC64 Linux use a function descriptor.
+  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize));
+  __ LoadP(ip, MemOperand(r15, 0));  // Instruction address
+  Register target = ip;
+#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+  __ Move(ip, r15);
+  Register target = ip;
+#else
+  Register target = r15;
+#endif
+
+  // To let the GC traverse the return address of the exit frames, we need to
+  // know where the return address is. The CEntryStub is unmovable, so
+  // we can store the address on the stack to be able to find it again and
+  // we never have to restore it, because it will not change.
+  // Compute the return address to store: take the address of the
+  // branch-and-link below with mflr, then add the fixed size of the
+  // instruction sequence up to and including the Call so that the stored
+  // value points at the instruction following the call.
+  {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+    Label here;
+    __ b(&here, SetLK);
+    __ bind(&here);
+    __ mflr(r8);
+
+    // Constant used below is dependent on size of Call() macro instructions
+    __ addi(r0, r8, Operand(20));
+
+    __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+    __ Call(target);
+  }
+
+#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+  // If return value is on the stack, pop it to registers.
+  if (result_size() > 1) {
+    __ LoadP(r4, MemOperand(r3, kPointerSize));
+    __ LoadP(r3, MemOperand(r3));
+  }
+#endif
+
+  // Runtime functions should not return 'the hole'.  Allowing it to escape may
+  // lead to crashes in the IC code later.
+  if (FLAG_debug_code) {
+    Label okay;
+    __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+    __ bne(&okay);
+    __ stop("The hole escaped");
+    __ bind(&okay);
+  }
+
+  // Check result for exception sentinel.
+  Label exception_returned;
+  __ CompareRoot(r3, Heap::kExceptionRootIndex);
+  __ beq(&exception_returned);
+
+  ExternalReference pending_exception_address(Isolate::kPendingExceptionAddress,
+                                              isolate());
+
+  // Check that there is no pending exception, otherwise we
+  // should have returned the exception sentinel.
+  if (FLAG_debug_code) {
+    Label okay;
+    __ mov(r5, Operand(pending_exception_address));
+    __ LoadP(r5, MemOperand(r5));
+    __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+    // Cannot use check here as it attempts to generate call into runtime.
+    __ beq(&okay);
+    __ stop("Unexpected pending exception");
+    __ bind(&okay);
+  }
+
+  // Exit C frame and return.
+  // r3:r4: result
+  // sp: stack pointer
+  // fp: frame pointer
+  // r14: still holds argc (callee-saved).
+  __ LeaveExitFrame(save_doubles(), r14, true);
+  __ blr();
+
+  // Handling of exception.
+  __ bind(&exception_returned);
+
+  // Retrieve the pending exception.
+  __ mov(r5, Operand(pending_exception_address));
+  __ LoadP(r3, MemOperand(r5));
+
+  // Clear the pending exception.
+  __ LoadRoot(r6, Heap::kTheHoleValueRootIndex);
+  __ StoreP(r6, MemOperand(r5));
+
+  // Special handling of termination exceptions which are uncatchable
+  // by javascript code.
+  Label throw_termination_exception;
+  __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
+  __ beq(&throw_termination_exception);
+
+  // Handle normal exception.
+  __ Throw(r3);
+
+  __ bind(&throw_termination_exception);
+  __ ThrowUncatchable(r3);
+}
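+
+
+// Reference-only sketch of the exception protocol implemented above, using
+// a hypothetical runtime-call signature: the C++ builtin returns either a
+// real result or the exception sentinel, and on the sentinel the actual
+// exception object is fetched from (and cleared in) the isolate's
+// pending-exception slot.
+static Object* RuntimeCallProtocolSketch(Isolate* isolate,
+                                         Object* (*builtin)(Isolate*)) {
+  Object* result = builtin(isolate);
+  if (result == isolate->heap()->exception()) {
+    result = isolate->pending_exception();
+    isolate->clear_pending_exception();
+  }
+  return result;
+}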
+
+
+void JSEntryStub::Generate(MacroAssembler* masm) {
+  // r3: code entry
+  // r4: function
+  // r5: receiver
+  // r6: argc
+  // [sp+0]: argv
+
+  Label invoke, handler_entry, exit;
+
+// Called from C
+#if ABI_USES_FUNCTION_DESCRIPTORS
+  __ function_descriptor();
+#endif
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm);
+
+  // PPC LINUX ABI:
+  // preserve LR in pre-reserved slot in caller's frame
+  __ mflr(r0);
+  __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
+
+  // Save callee saved registers on the stack.
+  __ MultiPush(kCalleeSaved);
+
+  // Floating point regs FPR0 - FPR13 are volatile.
+  // FPR14-FPR31 are non-volatile, but sub-calls will save them for us
+
+  //  int offset_to_argv = kPointerSize * 22; // matches (22*4) above
+  //  __ lwz(r7, MemOperand(sp, offset_to_argv));
+
+  // Push a frame with special values setup to mark it as an entry frame.
+  // r3: code entry
+  // r4: function
+  // r5: receiver
+  // r6: argc
+  // r7: argv
+  __ li(r0, Operand(-1));  // Push a bad frame pointer to fail if it is used.
+  __ push(r0);
+#if V8_OOL_CONSTANT_POOL
+  __ mov(kConstantPoolRegister,
+         Operand(isolate()->factory()->empty_constant_pool_array()));
+  __ push(kConstantPoolRegister);
+#endif
+  int marker = type();
+  __ LoadSmiLiteral(r0, Smi::FromInt(marker));
+  __ push(r0);
+  __ push(r0);
+  // Save copies of the top frame descriptor on the stack.
+  __ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  __ LoadP(r0, MemOperand(r8));
+  __ push(r0);
+
+  // Set up frame pointer for the frame to be pushed.
+  __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+  // If this is the outermost JS call, set js_entry_sp value.
+  Label non_outermost_js;
+  ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate());
+  __ mov(r8, Operand(ExternalReference(js_entry_sp)));
+  __ LoadP(r9, MemOperand(r8));
+  __ cmpi(r9, Operand::Zero());
+  __ bne(&non_outermost_js);
+  __ StoreP(fp, MemOperand(r8));
+  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME));
+  Label cont;
+  __ b(&cont);
+  __ bind(&non_outermost_js);
+  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME));
+  __ bind(&cont);
+  __ push(ip);  // frame-type
+
+  // Jump to a faked try block that does the invoke, with a faked catch
+  // block that sets the pending exception.
+  __ b(&invoke);
+
+  __ bind(&handler_entry);
+  handler_offset_ = handler_entry.pos();
+  // Caught exception: Store result (exception) in the pending exception
+  // field in the JSEnv and return a failure sentinel.  Coming in here the
+  // fp will be invalid because the PushTryHandler below sets it to 0 to
+  // signal the existence of the JSEntry frame.
+  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                       isolate())));
+
+  __ StoreP(r3, MemOperand(ip));
+  __ LoadRoot(r3, Heap::kExceptionRootIndex);
+  __ b(&exit);
+
+  // Invoke: Link this frame into the handler chain.  There's only one
+  // handler block in this code object, so its index is 0.
+  __ bind(&invoke);
+  // Must preserve r0-r4, r5-r7 are available. (needs update for PPC)
+  __ PushTryHandler(StackHandler::JS_ENTRY, 0);
+  // If an exception not caught by another handler occurs, this handler
+  // returns control to the code after the b(&invoke) above, which
+  // restores all kCalleeSaved registers (including cp and fp) to their
+  // saved values before returning a failure to C.
+
+  // Clear any pending exceptions.
+  __ mov(r8, Operand(isolate()->factory()->the_hole_value()));
+  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                       isolate())));
+  __ StoreP(r8, MemOperand(ip));
+
+  // Invoke the function by calling through JS entry trampoline builtin.
+  // Notice that we cannot store a reference to the trampoline code directly in
+  // this stub, because runtime stubs are not traversed when doing GC.
+
+  // Expected registers by Builtins::JSEntryTrampoline
+  // r3: code entry
+  // r4: function
+  // r5: receiver
+  // r6: argc
+  // r7: argv
+  if (type() == StackFrame::ENTRY_CONSTRUCT) {
+    ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline,
+                                      isolate());
+    __ mov(ip, Operand(construct_entry));
+  } else {
+    ExternalReference entry(Builtins::kJSEntryTrampoline, isolate());
+    __ mov(ip, Operand(entry));
+  }
+  __ LoadP(ip, MemOperand(ip));  // deref address
+
+  // Branch and link to JSEntryTrampoline.
+  // the address points to the start of the code object, skip the header
+  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ mtctr(ip);
+  __ bctrl();  // make the call
+
+  // Unlink this frame from the handler chain.
+  __ PopTryHandler();
+
+  __ bind(&exit);  // r3 holds result
+  // Check if the current stack frame is marked as the outermost JS frame.
+  Label non_outermost_js_2;
+  __ pop(r8);
+  __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0);
+  __ bne(&non_outermost_js_2);
+  __ mov(r9, Operand::Zero());
+  __ mov(r8, Operand(ExternalReference(js_entry_sp)));
+  __ StoreP(r9, MemOperand(r8));
+  __ bind(&non_outermost_js_2);
+
+  // Restore the top frame descriptors from the stack.
+  __ pop(r6);
+  __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  __ StoreP(r6, MemOperand(ip));
+
+  // Reset the stack to the callee saved registers.
+  __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset));
+
+// Restore callee-saved registers and return.
+#ifdef DEBUG
+  if (FLAG_debug_code) {
+    Label here;
+    __ b(&here, SetLK);
+    __ bind(&here);
+  }
+#endif
+
+  __ MultiPop(kCalleeSaved);
+
+  __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize));
+  __ mtctr(r0);
+  __ bctr();
+}
+
+
+// Uses registers r3 to r7.
+// Expected input (depending on whether args are in registers or on the stack):
+// * object: r3 or at sp + 1 * kPointerSize.
+// * function: r4 or at sp.
+//
+// An inlined call site may have been generated before calling this stub.
+// In this case the offset to the inline site to patch is passed in r8.
+// (See LCodeGen::DoInstanceOfKnownGlobal)
+void InstanceofStub::Generate(MacroAssembler* masm) {
+  // Call site inlining and patching implies arguments in registers.
+  DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck());
+
+  // Fixed register usage throughout the stub:
+  const Register object = r3;     // Object (lhs).
+  Register map = r6;              // Map of the object.
+  const Register function = r4;   // Function (rhs).
+  const Register prototype = r7;  // Prototype of the function.
+  const Register inline_site = r9;
+  const Register scratch = r5;
+  Register scratch3 = no_reg;
+
+// delta = mov + unaligned LoadP + cmp + bne
+#if V8_TARGET_ARCH_PPC64
+  const int32_t kDeltaToLoadBoolResult =
+      (Assembler::kMovInstructions + 4) * Assembler::kInstrSize;
+#else
+  const int32_t kDeltaToLoadBoolResult =
+      (Assembler::kMovInstructions + 3) * Assembler::kInstrSize;
+#endif
+
+  Label slow, loop, is_instance, is_not_instance, not_js_object;
+
+  if (!HasArgsInRegisters()) {
+    __ LoadP(object, MemOperand(sp, 1 * kPointerSize));
+    __ LoadP(function, MemOperand(sp, 0));
+  }
+
+  // Check that the left hand is a JS object and load map.
+  __ JumpIfSmi(object, &not_js_object);
+  __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
+
+  // If there is a call site cache don't look in the global cache, but do the
+  // real lookup and update the call site cache.
+  if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) {
+    Label miss;
+    __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+    __ bne(&miss);
+    __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex);
+    __ bne(&miss);
+    __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+    __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+    __ bind(&miss);
+  }
+
+  // Get the prototype of the function.
+  __ TryGetFunctionPrototype(function, prototype, scratch, &slow, true);
+
+  // Check that the function prototype is a JS object.
+  __ JumpIfSmi(prototype, &slow);
+  __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
+
+  // Update the global instanceof or call site inlined cache with the current
+  // map and function. The cached answer will be set when it is known below.
+  if (!HasCallSiteInlineCheck()) {
+    __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
+    __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
+  } else {
+    DCHECK(HasArgsInRegisters());
+    // Patch the (relocated) inlined map check.
+
+    // The offset was stored in r8
+    //   (See LCodeGen::DoDeferredLInstanceOfKnownGlobal).
+    const Register offset = r8;
+    __ mflr(inline_site);
+    __ sub(inline_site, inline_site, offset);
+    // Get the map location in r8 and patch it.
+    __ GetRelocatedValue(inline_site, offset, scratch);
+    __ StoreP(map, FieldMemOperand(offset, Cell::kValueOffset), r0);
+  }
+
+  // Register mapping: r6 is object map and r7 is function prototype.
+  // Get prototype of object into r5.
+  __ LoadP(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
+
+  // We don't need map any more. Use it as a scratch register.
+  scratch3 = map;
+  map = no_reg;
+
+  // Loop through the prototype chain looking for the function prototype.
+  __ LoadRoot(scratch3, Heap::kNullValueRootIndex);
+  __ bind(&loop);
+  __ cmp(scratch, prototype);
+  __ beq(&is_instance);
+  __ cmp(scratch, scratch3);
+  __ beq(&is_not_instance);
+  __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
+  __ LoadP(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
+  __ b(&loop);
+  Factory* factory = isolate()->factory();
+
+  __ bind(&is_instance);
+  if (!HasCallSiteInlineCheck()) {
+    __ LoadSmiLiteral(r3, Smi::FromInt(0));
+    __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ Move(r3, factory->true_value());
+    }
+  } else {
+    // Patch the call site to return true.
+    __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+    __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ SetRelocatedValue(inline_site, scratch, r3);
+
+    if (!ReturnTrueFalseObject()) {
+      __ LoadSmiLiteral(r3, Smi::FromInt(0));
+    }
+  }
+  __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&is_not_instance);
+  if (!HasCallSiteInlineCheck()) {
+    __ LoadSmiLiteral(r3, Smi::FromInt(1));
+    __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex);
+    if (ReturnTrueFalseObject()) {
+      __ Move(r3, factory->false_value());
+    }
+  } else {
+    // Patch the call site to return false.
+    __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+    __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
+    // Get the boolean result location in scratch and patch it.
+    __ SetRelocatedValue(inline_site, scratch, r3);
+
+    if (!ReturnTrueFalseObject()) {
+      __ LoadSmiLiteral(r3, Smi::FromInt(1));
+    }
+  }
+  __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+  Label object_not_null, object_not_null_or_smi;
+  __ bind(&not_js_object);
+  // Before null, smi and string value checks, check that the rhs is a function
+  // because an exception needs to be thrown for a non-function rhs.
+  __ JumpIfSmi(function, &slow);
+  __ CompareObjectType(function, scratch3, scratch, JS_FUNCTION_TYPE);
+  __ bne(&slow);
+
+  // Null is not instance of anything.
+  __ Cmpi(object, Operand(isolate()->factory()->null_value()), r0);
+  __ bne(&object_not_null);
+  if (ReturnTrueFalseObject()) {
+    __ Move(r3, factory->false_value());
+  } else {
+    __ LoadSmiLiteral(r3, Smi::FromInt(1));
+  }
+  __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&object_not_null);
+  // Smi values are not instances of anything.
+  __ JumpIfNotSmi(object, &object_not_null_or_smi);
+  if (ReturnTrueFalseObject()) {
+    __ Move(r3, factory->false_value());
+  } else {
+    __ LoadSmiLiteral(r3, Smi::FromInt(1));
+  }
+  __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+  __ bind(&object_not_null_or_smi);
+  // String values are not instances of anything.
+  __ IsObjectJSStringType(object, scratch, &slow);
+  if (ReturnTrueFalseObject()) {
+    __ Move(r3, factory->false_value());
+  } else {
+    __ LoadSmiLiteral(r3, Smi::FromInt(1));
+  }
+  __ Ret(HasArgsInRegisters() ? 0 : 2);
+
+  // Slow-case.  Tail call builtin.
+  __ bind(&slow);
+  if (!ReturnTrueFalseObject()) {
+    if (HasArgsInRegisters()) {
+      __ Push(r3, r4);
+    }
+    __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION);
+  } else {
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+      __ Push(r3, r4);
+      __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION);
+    }
+    Label true_value, done;
+    __ cmpi(r3, Operand::Zero());
+    __ beq(&true_value);
+
+    __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+    __ b(&done);
+
+    __ bind(&true_value);
+    __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+
+    __ bind(&done);
+    __ Ret(HasArgsInRegisters() ? 0 : 2);
+  }
+}
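+
+
+// Reference-only sketch (unused helper) of the prototype-chain walk above,
+// ignoring the instanceof caches, call-site patching and the
+// null/smi/string special cases:
+static bool IsInstanceSketch(Map* object_map, Object* expected_prototype) {
+  for (Object* p = object_map->prototype(); !p->IsNull();
+       p = HeapObject::cast(p)->map()->prototype()) {
+    if (p == expected_prototype) return true;
+  }
+  return false;
+}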
+
+
+void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
+  Label miss;
+  Register receiver = LoadDescriptor::ReceiverRegister();
+
+  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6,
+                                                          r7, &miss);
+  __ bind(&miss);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
+}
+
+
+void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
+  // Return address is in lr.
+  Label miss;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register index = LoadDescriptor::NameRegister();
+  Register scratch = r6;
+  Register result = r3;
+  DCHECK(!scratch.is(receiver) && !scratch.is(index));
+
+  StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
+                                          &miss,  // When not a string.
+                                          &miss,  // When not a number.
+                                          &miss,  // When index out of range.
+                                          STRING_INDEX_IS_ARRAY_INDEX,
+                                          RECEIVER_IS_STRING);
+  char_at_generator.GenerateFast(masm);
+  __ Ret();
+
+  StubRuntimeCallHelper call_helper;
+  char_at_generator.GenerateSlow(masm, call_helper);
+
+  __ bind(&miss);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
+void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
+  // The displacement is the offset of the last parameter (if any)
+  // relative to the frame pointer.
+  const int kDisplacement =
+      StandardFrameConstants::kCallerSPOffset - kPointerSize;
+  DCHECK(r4.is(ArgumentsAccessReadDescriptor::index()));
+  DCHECK(r3.is(ArgumentsAccessReadDescriptor::parameter_count()));
+
+  // Check that the key is a smi.
+  Label slow;
+  __ JumpIfNotSmi(r4, &slow);
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor;
+  __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+  STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
+  __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ beq(&adaptor);
+
+  // Check index against formal parameters count limit passed in
+  // through register r3. Use unsigned comparison to get negative
+  // check for free.
+  __ cmpl(r4, r3);
+  __ bge(&slow);
+
+  // Read the argument from the stack and return it.
+  __ sub(r6, r3, r4);
+  __ SmiToPtrArrayOffset(r6, r6);
+  __ add(r6, fp, r6);
+  __ LoadP(r3, MemOperand(r6, kDisplacement));
+  __ blr();
+
+  // Arguments adaptor case: Check index against actual arguments
+  // limit found in the arguments adaptor frame. Use unsigned
+  // comparison to get negative check for free.
+  __ bind(&adaptor);
+  __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ cmpl(r4, r3);
+  __ bge(&slow);
+
+  // Read the argument from the adaptor frame and return it.
+  __ sub(r6, r3, r4);
+  __ SmiToPtrArrayOffset(r6, r6);
+  __ add(r6, r5, r6);
+  __ LoadP(r3, MemOperand(r6, kDisplacement));
+  __ blr();
+
+  // Slow-case: Handle non-smi or out-of-bounds access to arguments
+  // by calling the runtime system.
+  __ bind(&slow);
+  __ push(r4);
+  __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1);
+}
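+
+
+// Reference-only sketch (unused helper) of the bounds checks above: a single
+// unsigned comparison rejects both negative and too-large indices, because a
+// negative index reinterprets as a very large unsigned value.
+static bool ArgumentIndexInBounds(intptr_t index, intptr_t count) {
+  return static_cast<uintptr_t>(index) < static_cast<uintptr_t>(count);
+}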
+
+
+void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) {
+  // sp[0] : number of parameters
+  // sp[1] : receiver displacement
+  // sp[2] : function
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset));
+  STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
+  __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&runtime);
+
+  // Patch the arguments.length and the parameters pointer in the current frame.
+  __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ StoreP(r5, MemOperand(sp, 0 * kPointerSize));
+  __ SmiToPtrArrayOffset(r5, r5);
+  __ add(r6, r6, r5);
+  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
+
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) {
+  // Stack layout:
+  //  sp[0] : number of parameters (tagged)
+  //  sp[1] : address of receiver argument
+  //  sp[2] : function
+  // Registers used over whole function:
+  //  r9 : allocated object (tagged)
+  //  r11 : mapped parameter count (tagged)
+
+  __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
+  // r4 = parameter count (tagged)
+
+  // Check if the calling frame is an arguments adaptor frame.
+  Label runtime;
+  Label adaptor_frame, try_allocate;
+  __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset));
+  STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
+  __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ beq(&adaptor_frame);
+
+  // No adaptor, parameter count = argument count.
+  __ mr(r5, r4);
+  __ b(&try_allocate);
+
+  // We have an adaptor frame. Patch the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiToPtrArrayOffset(r7, r5);
+  __ add(r6, r6, r7);
+  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
+
+  // r4 = parameter count (tagged)
+  // r5 = argument count (tagged)
+  // Compute the mapped parameter count = min(r4, r5) in r4.
+  Label skip;
+  __ cmp(r4, r5);
+  __ blt(&skip);
+  __ mr(r4, r5);
+  __ bind(&skip);
+
+  __ bind(&try_allocate);
+
+  // Compute the sizes of backing store, parameter map, and arguments object.
+  // 1. Parameter map, has 2 extra words containing context and backing store.
+  const int kParameterMapHeaderSize =
+      FixedArray::kHeaderSize + 2 * kPointerSize;
+  // If there are no mapped parameters, we do not need the parameter_map.
+  Label skip2, skip3;
+  __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+  __ bne(&skip2);
+  __ li(r11, Operand::Zero());
+  __ b(&skip3);
+  __ bind(&skip2);
+  __ SmiToPtrArrayOffset(r11, r4);
+  __ addi(r11, r11, Operand(kParameterMapHeaderSize));
+  __ bind(&skip3);
+
+  // 2. Backing store.
+  __ SmiToPtrArrayOffset(r7, r5);
+  __ add(r11, r11, r7);
+  __ addi(r11, r11, Operand(FixedArray::kHeaderSize));
+
+  // 3. Arguments object.
+  __ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize));
+
+  // Do the allocation of all three objects in one go.
+  __ Allocate(r11, r3, r6, r7, &runtime, TAG_OBJECT);
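+  // The single allocation is laid out as: the sloppy arguments JSObject
+  // (Heap::kSloppyArgumentsObjectSize bytes), then the parameter map (if
+  // any), then the backing store FixedArray; the pointers computed below
+  // are derived from this layout.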
+
+  // r3 = address of new object(s) (tagged)
+  // r5 = argument count (smi-tagged)
+  // Get the arguments boilerplate from the current native context into r4.
+  const int kNormalOffset =
+      Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX);
+  const int kAliasedOffset =
+      Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX);
+
+  __ LoadP(r7,
+           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
+  Label skip4, skip5;
+  __ cmpi(r4, Operand::Zero());
+  __ bne(&skip4);
+  __ LoadP(r7, MemOperand(r7, kNormalOffset));
+  __ b(&skip5);
+  __ bind(&skip4);
+  __ LoadP(r7, MemOperand(r7, kAliasedOffset));
+  __ bind(&skip5);
+
+  // r3 = address of new object (tagged)
+  // r4 = mapped parameter count (tagged)
+  // r5 = argument count (smi-tagged)
+  // r7 = address of arguments map (tagged)
+  __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
+  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+  __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+  // Set up the callee in-object property.
+  STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1);
+  __ LoadP(r6, MemOperand(sp, 2 * kPointerSize));
+  __ AssertNotSmi(r6);
+  const int kCalleeOffset =
+      JSObject::kHeaderSize + Heap::kArgumentsCalleeIndex * kPointerSize;
+  __ StoreP(r6, FieldMemOperand(r3, kCalleeOffset), r0);
+
+  // Use the length (smi tagged) and set that as an in-object property too.
+  __ AssertSmi(r5);
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  const int kLengthOffset =
+      JSObject::kHeaderSize + Heap::kArgumentsLengthIndex * kPointerSize;
+  __ StoreP(r5, FieldMemOperand(r3, kLengthOffset), r0);
+
+  // Set up the elements pointer in the allocated arguments object.
+  // If we allocated a parameter map, r7 will point there, otherwise
+  // it will point to the backing store.
+  __ addi(r7, r3, Operand(Heap::kSloppyArgumentsObjectSize));
+  __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+  // r3 = address of new object (tagged)
+  // r4 = mapped parameter count (tagged)
+  // r5 = argument count (tagged)
+  // r7 = address of parameter map or backing store (tagged)
+  // Initialize parameter map. If there are no mapped arguments, we're done.
+  Label skip_parameter_map, skip6;
+  __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+  __ bne(&skip6);
+  // Move backing store address to r6, because it is
+  // expected there when filling in the unmapped arguments.
+  __ mr(r6, r7);
+  __ b(&skip_parameter_map);
+  __ bind(&skip6);
+
+  __ LoadRoot(r9, Heap::kSloppyArgumentsElementsMapRootIndex);
+  __ StoreP(r9, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+  __ AddSmiLiteral(r9, r4, Smi::FromInt(2), r0);
+  __ StoreP(r9, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+  __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize),
+            r0);
+  __ SmiToPtrArrayOffset(r9, r4);
+  __ add(r9, r7, r9);
+  __ addi(r9, r9, Operand(kParameterMapHeaderSize));
+  __ StoreP(r9, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize),
+            r0);
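+  // Parameter map layout so far: [map][length = mapped count + 2][context]
+  // [pointer to backing store][mapped parameter slots follow].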
+
+  // Copy the parameter slots and the holes in the arguments.
+  // We need to fill in mapped_parameter_count slots. They index the context,
+  // where parameters are stored in reverse order, at
+  //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1
+  // The mapped parameters thus need to get indices
+  //   MIN_CONTEXT_SLOTS+parameter_count-1 ..
+  //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count
+  // We loop from right to left.
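+  // Illustration: with parameter_count == 5 and mapped_parameter_count == 2
+  // (and assuming MIN_CONTEXT_SLOTS == 4), the two mapped slots get context
+  // indices 8 and 7.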
+  Label parameters_loop, parameters_test;
+  __ mr(r9, r4);
+  __ LoadP(r11, MemOperand(sp, 0 * kPointerSize));
+  __ AddSmiLiteral(r11, r11, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0);
+  __ sub(r11, r11, r4);
+  __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
+  __ SmiToPtrArrayOffset(r6, r9);
+  __ add(r6, r7, r6);
+  __ addi(r6, r6, Operand(kParameterMapHeaderSize));
+
+  // r9 = loop variable (tagged)
+  // r4 = mapping index (tagged)
+  // r6 = address of backing store (tagged)
+  // r7 = address of parameter map (tagged)
+  // r8 = temporary scratch (e.g., for address calculation)
+  // r10 = the hole value
+  __ b(&parameters_test);
+
+  __ bind(&parameters_loop);
+  __ SubSmiLiteral(r9, r9, Smi::FromInt(1), r0);
+  __ SmiToPtrArrayOffset(r8, r9);
+  __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag));
+  __ StorePX(r11, MemOperand(r8, r7));
+  __ subi(r8, r8, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize));
+  __ StorePX(r10, MemOperand(r8, r6));
+  __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
+  __ bind(&parameters_test);
+  __ CmpSmiLiteral(r9, Smi::FromInt(0), r0);
+  __ bne(&parameters_loop);
+
+  __ bind(&skip_parameter_map);
+  // r5 = argument count (tagged)
+  // r6 = address of backing store (tagged)
+  // r8 = scratch
+  // Copy arguments header and remaining slots (if there are any).
+  __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex);
+  __ StoreP(r8, FieldMemOperand(r6, FixedArray::kMapOffset), r0);
+  __ StoreP(r5, FieldMemOperand(r6, FixedArray::kLengthOffset), r0);
+
+  Label arguments_loop, arguments_test;
+  __ mr(r11, r4);
+  __ LoadP(r7, MemOperand(sp, 1 * kPointerSize));
+  __ SmiToPtrArrayOffset(r8, r11);
+  __ sub(r7, r7, r8);
+  __ b(&arguments_test);
+
+  __ bind(&arguments_loop);
+  __ subi(r7, r7, Operand(kPointerSize));
+  __ LoadP(r9, MemOperand(r7, 0));
+  __ SmiToPtrArrayOffset(r8, r11);
+  __ add(r8, r6, r8);
+  __ StoreP(r9, FieldMemOperand(r8, FixedArray::kHeaderSize), r0);
+  __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0);
+
+  __ bind(&arguments_test);
+  __ cmp(r11, r5);
+  __ blt(&arguments_loop);
+
+  // Return and remove the on-stack parameters.
+  __ addi(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  // r5 = argument count (tagged)
+  __ bind(&runtime);
+  __ StoreP(r5, MemOperand(sp, 0 * kPointerSize));  // Patch argument count.
+  __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1);
+}
+
+
+void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) {
+  // Return address is in lr.
+  Label slow;
+
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register key = LoadDescriptor::NameRegister();
+
+  // Check that the key is an array index, that is, a Uint32.
+  __ TestIfPositiveSmi(key, r0);
+  __ bne(&slow, cr0);
+
+  // Everything is fine; call the runtime.
+  __ Push(receiver, key);  // Receiver, key.
+
+  // Perform tail call to the entry.
+  __ TailCallExternalReference(
+      ExternalReference(IC_Utility(IC::kLoadElementWithInterceptor),
+                        masm->isolate()),
+      2, 1);
+
+  __ bind(&slow);
+  PropertyAccessCompiler::TailCallBuiltin(
+      masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC));
+}
+
+
+void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) {
+  // sp[0] : number of parameters
+  // sp[4] : receiver displacement
+  // sp[8] : function
+  // Check if the calling frame is an arguments adaptor frame.
+  Label adaptor_frame, try_allocate, runtime;
+  __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+  STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu);
+  __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ beq(&adaptor_frame);
+
+  // Get the length from the frame.
+  __ LoadP(r4, MemOperand(sp, 0));
+  __ b(&try_allocate);
+
+  // Patch the arguments.length and the parameters pointer.
+  __ bind(&adaptor_frame);
+  __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ StoreP(r4, MemOperand(sp, 0));
+  __ SmiToPtrArrayOffset(r6, r4);
+  __ add(r6, r5, r6);
+  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset));
+  __ StoreP(r6, MemOperand(sp, 1 * kPointerSize));
+
+  // Try the new space allocation. Start out with computing the size
+  // of the arguments object and the elements array in words.
+  Label add_arguments_object;
+  __ bind(&try_allocate);
+  __ cmpi(r4, Operand::Zero());
+  __ beq(&add_arguments_object);
+  __ SmiUntag(r4);
+  __ addi(r4, r4, Operand(FixedArray::kHeaderSize / kPointerSize));
+  __ bind(&add_arguments_object);
+  __ addi(r4, r4, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize));
+
+  // Do the allocation of both objects in one go.
+  __ Allocate(r4, r3, r5, r6, &runtime,
+              static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS));
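+  // Note that r4 holds a size in words here (arguments object plus elements
+  // array), hence the SIZE_IN_WORDS flag.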
+
+  // Get the arguments boilerplate from the current native context.
+  __ LoadP(r7,
+           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset));
+  __ LoadP(
+      r7,
+      MemOperand(r7, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX)));
+
+  __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0);
+  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex);
+  __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+  __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+
+  // Get the length (smi tagged) and set that as an in-object property too.
+  STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0);
+  __ LoadP(r4, MemOperand(sp, 0 * kPointerSize));
+  __ AssertSmi(r4);
+  __ StoreP(r4,
+            FieldMemOperand(r3, JSObject::kHeaderSize +
+                                    Heap::kArgumentsLengthIndex * kPointerSize),
+            r0);
+
+  // If there are no actual arguments, we're done.
+  Label done;
+  __ cmpi(r4, Operand::Zero());
+  __ beq(&done);
+
+  // Get the parameters pointer from the stack.
+  __ LoadP(r5, MemOperand(sp, 1 * kPointerSize));
+
+  // Set up the elements pointer in the allocated arguments object and
+  // initialize the header in the elements fixed array.
+  __ addi(r7, r3, Operand(Heap::kStrictArgumentsObjectSize));
+  __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+  __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex);
+  __ StoreP(r6, FieldMemOperand(r7, FixedArray::kMapOffset), r0);
+  __ StoreP(r4, FieldMemOperand(r7, FixedArray::kLengthOffset), r0);
+  // Untag the length for the loop.
+  __ SmiUntag(r4);
+
+  // Copy the fixed array slots.
+  Label loop;
+  // Set up r7 to point just prior to the first array slot.
+  __ addi(r7, r7,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+  __ mtctr(r4);
+  __ bind(&loop);
+  // Pre-decrement r5 with kPointerSize on each iteration.
+  // Pre-decrement in order to skip receiver.
+  __ LoadPU(r6, MemOperand(r5, -kPointerSize));
+  // Pre-increment r7 with kPointerSize on each iteration.
+  __ StorePU(r6, MemOperand(r7, kPointerSize));
+  __ bdnz(&loop);
+
+  // Return and remove the on-stack parameters.
+  __ bind(&done);
+  __ addi(sp, sp, Operand(3 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to allocate the arguments object.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kNewStrictArguments, 3, 1);
+}
+
+
+void RegExpExecStub::Generate(MacroAssembler* masm) {
+// Just jump directly to the runtime if native RegExp is not selected at
+// compile time, or if the regexp entry in generated code has been turned off
+// by a runtime switch or at compilation.
+#ifdef V8_INTERPRETED_REGEXP
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+#else  // V8_INTERPRETED_REGEXP
+
+  // Stack frame on entry.
+  //  sp[0]: last_match_info (expected JSArray)
+  //  sp[4]: previous index
+  //  sp[8]: subject string
+  //  sp[12]: JSRegExp object
+
+  const int kLastMatchInfoOffset = 0 * kPointerSize;
+  const int kPreviousIndexOffset = 1 * kPointerSize;
+  const int kSubjectOffset = 2 * kPointerSize;
+  const int kJSRegExpOffset = 3 * kPointerSize;
+
+  Label runtime, br_over, encoding_type_UC16;
+
+  // Allocation of registers for this function. These are in callee-save
+  // registers and will be preserved by the call to the native RegExp code, as
+  // this code is called using the normal C calling convention. When calling
+  // directly from generated code the native RegExp code will not do a GC, so
+  // the contents of these registers are safe to use after the call.
+  Register subject = r14;
+  Register regexp_data = r15;
+  Register last_match_info_elements = r16;
+  Register code = r17;
+
+  // Ensure register assignments are consistent with the callee-save masks.
+  DCHECK(subject.bit() & kCalleeSaved);
+  DCHECK(regexp_data.bit() & kCalleeSaved);
+  DCHECK(last_match_info_elements.bit() & kCalleeSaved);
+  DCHECK(code.bit() & kCalleeSaved);
+
+  // Ensure that a RegExp stack is allocated.
+  ExternalReference address_of_regexp_stack_memory_address =
+      ExternalReference::address_of_regexp_stack_memory_address(isolate());
+  ExternalReference address_of_regexp_stack_memory_size =
+      ExternalReference::address_of_regexp_stack_memory_size(isolate());
+  __ mov(r3, Operand(address_of_regexp_stack_memory_size));
+  __ LoadP(r3, MemOperand(r3, 0));
+  __ cmpi(r3, Operand::Zero());
+  __ beq(&runtime);
+
+  // Check that the first argument is a JSRegExp object.
+  __ LoadP(r3, MemOperand(sp, kJSRegExpOffset));
+  __ JumpIfSmi(r3, &runtime);
+  __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
+  __ bne(&runtime);
+
+  // Check that the RegExp has been compiled (data contains a fixed array).
+  __ LoadP(regexp_data, FieldMemOperand(r3, JSRegExp::kDataOffset));
+  if (FLAG_debug_code) {
+    __ TestIfSmi(regexp_data, r0);
+    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0);
+    __ CompareObjectType(regexp_data, r3, r3, FIXED_ARRAY_TYPE);
+    __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected);
+  }
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
+  __ LoadP(r3, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
+  // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu);
+  __ CmpSmiLiteral(r3, Smi::FromInt(JSRegExp::IRREGEXP), r0);
+  __ bne(&runtime);
+
+  // regexp_data: RegExp data (FixedArray)
+  // Check that the number of captures fits in the static offsets vector buffer.
+  __ LoadP(r5,
+           FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Check (number_of_captures + 1) * 2 <= offsets vector size
+  // Or          number_of_captures * 2 <= offsets vector size - 2
+  // SmiToShortArrayOffset accomplishes both the multiplication by 2 and the
+  // SmiUntag (the untag is a no-op on 32-bit).
+  __ SmiToShortArrayOffset(r5, r5);
+  STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2);
+  __ cmpli(r5, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2));
+  __ bgt(&runtime);
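+  // Illustration: a regexp with 2 capture groups needs (2 + 1) * 2 == 6
+  // offset slots, so r5 == 4 here and the check passes as long as
+  // 4 <= kJSRegexpStaticOffsetsVectorSize - 2.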
+
+  // Reset offset for possibly sliced string.
+  __ li(r11, Operand::Zero());
+  __ LoadP(subject, MemOperand(sp, kSubjectOffset));
+  __ JumpIfSmi(subject, &runtime);
+  __ mr(r6, subject);  // Make a copy of the original subject string.
+  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+  // subject: subject string
+  // r6: subject string
+  // r3: subject string instance type
+  // regexp_data: RegExp data (FixedArray)
+  // Handle subject string according to its encoding and representation:
+  // (1) Sequential string?  If yes, go to (5).
+  // (2) Anything but sequential or cons?  If yes, go to (6).
+  // (3) Cons string.  If the string is flat, replace subject with first string.
+  //     Otherwise bailout.
+  // (4) Is subject external?  If yes, go to (7).
+  // (5) Sequential string.  Load regexp code according to encoding.
+  // (E) Carry on.
+  /// [...]
+
+  // Deferred code at the end of the stub:
+  // (6) Not a long external string?  If yes, go to (8).
+  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  //     Go to (5).
+  // (8) Short external string or not a string?  If yes, bail out to runtime.
+  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+
+  Label seq_string /* 5 */, external_string /* 7 */, check_underlying /* 4 */,
+      not_seq_nor_cons /* 6 */, not_long_external /* 8 */;
+
+  // (1) Sequential string?  If yes, go to (5).
+  STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask |
+                 kShortExternalStringMask) == 0x93);
+  __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask |
+                          kShortExternalStringMask));
+  STATIC_ASSERT((kStringTag | kSeqStringTag) == 0);
+  __ beq(&seq_string, cr0);  // Go to (5).
+
+  // (2) Anything but sequential or cons?  If yes, go to (6).
+  STATIC_ASSERT(kConsStringTag < kExternalStringTag);
+  STATIC_ASSERT(kSlicedStringTag > kExternalStringTag);
+  STATIC_ASSERT(kIsNotStringMask > kExternalStringTag);
+  STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag);
+  STATIC_ASSERT(kExternalStringTag < 0xffffu);
+  __ cmpi(r4, Operand(kExternalStringTag));
+  __ bge(&not_seq_nor_cons);  // Go to (6).
+
+  // (3) Cons string.  Check that it's flat.
+  // Replace subject with first string and reload instance type.
+  __ LoadP(r3, FieldMemOperand(subject, ConsString::kSecondOffset));
+  __ CompareRoot(r3, Heap::kempty_stringRootIndex);
+  __ bne(&runtime);
+  __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
+
+  // (4) Is subject external?  If yes, go to (7).
+  __ bind(&check_underlying);
+  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kSeqStringTag == 0);
+  STATIC_ASSERT(kStringRepresentationMask == 3);
+  __ andi(r0, r3, Operand(kStringRepresentationMask));
+  // The underlying external string is never a short external string.
+  STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength);
+  STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength);
+  __ bne(&external_string, cr0);  // Go to (7).
+
+  // (5) Sequential string.  Load regexp code according to encoding.
+  __ bind(&seq_string);
+  // subject: sequential subject string (or look-alike, external string)
+  // r6: original subject string
+  // Load previous index and check range before r6 is overwritten.  We have to
+  // use r6 instead of subject here because subject might have been only made
+  // to look like a sequential string when it actually is an external string.
+  __ LoadP(r4, MemOperand(sp, kPreviousIndexOffset));
+  __ JumpIfNotSmi(r4, &runtime);
+  __ LoadP(r6, FieldMemOperand(r6, String::kLengthOffset));
+  __ cmpl(r6, r4);
+  __ ble(&runtime);
+  __ SmiUntag(r4);
+
+  STATIC_ASSERT(4 == kOneByteStringTag);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  STATIC_ASSERT(kStringEncodingMask == 4);
+  __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC);
+  __ beq(&encoding_type_UC16, cr0);
+  __ LoadP(code,
+           FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset));
+  __ b(&br_over);
+  __ bind(&encoding_type_UC16);
+  __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset));
+  __ bind(&br_over);
+
+  // (E) Carry on.  String handling is done.
+  // code: irregexp code
+  // Check that the irregexp code has been generated for the actual string
+  // encoding. If it has, the field contains a code object; otherwise it
+  // contains a smi (code flushing support).
+  __ JumpIfSmi(code, &runtime);
+
+  // r4: previous index
+  // r6: encoding of subject string (1 if one_byte, 0 if two_byte);
+  // code: Address of generated regexp code
+  // subject: Subject string
+  // regexp_data: RegExp data (FixedArray)
+  // All checks done. Now push arguments for native regexp code.
+  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r3, r5);
+
+  // Isolates: note we add an additional parameter here (isolate pointer).
+  const int kRegExpExecuteArguments = 10;
+  const int kParameterRegisters = 8;
+  __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters);
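+  // On PPC the first eight integer arguments travel in r3-r10
+  // (kParameterRegisters == 8); arguments 9 and 10 are passed in the exit
+  // frame's parameter save area on the stack.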
+
+  // Stack pointer now points to cell where return address is to be written.
+  // Arguments are before that on the stack or in registers.
+
+  // Argument 10 (in stack parameter area): Pass current isolate address.
+  __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+  __ StoreP(r3, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize));
+
+  // Argument 9 is a dummy that reserves the space used for
+  // the return address added by the ExitFrame in native calls.
+
+  // Argument 8 (r10): Indicate that this is a direct call from JavaScript.
+  __ li(r10, Operand(1));
+
+  // Argument 7 (r9): Start (high end) of backtracking stack memory area.
+  __ mov(r3, Operand(address_of_regexp_stack_memory_address));
+  __ LoadP(r3, MemOperand(r3, 0));
+  __ mov(r5, Operand(address_of_regexp_stack_memory_size));
+  __ LoadP(r5, MemOperand(r5, 0));
+  __ add(r9, r3, r5);
+
+  // Argument 6 (r8): Set the number of capture registers to zero to force
+  // global regexps to behave as non-global.  This does not affect non-global
+  // regexps.
+  __ li(r8, Operand::Zero());
+
+  // Argument 5 (r7): static offsets vector buffer.
+  __ mov(
+      r7,
+      Operand(ExternalReference::address_of_static_offsets_vector(isolate())));
+
+  // For arguments 4 (r6) and 3 (r5) get string length, calculate start of data
+  // and calculate the shift of the index (0 for one-byte and 1 for two-byte).
+  __ addi(r18, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag));
+  __ xori(r6, r6, Operand(1));
+  // Load the length of the original subject string from the previous stack
+  // frame. Therefore we have to use fp, which points exactly two pointer
+  // sizes below the previous sp. (Because creating a new stack frame pushes
+  // the previous fp onto the stack and moves up sp by 2 * kPointerSize.)
+  __ LoadP(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize));
+  // If slice offset is not 0, load the length from the original sliced string.
+  // Argument 4, r6: End of string data
+  // Argument 3, r5: Start of string data
+  // Prepare start and end index of the input.
+  __ ShiftLeft_(r11, r11, r6);
+  __ add(r11, r18, r11);
+  __ ShiftLeft_(r5, r4, r6);
+  __ add(r5, r11, r5);
+
+  __ LoadP(r18, FieldMemOperand(subject, String::kLengthOffset));
+  __ SmiUntag(r18);
+  __ ShiftLeft_(r6, r18, r6);
+  __ add(r6, r11, r6);
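+  // Illustration: after the xori above the shift in r6 is 0 for one-byte and
+  // 1 for two-byte strings, so a character index i maps to byte offset
+  // i << r6 (e.g. previous index 3 in a two-byte string becomes offset 6).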
+
+  // Argument 2 (r4): Previous index.
+  // Already there
+
+  // Argument 1 (r3): Subject string.
+  __ mr(r3, subject);
+
+  // Locate the code entry and call it.
+  __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+
+#if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR)
+  // Even simulated AIX/PPC64 Linux uses a function descriptor for the
+  // RegExp routine.  Extract the instruction address here since
+  // DirectCEntryStub::GenerateCall will not do it for calls out to
+  // what it thinks is C code compiled for the simulator/host
+  // platform.
+  __ LoadP(code, MemOperand(code, 0));  // Instruction address
+#endif
+
+  DirectCEntryStub stub(isolate());
+  stub.GenerateCall(masm, code);
+
+  __ LeaveExitFrame(false, no_reg, true);
+
+  // r3: result
+  // subject: subject string (callee saved)
+  // regexp_data: RegExp data (callee saved)
+  // last_match_info_elements: Last match info elements (callee saved)
+  // Check the result.
+  Label success;
+  __ cmpi(r3, Operand(1));
+  // We expect exactly one result since we force the called regexp to behave
+  // as non-global.
+  __ beq(&success);
+  Label failure;
+  __ cmpi(r3, Operand(NativeRegExpMacroAssembler::FAILURE));
+  __ beq(&failure);
+  __ cmpi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION));
+  // If it is not the exception, it can only be retry. Handle that in the
+  // runtime system.
+  __ bne(&runtime);
+  // The result must now be an exception. If there is no pending exception
+  // already, a stack overflow (on the backtrack stack) was detected in the
+  // RegExp code, but the exception has not been created yet. Handle that in
+  // the runtime system.
+  // TODO(592): Rerunning the RegExp to get the stack overflow exception.
+  __ mov(r4, Operand(isolate()->factory()->the_hole_value()));
+  __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress,
+                                       isolate())));
+  __ LoadP(r3, MemOperand(r5, 0));
+  __ cmp(r3, r4);
+  __ beq(&runtime);
+
+  __ StoreP(r4, MemOperand(r5, 0));  // Clear pending exception.
+
+  // Check if the exception is a termination. If so, throw as uncatchable.
+  __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex);
+
+  Label termination_exception;
+  __ beq(&termination_exception);
+
+  __ Throw(r3);
+
+  __ bind(&termination_exception);
+  __ ThrowUncatchable(r3);
+
+  __ bind(&failure);
+  // For failure and exception return null.
+  __ mov(r3, Operand(isolate()->factory()->null_value()));
+  __ addi(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  // Process the result from the native regexp code.
+  __ bind(&success);
+  __ LoadP(r4,
+           FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset));
+  // Calculate number of capture registers (number_of_captures + 1) * 2.
+  // SmiToShortArrayOffset accomplishes both the multiplication by 2 and the
+  // SmiUntag (the untag is a no-op on 32-bit).
+  __ SmiToShortArrayOffset(r4, r4);
+  __ addi(r4, r4, Operand(2));
+
+  __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
+  __ JumpIfSmi(r3, &runtime);
+  __ CompareObjectType(r3, r5, r5, JS_ARRAY_TYPE);
+  __ bne(&runtime);
+  // Check that the JSArray is in fast case.
+  __ LoadP(last_match_info_elements,
+           FieldMemOperand(r3, JSArray::kElementsOffset));
+  __ LoadP(r3,
+           FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset));
+  __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex);
+  __ bne(&runtime);
+  // Check that the last match info has space for the capture registers and the
+  // additional information.
+  __ LoadP(
+      r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset));
+  __ addi(r5, r4, Operand(RegExpImpl::kLastMatchOverhead));
+  __ SmiUntag(r0, r3);
+  __ cmp(r5, r0);
+  __ bgt(&runtime);
+
+  // r4: number of capture registers
+  // subject: subject string
+  // Store the capture count.
+  __ SmiTag(r5, r4);
+  __ StoreP(r5, FieldMemOperand(last_match_info_elements,
+                                RegExpImpl::kLastCaptureCountOffset),
+            r0);
+  // Store last subject and last input.
+  __ StoreP(subject, FieldMemOperand(last_match_info_elements,
+                                     RegExpImpl::kLastSubjectOffset),
+            r0);
+  __ mr(r5, subject);
+  __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset,
+                      subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
+  __ mr(subject, r5);
+  __ StoreP(subject, FieldMemOperand(last_match_info_elements,
+                                     RegExpImpl::kLastInputOffset),
+            r0);
+  __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset,
+                      subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs);
+
+  // Get the static offsets vector filled by the native regexp code.
+  ExternalReference address_of_static_offsets_vector =
+      ExternalReference::address_of_static_offsets_vector(isolate());
+  __ mov(r5, Operand(address_of_static_offsets_vector));
+
+  // r4: number of capture registers
+  // r5: offsets vector
+  Label next_capture;
+  // The capture register counter starts from the number of capture registers
+  // and counts down until wrapping after zero.
+  __ addi(
+      r3, last_match_info_elements,
+      Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize));
+  __ addi(r5, r5, Operand(-kIntSize));  // bias down for lwzu
+  __ mtctr(r4);
+  __ bind(&next_capture);
+  // Read the value from the static offsets vector buffer.
+  __ lwzu(r6, MemOperand(r5, kIntSize));
+  // Store the smi value in the last match info.
+  __ SmiTag(r6);
+  __ StorePU(r6, MemOperand(r3, kPointerSize));
+  __ bdnz(&next_capture);
+
+  // Return last match info.
+  __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset));
+  __ addi(sp, sp, Operand(4 * kPointerSize));
+  __ Ret();
+
+  // Do the runtime call to execute the regexp.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kRegExpExecRT, 4, 1);
+
+  // Deferred code for string handling.
+  // (6) Not a long external string?  If yes, go to (8).
+  __ bind(&not_seq_nor_cons);
+  // Compare flags are still set.
+  __ bgt(&not_long_external);  // Go to (8).
+
+  // (7) External string.  Make it, offset-wise, look like a sequential string.
+  __ bind(&external_string);
+  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset));
+  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset));
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    STATIC_ASSERT(kIsIndirectStringMask == 1);
+    __ andi(r0, r3, Operand(kIsIndirectStringMask));
+    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
+  }
+  __ LoadP(subject,
+           FieldMemOperand(subject, ExternalString::kResourceDataOffset));
+  // Move the pointer so that offset-wise, it looks like a sequential string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ subi(subject, subject,
+          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ b(&seq_string);  // Go to (5).
+
+  // (8) Short external string or not a string?  If yes, bail out to runtime.
+  __ bind(&not_long_external);
+  STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0);
+  __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask));
+  __ bne(&runtime, cr0);
+
+  // (9) Sliced string.  Replace subject with parent.  Go to (4).
+  // Load offset into r11 and replace subject string with parent.
+  __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset));
+  __ SmiUntag(r11);
+  __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset));
+  __ b(&check_underlying);  // Go to (4).
+#endif  // V8_INTERPRETED_REGEXP
+}
+
+
+static void GenerateRecordCallTarget(MacroAssembler* masm) {
+  // Cache the called function in a feedback vector slot.  Cache states
+  // are uninitialized, monomorphic (indicated by a JSFunction), and
+  // megamorphic.
+  // r3 : number of arguments to the construct function
+  // r4 : the function to call
+  // r5 : Feedback vector
+  // r6 : slot in feedback vector (Smi)
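+  // State transitions for the slot (sketch): uninitialized symbol
+  // -> monomorphic (JSFunction or AllocationSite) -> megamorphic symbol.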
+  Label initialize, done, miss, megamorphic, not_array_function;
+
+  DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()),
+            masm->isolate()->heap()->megamorphic_symbol());
+  DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()),
+            masm->isolate()->heap()->uninitialized_symbol());
+
+  // Load the cache state into r7.
+  __ SmiToPtrArrayOffset(r7, r6);
+  __ add(r7, r5, r7);
+  __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+
+  // A monomorphic cache hit or an already megamorphic state: invoke the
+  // function without changing the state.
+  __ cmp(r7, r4);
+  __ b(eq, &done);
+
+  if (!FLAG_pretenuring_call_new) {
+    // If we came here, we need to see if we are the array function.
+    // If we didn't have a matching function, and we didn't find the
+    // megamorphic sentinel, then the slot holds either some other function or
+    // an AllocationSite. Do a map check on the object in r7.
+    __ LoadP(r8, FieldMemOperand(r7, 0));
+    __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+    __ bne(&miss);
+
+    // Make sure the function is the Array() function
+    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+    __ cmp(r4, r7);
+    __ bne(&megamorphic);
+    __ b(&done);
+  }
+
+  __ bind(&miss);
+
+  // A monomorphic miss (i.e., here the cache is not uninitialized) goes
+  // megamorphic.
+  __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
+  __ beq(&initialize);
+  // MegamorphicSentinel is an immortal immovable object (the megamorphic
+  // symbol) so no write-barrier is needed.
+  __ bind(&megamorphic);
+  __ SmiToPtrArrayOffset(r7, r6);
+  __ add(r7, r5, r7);
+  __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+  __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+  __ jmp(&done);
+
+  // An uninitialized cache is patched with the function.
+  __ bind(&initialize);
+
+  if (!FLAG_pretenuring_call_new) {
+    // Make sure the function is the Array() function.
+    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+    __ cmp(r4, r7);
+    __ bne(&not_array_function);
+
+    // The target function is the Array constructor.
+    // Create an AllocationSite if we don't already have one, and store it in
+    // the slot.
+    {
+      FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+      // Arguments register must be smi-tagged to call out.
+      __ SmiTag(r3);
+      __ Push(r6, r5, r4, r3);
+
+      CreateAllocationSiteStub create_stub(masm->isolate());
+      __ CallStub(&create_stub);
+
+      __ Pop(r6, r5, r4, r3);
+      __ SmiUntag(r3);
+    }
+    __ b(&done);
+
+    __ bind(&not_array_function);
+  }
+
+  __ SmiToPtrArrayOffset(r7, r6);
+  __ add(r7, r5, r7);
+  __ addi(r7, r7, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ StoreP(r4, MemOperand(r7, 0));
+
+  __ Push(r7, r5, r4);
+  __ RecordWrite(r5, r7, r4, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Pop(r7, r5, r4);
+
+  __ bind(&done);
+}
+
+
+static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) {
+  // Do not transform the receiver for strict mode functions and natives.
+  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ lwz(r7, FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset));
+  __ TestBit(r7,
+#if V8_TARGET_ARCH_PPC64
+             SharedFunctionInfo::kStrictModeFunction,
+#else
+             SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
+#endif
+             r0);
+  __ bne(cont, cr0);
+
+  // Do not transform the receiver for natives.
+  __ TestBit(r7,
+#if V8_TARGET_ARCH_PPC64
+             SharedFunctionInfo::kNative,
+#else
+             SharedFunctionInfo::kNative + kSmiTagSize,
+#endif
+             r0);
+  __ bne(cont, cr0);
+}
+
+
+static void EmitSlowCase(MacroAssembler* masm, int argc, Label* non_function) {
+  // Check for function proxy.
+  STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
+  __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ bne(non_function);
+  __ push(r4);  // put proxy as additional argument
+  __ li(r3, Operand(argc + 1));
+  __ li(r5, Operand::Zero());
+  __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY);
+  {
+    Handle<Code> adaptor =
+        masm->isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    __ Jump(adaptor, RelocInfo::CODE_TARGET);
+  }
+
+  // CALL_NON_FUNCTION expects the non-function callee as receiver (instead
+  // of the original receiver from the call site).
+  __ bind(non_function);
+  __ StoreP(r4, MemOperand(sp, argc * kPointerSize), r0);
+  __ li(r3, Operand(argc));  // Set up the number of arguments.
+  __ li(r5, Operand::Zero());
+  __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION);
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) {
+  // Wrap the receiver and patch it back onto the stack.
+  {
+    FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL);
+    __ Push(r4, r6);
+    __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+    __ pop(r4);
+  }
+  __ StoreP(r3, MemOperand(sp, argc * kPointerSize), r0);
+  __ b(cont);
+}
+
+
+static void CallFunctionNoFeedback(MacroAssembler* masm, int argc,
+                                   bool needs_checks, bool call_as_method) {
+  // r4 : the function to call
+  Label slow, non_function, wrap, cont;
+
+  if (needs_checks) {
+    // Check that the function is really a JavaScript function.
+    // r4: pushed function (to be verified)
+    __ JumpIfSmi(r4, &non_function);
+
+    // Go to the slow case if we do not have a function.
+    __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
+    __ bne(&slow);
+  }
+
+  // Fast-case: Invoke the function now.
+  // r4: pushed function
+  ParameterCount actual(argc);
+
+  if (call_as_method) {
+    if (needs_checks) {
+      EmitContinueIfStrictOrNative(masm, &cont);
+    }
+
+    // Compute the receiver in sloppy mode.
+    __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0);
+
+    if (needs_checks) {
+      __ JumpIfSmi(r6, &wrap);
+      __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE);
+      __ blt(&wrap);
+    } else {
+      __ b(&wrap);
+    }
+
+    __ bind(&cont);
+  }
+
+  __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
+
+  if (needs_checks) {
+    // Slow-case: Non-function called.
+    __ bind(&slow);
+    EmitSlowCase(masm, argc, &non_function);
+  }
+
+  if (call_as_method) {
+    __ bind(&wrap);
+    EmitWrapCase(masm, argc, &cont);
+  }
+}
+
+
+void CallFunctionStub::Generate(MacroAssembler* masm) {
+  CallFunctionNoFeedback(masm, argc(), NeedsChecks(), CallAsMethod());
+}
+
+
+void CallConstructStub::Generate(MacroAssembler* masm) {
+  // r3 : number of arguments
+  // r4 : the function to call
+  // r5 : feedback vector
+  // r6 : (only if r5 is not the megamorphic symbol) slot in feedback
+  //      vector (Smi)
+  Label slow, non_function_call;
+
+  // Check that the function is not a smi.
+  __ JumpIfSmi(r4, &non_function_call);
+  // Check that the function is a JSFunction.
+  __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
+  __ bne(&slow);
+
+  if (RecordCallTarget()) {
+    GenerateRecordCallTarget(masm);
+
+    __ SmiToPtrArrayOffset(r8, r6);
+    __ add(r8, r5, r8);
+    if (FLAG_pretenuring_call_new) {
+      // Put the AllocationSite from the feedback vector into r5.
+      // By adding kPointerSize we encode that we know the AllocationSite
+      // entry is at the feedback vector slot given by r6 + 1.
+      __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize + kPointerSize));
+    } else {
+      Label feedback_register_initialized;
+      // Put the AllocationSite from the feedback vector into r5, or undefined.
+      __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize));
+      __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset));
+      __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+      __ beq(&feedback_register_initialized);
+      __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+      __ bind(&feedback_register_initialized);
+    }
+
+    __ AssertUndefinedOrAllocationSite(r5, r8);
+  }
+
+  // Jump to the function-specific construct stub.
+  Register jmp_reg = r7;
+  __ LoadP(jmp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(jmp_reg,
+           FieldMemOperand(jmp_reg, SharedFunctionInfo::kConstructStubOffset));
+  __ addi(ip, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ JumpToJSEntry(ip);
+
+  // r3: number of arguments
+  // r4: called object
+  // r7: object type
+  Label do_call;
+  __ bind(&slow);
+  STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu);
+  __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE));
+  __ bne(&non_function_call);
+  __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR);
+  __ b(&do_call);
+
+  __ bind(&non_function_call);
+  __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
+  __ bind(&do_call);
+  // Set expected number of arguments to zero (not changing r3).
+  __ li(r5, Operand::Zero());
+  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+          RelocInfo::CODE_TARGET);
+}
+
+
+static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) {
+  __ LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadP(vector,
+           FieldMemOperand(vector, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(vector,
+           FieldMemOperand(vector, SharedFunctionInfo::kFeedbackVectorOffset));
+}
+
+
+void CallIC_ArrayStub::Generate(MacroAssembler* masm) {
+  // r4 - function
+  // r6 - slot id
+  Label miss;
+  int argc = arg_count();
+  ParameterCount actual(argc);
+
+  EmitLoadTypeFeedbackVector(masm, r5);
+
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7);
+  __ cmp(r4, r7);
+  __ bne(&miss);
+
+  __ mov(r3, Operand(arg_count()));
+  __ SmiToPtrArrayOffset(r7, r6);
+  __ add(r7, r5, r7);
+  __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+
+  // Verify that r7 contains an AllocationSite
+  __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset));
+  __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+  __ bne(&miss);
+
+  __ mr(r5, r7);
+  ArrayConstructorStub stub(masm->isolate(), arg_count());
+  __ TailCallStub(&stub);
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+
+  // The slow case: we need this no matter what, to complete a call after a
+  // miss.
+  CallFunctionNoFeedback(masm, arg_count(), true, CallAsMethod());
+
+  // Unreachable.
+  __ stop("Unexpected code address");
+}
+
+
+void CallICStub::Generate(MacroAssembler* masm) {
+  // r4 - function
+  // r6 - slot id (Smi)
+  Label extra_checks_or_miss, slow_start;
+  Label slow, non_function, wrap, cont;
+  Label have_js_function;
+  int argc = arg_count();
+  ParameterCount actual(argc);
+
+  EmitLoadTypeFeedbackVector(masm, r5);
+
+  // The checks. First, does r4 match the recorded monomorphic target?
+  __ SmiToPtrArrayOffset(r7, r6);
+  __ add(r7, r5, r7);
+  __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize));
+  __ cmp(r4, r7);
+  __ bne(&extra_checks_or_miss);
+
+  __ bind(&have_js_function);
+  if (CallAsMethod()) {
+    EmitContinueIfStrictOrNative(masm, &cont);
+    // Compute the receiver in sloppy mode.
+    __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0);
+
+    __ JumpIfSmi(r6, &wrap);
+    __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE);
+    __ blt(&wrap);
+
+    __ bind(&cont);
+  }
+
+  __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper());
+
+  __ bind(&slow);
+  EmitSlowCase(masm, argc, &non_function);
+
+  if (CallAsMethod()) {
+    __ bind(&wrap);
+    EmitWrapCase(masm, argc, &cont);
+  }
+
+  __ bind(&extra_checks_or_miss);
+  Label miss;
+
+  __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex);
+  __ beq(&slow_start);
+  __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex);
+  __ beq(&miss);
+
+  if (!FLAG_trace_ic) {
+    // We are going megamorphic. If the feedback is a JSFunction, it is fine
+    // to handle it here. More complex cases are dealt with in the runtime.
+    __ AssertNotSmi(r7);
+    __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE);
+    __ bne(&miss);
+    __ SmiToPtrArrayOffset(r7, r6);
+    __ add(r7, r5, r7);
+    __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex);
+    __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0);
+    // We have to update statistics for runtime profiling.
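+    // A slot going megamorphic no longer counts as a slot with type
+    // feedback, hence the paired decrement of the with-types count and
+    // increment of the generic count below (our reading of the counters).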
+    const int with_types_offset =
+        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+    __ LoadP(r7, FieldMemOperand(r5, with_types_offset));
+    __ SubSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+    __ StoreP(r7, FieldMemOperand(r5, with_types_offset), r0);
+    const int generic_offset =
+        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
+    __ LoadP(r7, FieldMemOperand(r5, generic_offset));
+    __ AddSmiLiteral(r7, r7, Smi::FromInt(1), r0);
+    __ StoreP(r7, FieldMemOperand(r5, generic_offset), r0);
+    __ jmp(&slow_start);
+  }
+
+  // We are here because tracing is on or we are going monomorphic.
+  __ bind(&miss);
+  GenerateMiss(masm);
+
+  // The slow case.
+  __ bind(&slow_start);
+  // Check that the function is really a JavaScript function.
+  // r4: pushed function (to be verified)
+  __ JumpIfSmi(r4, &non_function);
+
+  // Go to the slow case if we do not have a function.
+  __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE);
+  __ bne(&slow);
+  __ b(&have_js_function);
+}
+
+
+void CallICStub::GenerateMiss(MacroAssembler* masm) {
+  // Get the receiver of the function from the stack; 1 ~ return address.
+  __ LoadP(r7, MemOperand(sp, (arg_count() + 1) * kPointerSize), r0);
+
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    // Push the receiver and the function and feedback info.
+    __ Push(r7, r4, r5, r6);
+
+    // Call the entry.
+    IC::UtilityId id = GetICState() == DEFAULT ? IC::kCallIC_Miss
+                                               : IC::kCallIC_Customization_Miss;
+
+    ExternalReference miss = ExternalReference(IC_Utility(id), masm->isolate());
+    __ CallExternalReference(miss, 4);
+
+    // Move result to r4 and exit the internal frame.
+    __ mr(r4, r3);
+  }
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharCodeAtGenerator
+void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
+  // If the receiver is a smi trigger the non-string case.
+  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
+    __ JumpIfSmi(object_, receiver_not_string_);
+
+    // Fetch the instance type of the receiver into result register.
+    __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+    __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+    // If the receiver is not a string trigger the non-string case.
+    __ andi(r0, result_, Operand(kIsNotStringMask));
+    __ bne(receiver_not_string_, cr0);
+  }
+
+  // If the index is non-smi trigger the non-smi case.
+  __ JumpIfNotSmi(index_, &index_not_smi_);
+  __ bind(&got_smi_index_);
+
+  // Check for index out of range.
+  __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset));
+  __ cmpl(ip, index_);
+  __ ble(index_out_of_range_);
+
+  __ SmiUntag(index_);
+
+  StringCharLoadGenerator::Generate(masm, object_, index_, result_,
+                                    &call_runtime_);
+
+  __ SmiTag(result_);
+  __ bind(&exit_);
+}
+
+
+void StringCharCodeAtGenerator::GenerateSlow(
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+  __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase);
+
+  // Index is not a smi.
+  __ bind(&index_not_smi_);
+  // If index is a heap number, try converting it to an integer.
+  __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_,
+              DONT_DO_SMI_CHECK);
+  call_helper.BeforeCall(masm);
+  __ push(object_);
+  __ push(index_);  // Consumed by runtime conversion function.
+  if (index_flags_ == STRING_INDEX_IS_NUMBER) {
+    __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1);
+  } else {
+    DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX);
+    // NumberToSmi discards numbers that are not exact integers.
+    __ CallRuntime(Runtime::kNumberToSmi, 1);
+  }
+  // Save the conversion result before the pop instructions below
+  // have a chance to overwrite it.
+  __ Move(index_, r3);
+  __ pop(object_);
+  // Reload the instance type.
+  __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
+  __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
+  call_helper.AfterCall(masm);
+  // If index is still not a smi, it must be out of range.
+  __ JumpIfNotSmi(index_, index_out_of_range_);
+  // Otherwise, return to the fast path.
+  __ b(&got_smi_index_);
+
+  // Call runtime. We get here when the receiver is a string and the
+  // index is a number, but the code for getting the actual character
+  // is too complex (e.g., when the string needs to be flattened).
+  __ bind(&call_runtime_);
+  call_helper.BeforeCall(masm);
+  __ SmiTag(index_);
+  __ Push(object_, index_);
+  __ CallRuntime(Runtime::kStringCharCodeAtRT, 2);
+  __ Move(result_, r3);
+  call_helper.AfterCall(masm);
+  __ b(&exit_);
+
+  __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase);
+}
+
+
+// -------------------------------------------------------------------------
+// StringCharFromCodeGenerator
+
+void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
+  // Fast case of Heap::LookupSingleCharacterStringFromCode.
+  DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCode + 1));
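+  // r0 becomes a mask with the smi tag bit plus every payload bit above
+  // kMaxOneByteCharCode set; ANDing it with code_ yields zero exactly when
+  // code_ is a smi in the range [0, kMaxOneByteCharCode].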
+  __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCode));
+  __ ori(r0, r0, Operand(kSmiTagMask));
+  __ and_(r0, code_, r0);
+  __ cmpi(r0, Operand::Zero());
+  __ bne(&slow_case_);
+
+  __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
+  // At this point the code register contains a smi-tagged one-byte char code.
+  __ mr(r0, code_);
+  __ SmiToPtrArrayOffset(code_, code_);
+  __ add(result_, result_, code_);
+  __ mr(code_, r0);
+  __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
+  __ CompareRoot(result_, Heap::kUndefinedValueRootIndex);
+  __ beq(&slow_case_);
+  __ bind(&exit_);
+}
+
+
+void StringCharFromCodeGenerator::GenerateSlow(
+    MacroAssembler* masm, const RuntimeCallHelper& call_helper) {
+  __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase);
+
+  __ bind(&slow_case_);
+  call_helper.BeforeCall(masm);
+  __ push(code_);
+  __ CallRuntime(Runtime::kCharFromCode, 1);
+  __ Move(result_, r3);
+  call_helper.AfterCall(masm);
+  __ b(&exit_);
+
+  __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase);
+}
+
+
+enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 };
+
+
+void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest,
+                                          Register src, Register count,
+                                          Register scratch,
+                                          String::Encoding encoding) {
+  if (FLAG_debug_code) {
+    // Check that destination is word aligned.
+    __ andi(r0, dest, Operand(kPointerAlignmentMask));
+    __ Check(eq, kDestinationOfCopyNotAligned, cr0);
+  }
+
+  // Nothing to do for zero characters.
+  Label done;
+  if (encoding == String::TWO_BYTE_ENCODING) {
+    // Double the character count to get the byte count.
+    __ add(count, count, count, LeaveOE, SetRC);
+    __ beq(&done, cr0);
+  } else {
+    __ cmpi(count, Operand::Zero());
+    __ beq(&done);
+  }
+
+  // Copy count bytes from src to dst.
+  Label byte_loop;
+  __ mtctr(count);
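+  // mtctr/bdnz form a hardware counted loop: bdnz decrements the PPC count
+  // register and branches while it is non-zero.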
+  __ bind(&byte_loop);
+  __ lbz(scratch, MemOperand(src));
+  __ addi(src, src, Operand(1));
+  __ stb(scratch, MemOperand(dest));
+  __ addi(dest, dest, Operand(1));
+  __ bdnz(&byte_loop);
+
+  __ bind(&done);
+}
+
+
+void SubStringStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  // Stack frame on entry.
+  //  lr: return address
+  //  sp[0]: to
+  //  sp[4]: from
+  //  sp[8]: string
+
+  // This stub is called from the native-call %_SubString(...), so
+  // nothing can be assumed about the arguments. It is tested that:
+  //  "string" is a sequential string,
+  //  both "from" and "to" are smis, and
+  //  0 <= from <= to <= string.length.
+  // If any of these assumptions fail, we call the runtime system.
+
+  const int kToOffset = 0 * kPointerSize;
+  const int kFromOffset = 1 * kPointerSize;
+  const int kStringOffset = 2 * kPointerSize;
+
+  __ LoadP(r5, MemOperand(sp, kToOffset));
+  __ LoadP(r6, MemOperand(sp, kFromOffset));
+
+  // If either to or from is not a smi, bail out to the generic runtime.
+  __ JumpIfNotSmi(r5, &runtime);
+  __ JumpIfNotSmi(r6, &runtime);
+  __ SmiUntag(r5);
+  __ SmiUntag(r6, SetRC);
+  // Both r5 and r6 are untagged integers.
+
+  // We want to bail out to the runtime here if from is negative.
+  __ blt(&runtime, cr0);  // From < 0.
+
+  __ cmpl(r6, r5);
+  __ bgt(&runtime);  // Fail if from > to.
+  __ sub(r5, r5, r6);
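+  // r5 now holds to - from, i.e. the substring length.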
+
+  // Make sure first argument is a string.
+  __ LoadP(r3, MemOperand(sp, kStringOffset));
+  __ JumpIfSmi(r3, &runtime);
+  Condition is_string = masm->IsObjectStringType(r3, r4);
+  __ b(NegateCondition(is_string), &runtime, cr0);
+
+  Label single_char;
+  __ cmpi(r5, Operand(1));
+  __ b(eq, &single_char);
+
+  // Short-cut for the case of trivial substring.
+  Label return_r3;
+  // r3: original string
+  // r5: result string length
+  __ LoadP(r7, FieldMemOperand(r3, String::kLengthOffset));
+  __ SmiUntag(r0, r7);
+  __ cmpl(r5, r0);
+  // Return original string.
+  __ beq(&return_r3);
+  // Longer than original string's length or negative: unsafe arguments.
+  __ bgt(&runtime);
+  // Shorter than original string's length: an actual substring.
+
+  // Deal with different string types: update the index if necessary
+  // and put the underlying string into r8.
+  // r3: original string
+  // r4: instance type
+  // r5: length
+  // r6: from index (untagged)
+  Label underlying_unpacked, sliced_string, seq_or_external_string;
+  // If the string is not indirect, it can only be sequential or external.
+  STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag));
+  STATIC_ASSERT(kIsIndirectStringMask != 0);
+  __ andi(r0, r4, Operand(kIsIndirectStringMask));
+  __ beq(&seq_or_external_string, cr0);
+
+  __ andi(r0, r4, Operand(kSlicedNotConsMask));
+  __ bne(&sliced_string, cr0);
+  // Cons string.  Check whether it is flat, then fetch first part.
+  __ LoadP(r8, FieldMemOperand(r3, ConsString::kSecondOffset));
+  __ CompareRoot(r8, Heap::kempty_stringRootIndex);
+  __ bne(&runtime);
+  __ LoadP(r8, FieldMemOperand(r3, ConsString::kFirstOffset));
+  // Update instance type.
+  __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
+  __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+  __ b(&underlying_unpacked);
+
+  __ bind(&sliced_string);
+  // Sliced string.  Fetch parent and correct start index by offset.
+  __ LoadP(r8, FieldMemOperand(r3, SlicedString::kParentOffset));
+  __ LoadP(r7, FieldMemOperand(r3, SlicedString::kOffsetOffset));
+  __ SmiUntag(r4, r7);
+  __ add(r6, r6, r4);  // Add offset to index.
+  // Update instance type.
+  __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset));
+  __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset));
+  __ b(&underlying_unpacked);
+
+  __ bind(&seq_or_external_string);
+  // Sequential or external string.  Just move string to the expected register.
+  __ mr(r8, r3);
+
+  __ bind(&underlying_unpacked);
+
+  if (FLAG_string_slices) {
+    Label copy_routine;
+    // r8: underlying subject string
+    // r4: instance type of underlying subject string
+    // r5: length
+    // r6: adjusted start index (untagged)
+    __ cmpi(r5, Operand(SlicedString::kMinLength));
+    // Short slice.  Copy instead of slicing.
+    __ blt(&copy_routine);
+    // Allocate new sliced string.  At this point we do not reload the instance
+    // type including the string encoding because we simply rely on the info
+    // provided by the original string.  It does not matter if the original
+    // string's encoding is wrong because we always have to recheck encoding of
+    // the newly created string's parent anyways due to externalized strings.
+    Label two_byte_slice, set_slice_header;
+    STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0);
+    STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0);
+    __ andi(r0, r4, Operand(kStringEncodingMask));
+    __ beq(&two_byte_slice, cr0);
+    __ AllocateOneByteSlicedString(r3, r5, r9, r10, &runtime);
+    __ b(&set_slice_header);
+    __ bind(&two_byte_slice);
+    __ AllocateTwoByteSlicedString(r3, r5, r9, r10, &runtime);
+    __ bind(&set_slice_header);
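+    // A sliced string stores its parent string and a smi-tagged start
+    // offset.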
+    __ SmiTag(r6);
+    __ StoreP(r8, FieldMemOperand(r3, SlicedString::kParentOffset), r0);
+    __ StoreP(r6, FieldMemOperand(r3, SlicedString::kOffsetOffset), r0);
+    __ b(&return_r3);
+
+    __ bind(&copy_routine);
+  }
+
+  // r8: underlying subject string
+  // r4: instance type of underlying subject string
+  // r5: length
+  // r6: adjusted start index (untagged)
+  Label two_byte_sequential, sequential_string, allocate_result;
+  STATIC_ASSERT(kExternalStringTag != 0);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ andi(r0, r4, Operand(kExternalStringTag));
+  __ beq(&sequential_string, cr0);
+
+  // Handle external string.
+  // Rule out short external strings.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ andi(r0, r4, Operand(kShortExternalStringTag));
+  __ bne(&runtime, cr0);
+  __ LoadP(r8, FieldMemOperand(r8, ExternalString::kResourceDataOffset));
+  // r8 already points to the first character of underlying string.
+  __ b(&allocate_result);
+
+  __ bind(&sequential_string);
+  // Locate first character of underlying subject string.
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ addi(r8, r8, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+  __ bind(&allocate_result);
+  // Check the encoding and allocate a matching result string.
+  STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0);
+  __ andi(r0, r4, Operand(kStringEncodingMask));
+  __ beq(&two_byte_sequential, cr0);
+
+  // Allocate and copy the resulting one-byte string.
+  __ AllocateOneByteString(r3, r5, r7, r9, r10, &runtime);
+
+  // Locate first character of substring to copy.
+  __ add(r8, r8, r6);
+  // Locate first character of result.
+  __ addi(r4, r3, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+  // r3: result string
+  // r4: first character of result string
+  // r5: result string length
+  // r8: first character of substring to copy
+  STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
+                                       String::ONE_BYTE_ENCODING);
+  __ b(&return_r3);
+
+  // Allocate and copy the resulting two-byte string.
+  __ bind(&two_byte_sequential);
+  __ AllocateTwoByteString(r3, r5, r7, r9, r10, &runtime);
+
+  // Locate first character of substring to copy.
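+  // Two-byte characters occupy two bytes each, so scale the index by 2.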
+  __ ShiftLeftImm(r4, r6, Operand(1));
+  __ add(r8, r8, r4);
+  // Locate first character of result.
+  __ addi(r4, r3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+
+  // r3: result string.
+  // r4: first character of result.
+  // r5: result length.
+  // r8: first character of substring to copy.
+  STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6,
+                                       String::TWO_BYTE_ENCODING);
+
+  __ bind(&return_r3);
+  Counters* counters = isolate()->counters();
+  __ IncrementCounter(counters->sub_string_native(), 1, r6, r7);
+  __ Drop(3);
+  __ Ret();
+
+  // Just jump to the runtime to create the substring.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kSubString, 3, 1);
+
+  __ bind(&single_char);
+  // r3: original string
+  // r4: instance type
+  // r5: length
+  // r6: from index (untagged)
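+  // StringCharAtGenerator expects a tagged index, so re-tag it as a smi.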
+  __ SmiTag(r6, r6);
+  StringCharAtGenerator generator(r3, r6, r5, r3, &runtime, &runtime, &runtime,
+                                  STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING);
+  generator.GenerateFast(masm);
+  __ Drop(3);
+  __ Ret();
+  generator.SkipSlow(masm, &runtime);
+}
+
+
+void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                                   Register left,
+                                                   Register right,
+                                                   Register scratch1,
+                                                   Register scratch2) {
+  Register length = scratch1;
+
+  // Compare lengths.
+  Label strings_not_equal, check_zero_length;
+  __ LoadP(length, FieldMemOperand(left, String::kLengthOffset));
+  __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ cmp(length, scratch2);
+  __ beq(&check_zero_length);
+  __ bind(&strings_not_equal);
+  __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL));
+  __ Ret();
+
+  // Check if the length is zero.
+  Label compare_chars;
+  __ bind(&check_zero_length);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ cmpi(length, Operand::Zero());
+  __ bne(&compare_chars);
+  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+  __ Ret();
+
+  // Compare characters.
+  __ bind(&compare_chars);
+  GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2,
+                                  &strings_not_equal);
+
+  // Characters are equal.
+  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+  __ Ret();
+}
+
+
+void StringHelper::GenerateCompareFlatOneByteStrings(
+    MacroAssembler* masm, Register left, Register right, Register scratch1,
+    Register scratch2, Register scratch3) {
+  Label skip, result_not_equal, compare_lengths;
+  // Find minimum length and length difference.
+  __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset));
+  __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset));
+  __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC);
+  Register length_delta = scratch3;
+  __ ble(&skip, cr0);
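+  // The left string is longer here, so the minimum length is the right
+  // string's length.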
+  __ mr(scratch1, scratch2);
+  __ bind(&skip);
+  Register min_length = scratch1;
+  STATIC_ASSERT(kSmiTag == 0);
+  __ cmpi(min_length, Operand::Zero());
+  __ beq(&compare_lengths);
+
+  // Compare loop.
+  GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2,
+                                  &result_not_equal);
+
+  // Compare lengths - strings up to min-length are equal.
+  __ bind(&compare_lengths);
+  DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0));
+  // Use length_delta as result if it's zero.
+  __ mr(r3, length_delta);
+  __ cmpi(r3, Operand::Zero());
+  __ bind(&result_not_equal);
+  // Conditionally update the result based either on length_delta or
+  // the last comparison performed in the loop above.
+  Label less_equal, equal;
+  __ ble(&less_equal);
+  __ LoadSmiLiteral(r3, Smi::FromInt(GREATER));
+  __ Ret();
+  __ bind(&less_equal);
+  __ beq(&equal);
+  __ LoadSmiLiteral(r3, Smi::FromInt(LESS));
+  __ bind(&equal);
+  __ Ret();
+}
+
+
+void StringHelper::GenerateOneByteCharsCompareLoop(
+    MacroAssembler* masm, Register left, Register right, Register length,
+    Register scratch1, Label* chars_not_equal) {
+  // Change index to run from -length to -1 by adding length to string
+  // start. This means that loop ends when index reaches zero, which
+  // doesn't need an additional compare.
+  __ SmiUntag(length);
+  __ addi(scratch1, length,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ add(left, left, scratch1);
+  __ add(right, right, scratch1);
+  __ subfic(length, length, Operand::Zero());
+  Register index = length;  // index = -length;
+
+  // Compare loop.
+  Label loop;
+  __ bind(&loop);
+  __ lbzx(scratch1, MemOperand(left, index));
+  __ lbzx(r0, MemOperand(right, index));
+  __ cmp(scratch1, r0);
+  __ bne(chars_not_equal);
+  __ addi(index, index, Operand(1));
+  __ cmpi(index, Operand::Zero());
+  __ bne(&loop);
+}
+
+
+void StringCompareStub::Generate(MacroAssembler* masm) {
+  Label runtime;
+
+  Counters* counters = isolate()->counters();
+
+  // Stack frame on entry.
+  //  sp[0]: right string
+  //  sp[4]: left string
+  __ LoadP(r3, MemOperand(sp));  // Load right in r3, left in r4.
+  __ LoadP(r4, MemOperand(sp, kPointerSize));
+
+  Label not_same;
+  __ cmp(r3, r4);
+  __ bne(&not_same);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+  __ IncrementCounter(counters->string_compare_native(), 1, r4, r5);
+  __ addi(sp, sp, Operand(2 * kPointerSize));
+  __ Ret();
+
+  __ bind(&not_same);
+
+  // Check that both objects are sequential one-byte strings.
+  __ JumpIfNotBothSequentialOneByteStrings(r4, r3, r5, r6, &runtime);
+
+  // Compare flat one-byte strings natively. Remove arguments from stack first.
+  __ IncrementCounter(counters->string_compare_native(), 1, r5, r6);
+  __ addi(sp, sp, Operand(2 * kPointerSize));
+  StringHelper::GenerateCompareFlatOneByteStrings(masm, r4, r3, r5, r6, r7);
+
+  // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater)
+  // tagged as a small integer.
+  __ bind(&runtime);
+  __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+}
+
+
+void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r4    : left
+  //  -- r3    : right
+  //  -- lr    : return address
+  // -----------------------------------
+
+  // Load r5 with the allocation site.  We stick an undefined dummy value here
+  // and replace it with the real allocation site later when we instantiate this
+  // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate().
+  __ Move(r5, handle(isolate()->heap()->undefined_value()));
+
+  // Make sure that we actually patched the allocation site.
+  if (FLAG_debug_code) {
+    __ TestIfSmi(r5, r0);
+    __ Assert(ne, kExpectedAllocationSite, cr0);
+    __ push(r5);
+    __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex);
+    __ cmp(r5, ip);
+    __ pop(r5);
+    __ Assert(eq, kExpectedAllocationSite);
+  }
+
+  // Tail call into the stub that handles binary operations with allocation
+  // sites.
+  BinaryOpWithAllocationSiteStub stub(isolate(), state());
+  __ TailCallStub(&stub);
+}
+
+
+void CompareICStub::GenerateSmis(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::SMI);
+  Label miss;
+  __ orx(r5, r4, r3);
+  __ JumpIfNotSmi(r5, &miss);
+
+  if (GetCondition() == eq) {
+    // For equality we do not care about the sign of the result.
+    __ sub(r3, r3, r4);
+  } else {
+    // Untag before subtracting to avoid handling overflow.
+    __ SmiUntag(r4);
+    __ SmiUntag(r3);
+    __ sub(r3, r4, r3);
+  }
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateNumbers(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::NUMBER);
+
+  Label generic_stub;
+  Label unordered, maybe_undefined1, maybe_undefined2;
+  Label miss;
+  Label equal, less_than;
+
+  if (left() == CompareICState::SMI) {
+    __ JumpIfNotSmi(r4, &miss);
+  }
+  if (right() == CompareICState::SMI) {
+    __ JumpIfNotSmi(r3, &miss);
+  }
+
+  // Inlining the double comparison and falling back to the general compare
+  // stub if NaN is involved.
+  // Load left and right operand.
+  Label done, left, left_smi, right_smi;
+  __ JumpIfSmi(r3, &right_smi);
+  __ CheckMap(r3, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined1,
+              DONT_DO_SMI_CHECK);
+  __ lfd(d1, FieldMemOperand(r3, HeapNumber::kValueOffset));
+  __ b(&left);
+  __ bind(&right_smi);
+  __ SmiToDouble(d1, r3);
+
+  __ bind(&left);
+  __ JumpIfSmi(r4, &left_smi);
+  __ CheckMap(r4, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined2,
+              DONT_DO_SMI_CHECK);
+  __ lfd(d0, FieldMemOperand(r4, HeapNumber::kValueOffset));
+  __ b(&done);
+  __ bind(&left_smi);
+  __ SmiToDouble(d0, r4);
+
+  __ bind(&done);
+
+  // Compare operands
+  __ fcmpu(d0, d1);
+
+  // Don't base result on status bits when a NaN is involved.
+  __ bunordered(&unordered);
+
+  // Return a result of -1, 0, or 1, based on status bits.
+  __ beq(&equal);
+  __ blt(&less_than);
+  // Assume greater than.
+  __ li(r3, Operand(GREATER));
+  __ Ret();
+  __ bind(&equal);
+  __ li(r3, Operand(EQUAL));
+  __ Ret();
+  __ bind(&less_than);
+  __ li(r3, Operand(LESS));
+  __ Ret();
+
+  __ bind(&unordered);
+  __ bind(&generic_stub);
+  CompareICStub stub(isolate(), op(), CompareICState::GENERIC,
+                     CompareICState::GENERIC, CompareICState::GENERIC);
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+
+  __ bind(&maybe_undefined1);
+  if (Token::IsOrderedRelationalCompareOp(op())) {
+    __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ bne(&miss);
+    __ JumpIfSmi(r4, &unordered);
+    __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE);
+    __ bne(&maybe_undefined2);
+    __ b(&unordered);
+  }
+
+  __ bind(&maybe_undefined2);
+  if (Token::IsOrderedRelationalCompareOp(op())) {
+    __ CompareRoot(r4, Heap::kUndefinedValueRootIndex);
+    __ beq(&unordered);
+  }
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::INTERNALIZED_STRING);
+  Label miss, not_equal;
+
+  // Registers containing left and right operands respectively.
+  Register left = r4;
+  Register right = r3;
+  Register tmp1 = r5;
+  Register tmp2 = r6;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are symbols.
+  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  __ orx(tmp1, tmp1, tmp2);
+  __ andi(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  __ bne(&miss, cr0);
+
+  // Internalized strings are compared by identity.
+  __ cmp(left, right);
+  __ bne(&not_equal);
+  // Make sure r3 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  DCHECK(right.is(r3));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+  __ bind(&not_equal);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::UNIQUE_NAME);
+  DCHECK(GetCondition() == eq);
+  Label miss;
+
+  // Registers containing left and right operands respectively.
+  Register left = r4;
+  Register right = r3;
+  Register tmp1 = r5;
+  Register tmp2 = r6;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are unique names. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+
+  __ JumpIfNotUniqueNameInstanceType(tmp1, &miss);
+  __ JumpIfNotUniqueNameInstanceType(tmp2, &miss);
+
+  // Unique names are compared by identity.
+  __ cmp(left, right);
+  __ bne(&miss);
+  // Make sure r3 is non-zero. At this point input operands are
+  // guaranteed to be non-zero.
+  DCHECK(right.is(r3));
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateStrings(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::STRING);
+  Label miss, not_identical, is_symbol;
+
+  bool equality = Token::IsEqualityOp(op());
+
+  // Registers containing left and right operands respectively.
+  Register left = r4;
+  Register right = r3;
+  Register tmp1 = r5;
+  Register tmp2 = r6;
+  Register tmp3 = r7;
+  Register tmp4 = r8;
+
+  // Check that both operands are heap objects.
+  __ JumpIfEitherSmi(left, right, &miss);
+
+  // Check that both operands are strings. This leaves the instance
+  // types loaded in tmp1 and tmp2.
+  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset));
+  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset));
+  __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset));
+  __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset));
+  STATIC_ASSERT(kNotStringTag != 0);
+  __ orx(tmp3, tmp1, tmp2);
+  __ andi(r0, tmp3, Operand(kIsNotStringMask));
+  __ bne(&miss, cr0);
+
+  // Fast check for identical strings.
+  __ cmp(left, right);
+  STATIC_ASSERT(EQUAL == 0);
+  STATIC_ASSERT(kSmiTag == 0);
+  __ bne(&not_identical);
+  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL));
+  __ Ret();
+  __ bind(&not_identical);
+
+  // Handle not identical strings.
+
+  // Check that both strings are internalized strings. If they are, we're done
+  // because we already know they are not identical. We know they are both
+  // strings.
+  if (equality) {
+    DCHECK(GetCondition() == eq);
+    STATIC_ASSERT(kInternalizedTag == 0);
+    __ orx(tmp3, tmp1, tmp2);
+    __ andi(r0, tmp3, Operand(kIsNotInternalizedMask));
+    __ bne(&is_symbol, cr0);
+    // Make sure r3 is non-zero. At this point input operands are
+    // guaranteed to be non-zero.
+    DCHECK(right.is(r3));
+    __ Ret();
+    __ bind(&is_symbol);
+  }
+
+  // Check that both strings are sequential one-byte.
+  Label runtime;
+  __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4,
+                                                    &runtime);
+
+  // Compare flat one-byte strings. Returns when done.
+  if (equality) {
+    StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1,
+                                                  tmp2);
+  } else {
+    StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1,
+                                                    tmp2, tmp3);
+  }
+
+  // Handle more complex cases in runtime.
+  __ bind(&runtime);
+  __ Push(left, right);
+  if (equality) {
+    __ TailCallRuntime(Runtime::kStringEquals, 2, 1);
+  } else {
+    __ TailCallRuntime(Runtime::kStringCompare, 2, 1);
+  }
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateObjects(MacroAssembler* masm) {
+  DCHECK(state() == CompareICState::OBJECT);
+  Label miss;
+  __ and_(r5, r4, r3);
+  __ JumpIfSmi(r5, &miss);
+
+  __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE);
+  __ bne(&miss);
+  __ CompareObjectType(r4, r5, r5, JS_OBJECT_TYPE);
+  __ bne(&miss);
+
+  DCHECK(GetCondition() == eq);
+  __ sub(r3, r3, r4);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateKnownObjects(MacroAssembler* masm) {
+  Label miss;
+  __ and_(r5, r4, r3);
+  __ JumpIfSmi(r5, &miss);
+  __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset));
+  __ Cmpi(r5, Operand(known_map_), r0);
+  __ bne(&miss);
+  __ Cmpi(r6, Operand(known_map_), r0);
+  __ bne(&miss);
+
+  __ sub(r3, r3, r4);
+  __ Ret();
+
+  __ bind(&miss);
+  GenerateMiss(masm);
+}
+
+
+void CompareICStub::GenerateMiss(MacroAssembler* masm) {
+  {
+    // Call the runtime system in a fresh internal frame.
+    ExternalReference miss =
+        ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate());
+
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+    __ Push(r4, r3);
+    __ Push(r4, r3);
+    __ LoadSmiLiteral(r0, Smi::FromInt(op()));
+    __ push(r0);
+    __ CallExternalReference(miss, 3);
+    // Compute the entry point of the rewritten stub.
+    __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
+    // Restore registers.
+    __ Pop(r4, r3);
+  }
+
+  __ JumpToJSEntry(r5);
+}
+
+
+// This stub is paired with DirectCEntryStub::GenerateCall.
+void DirectCEntryStub::Generate(MacroAssembler* masm) {
+  // Place the return address on the stack, making the call
+  // GC safe. The RegExp backend also relies on this.
+  __ mflr(r0);
+  __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+  __ Call(ip);  // Call the C++ function.
+  __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize));
+  __ mtlr(r0);
+  __ blr();
+}
+
+
+void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) {
+#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+  // Native AIX/PPC64 Linux use a function descriptor.
+  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize));
+  __ LoadP(ip, MemOperand(target, 0));  // Instruction address
+#else
+  // ip needs to be set for DirectCEntryStub::Generate, and also
+  // for ABI_TOC_ADDRESSABILITY_VIA_IP.
+  __ Move(ip, target);
+#endif
+
+  intptr_t code = reinterpret_cast<intptr_t>(GetCode().location());
+  __ mov(r0, Operand(code, RelocInfo::CODE_TARGET));
+  __ Call(r0);  // Call the stub.
+}
+
+
+void NameDictionaryLookupStub::GenerateNegativeLookup(
+    MacroAssembler* masm, Label* miss, Label* done, Register receiver,
+    Register properties, Handle<Name> name, Register scratch0) {
+  DCHECK(name->IsUniqueName());
+  // If the names of the slots probed for the hash value (probes 1 through
+  // kProbes - 1) are not equal to the name, and the kProbes-th slot is
+  // unused (its name is the undefined value), the hash table is guaranteed
+  // not to contain the property.  This holds even if some slots represent
+  // deleted properties (their names are the hole value).
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // scratch0 points to properties hash.
+    // Compute the masked index: (hash + i + i * i) & mask.
+    Register index = scratch0;
+    // Capacity is smi 2^n.
+    __ LoadP(index, FieldMemOperand(properties, kCapacityOffset));
+    __ subi(index, index, Operand(1));
+    __ LoadSmiLiteral(
+        ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)));
+    __ and_(index, index, ip);
+
+    // Scale the index by multiplying by the entry size.
+    DCHECK(NameDictionary::kEntrySize == 3);
+    __ ShiftLeftImm(ip, index, Operand(1));
+    __ add(index, index, ip);  // index *= 3.
+
+    Register entity_name = scratch0;
+    // Having undefined at this place means the name is not contained.
+    Register tmp = properties;
+    __ SmiToPtrArrayOffset(ip, index);
+    __ add(tmp, properties, ip);
+    __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset));
+
+    DCHECK(!tmp.is(entity_name));
+    __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex);
+    __ cmp(entity_name, tmp);
+    __ beq(done);
+
+    // Load the hole ready for use below:
+    __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex);
+
+    // Stop if found the property.
+    __ Cmpi(entity_name, Operand(Handle<Name>(name)), r0);
+    __ beq(miss);
+
+    Label good;
+    __ cmp(entity_name, tmp);
+    __ beq(&good);
+
+    // Check if the entry name is not a unique name.
+    __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset));
+    __ lbz(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset));
+    __ JumpIfNotUniqueNameInstanceType(entity_name, miss);
+    __ bind(&good);
+
+    // Restore the properties.
+    __ LoadP(properties,
+             FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  }
+
+  const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
+                          r5.bit() | r4.bit() | r3.bit());
+
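+  // lr is saved in r0 and pushed together with the registers in spill_mask.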
+  __ mflr(r0);
+  __ MultiPush(spill_mask);
+
+  __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
+  __ mov(r4, Operand(Handle<Name>(name)));
+  NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ cmpi(r3, Operand::Zero());
+
+  __ MultiPop(spill_mask);  // MultiPop does not touch condition flags
+  __ mtlr(r0);
+
+  __ beq(done);
+  __ bne(miss);
+}
+
+
+// Probe the name dictionary in the |elements| register. Jump to the
+// |done| label if a property with the given name is found. Jump to
+// the |miss| label otherwise.
+// If the lookup was successful, |scratch2| will be equal to
+// elements + kPointerSize * index.
+void NameDictionaryLookupStub::GeneratePositiveLookup(
+    MacroAssembler* masm, Label* miss, Label* done, Register elements,
+    Register name, Register scratch1, Register scratch2) {
+  DCHECK(!elements.is(scratch1));
+  DCHECK(!elements.is(scratch2));
+  DCHECK(!name.is(scratch1));
+  DCHECK(!name.is(scratch2));
+
+  __ AssertName(name);
+
+  // Compute the capacity mask.
+  __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset));
+  __ SmiUntag(scratch1);  // convert smi to int
+  __ subi(scratch1, scratch1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before
+  // giving up. Measurements done on Gmail indicate that 2 probes
+  // cover ~93% of loads from dictionaries.
+  for (int i = 0; i < kInlinedProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    __ lwz(scratch2, FieldMemOperand(name, Name::kHashFieldOffset));
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the following and instruction.
+      DCHECK(NameDictionary::GetProbeOffset(i) <
+             1 << (32 - Name::kHashFieldOffset));
+      __ addi(scratch2, scratch2,
+              Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+    }
+    __ srwi(scratch2, scratch2, Operand(Name::kHashShift));
+    __ and_(scratch2, scratch1, scratch2);
+
+    // Scale the index by multiplying by the element size.
+    DCHECK(NameDictionary::kEntrySize == 3);
+    // scratch2 = scratch2 * 3.
+    __ ShiftLeftImm(ip, scratch2, Operand(1));
+    __ add(scratch2, scratch2, ip);
+
+    // Check if the key is identical to the name.
+    __ ShiftLeftImm(ip, scratch2, Operand(kPointerSizeLog2));
+    __ add(scratch2, elements, ip);
+    __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset));
+    __ cmp(name, ip);
+    __ beq(done);
+  }
+
+  const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() |
+                          r5.bit() | r4.bit() | r3.bit()) &
+                         ~(scratch1.bit() | scratch2.bit());
+
+  __ mflr(r0);
+  __ MultiPush(spill_mask);
+  if (name.is(r3)) {
+    DCHECK(!elements.is(r4));
+    __ mr(r4, name);
+    __ mr(r3, elements);
+  } else {
+    __ mr(r3, elements);
+    __ mr(r4, name);
+  }
+  NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP);
+  __ CallStub(&stub);
+  __ cmpi(r3, Operand::Zero());
+  __ mr(scratch2, r5);
+  __ MultiPop(spill_mask);
+  __ mtlr(r0);
+
+  __ bne(done);
+  __ beq(miss);
+}
+
+
+void NameDictionaryLookupStub::Generate(MacroAssembler* masm) {
+  // This stub overrides SometimesSetsUpAFrame() to return false.  That means
+  // we cannot call anything that could cause a GC from this stub.
+  // Registers:
+  //  dictionary: NameDictionary to probe.
+  //  key: the name being looked up.
+  //  index: will hold the index of the entry if the lookup is successful;
+  //         may alias with result.
+  // Returns:
+  //  result is zero if the lookup failed, non-zero otherwise.  Note that
+  //  result aliases dictionary.
+
+  Register result = r3;
+  Register dictionary = r3;
+  Register key = r4;
+  Register index = r5;
+  Register mask = r6;
+  Register hash = r7;
+  Register undefined = r8;
+  Register entry_key = r9;
+  Register scratch = r9;
+
+  Label in_dictionary, maybe_in_dictionary, not_in_dictionary;
+
+  __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset));
+  __ SmiUntag(mask);
+  __ subi(mask, mask, Operand(1));
+
+  __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset));
+
+  __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex);
+
+  for (int i = kInlinedProbes; i < kTotalProbes; i++) {
+    // Compute the masked index: (hash + i + i * i) & mask.
+    // Capacity is smi 2^n.
+    if (i > 0) {
+      // Add the probe offset (i + i * i) left shifted to avoid right shifting
+      // the hash in a separate instruction. The value hash + i + i * i is right
+      // shifted in the following and instruction.
+      DCHECK(NameDictionary::GetProbeOffset(i) <
+             1 << (32 - Name::kHashFieldOffset));
+      __ addi(index, hash,
+              Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift));
+    } else {
+      __ mr(index, hash);
+    }
+    __ srwi(r0, index, Operand(Name::kHashShift));
+    __ and_(index, mask, r0);
+
+    // Scale the index by multiplying by the entry size.
+    DCHECK(NameDictionary::kEntrySize == 3);
+    __ ShiftLeftImm(scratch, index, Operand(1));
+    __ add(index, index, scratch);  // index *= 3.
+
+    DCHECK_EQ(kSmiTagSize, 1);
+    __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2));
+    __ add(index, dictionary, scratch);
+    __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset));
+
+    // Having undefined at this place means the name is not contained.
+    __ cmp(entry_key, undefined);
+    __ beq(&not_in_dictionary);
+
+    // Stop if found the property.
+    __ cmp(entry_key, key);
+    __ beq(&in_dictionary);
+
+    if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) {
+      // Check if the entry name is not a unique name.
+      __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset));
+      __ lbz(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset));
+      __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary);
+    }
+  }
+
+  __ bind(&maybe_in_dictionary);
+  // If we are doing negative lookup then probing failure should be
+  // treated as a lookup success. For positive lookup probing failure
+  // should be treated as lookup failure.
+  if (mode() == POSITIVE_LOOKUP) {
+    __ li(result, Operand::Zero());
+    __ Ret();
+  }
+
+  __ bind(&in_dictionary);
+  __ li(result, Operand(1));
+  __ Ret();
+
+  __ bind(&not_in_dictionary);
+  __ li(result, Operand::Zero());
+  __ Ret();
+}
+
+
+void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(
+    Isolate* isolate) {
+  StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs);
+  stub1.GetCode();
+  // Hydrogen code stubs need stub2 at snapshot time.
+  StoreBufferOverflowStub stub2(isolate, kSaveFPRegs);
+  stub2.GetCode();
+}
+
+
+// Takes the input in 3 registers: address_ value_ and object_.  A pointer to
+// the value has just been written into the object, now this stub makes sure
+// we keep the GC informed.  The word in the object where the value has been
+// written is in the address register.
+void RecordWriteStub::Generate(MacroAssembler* masm) {
+  Label skip_to_incremental_noncompacting;
+  Label skip_to_incremental_compacting;
+
+  // The first two branch instructions are generated with labels so as to
+  // get the offset fixed up correctly by the bind(Label*) call.  We patch
+  // it back and forth between branch condition True and False
+  // when we start and stop incremental heap marking.
+  // See RecordWriteStub::Patch for details.
+
+  // Clear the bit, branch on True for NOP action initially
+  __ crclr(Assembler::encode_crbit(cr2, CR_LT));
+  __ blt(&skip_to_incremental_noncompacting, cr2);
+  __ blt(&skip_to_incremental_compacting, cr2);
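+  // With the cr2 LT bit cleared, both branches above fall through and we
+  // take the store-buffer-only path below.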
+
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+  }
+  __ Ret();
+
+  __ bind(&skip_to_incremental_noncompacting);
+  GenerateIncremental(masm, INCREMENTAL);
+
+  __ bind(&skip_to_incremental_compacting);
+  GenerateIncremental(masm, INCREMENTAL_COMPACTION);
+
+  // Initial mode of the stub is expected to be STORE_BUFFER_ONLY.
+  // Will be checked in IncrementalMarking::ActivateGeneratedStub.
+  // Patching is not required on PPC as the initial path is effectively a NOP.
+}
+
+
+void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) {
+  regs_.Save(masm);
+
+  if (remembered_set_action() == EMIT_REMEMBERED_SET) {
+    Label dont_need_remembered_set;
+
+    __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
+    __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value.
+                           regs_.scratch0(), &dont_need_remembered_set);
+
+    __ CheckPageFlag(regs_.object(), regs_.scratch0(),
+                     1 << MemoryChunk::SCAN_ON_SCAVENGE, ne,
+                     &dont_need_remembered_set);
+
+    // First notify the incremental marker if necessary, then update the
+    // remembered set.
+    CheckNeedsToInformIncrementalMarker(
+        masm, kUpdateRememberedSetOnNoNeedToInformIncrementalMarker, mode);
+    InformIncrementalMarker(masm);
+    regs_.Restore(masm);
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+
+    __ bind(&dont_need_remembered_set);
+  }
+
+  CheckNeedsToInformIncrementalMarker(
+      masm, kReturnOnNoNeedToInformIncrementalMarker, mode);
+  InformIncrementalMarker(masm);
+  regs_.Restore(masm);
+  __ Ret();
+}
+
+
+void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) {
+  regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode());
+  int argument_count = 3;
+  __ PrepareCallCFunction(argument_count, regs_.scratch0());
+  Register address =
+      r3.is(regs_.address()) ? regs_.scratch0() : regs_.address();
+  DCHECK(!address.is(regs_.object()));
+  DCHECK(!address.is(r3));
+  __ mr(address, regs_.address());
+  __ mr(r3, regs_.object());
+  __ mr(r4, address);
+  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  __ CallCFunction(
+      ExternalReference::incremental_marking_record_write_function(isolate()),
+      argument_count);
+  regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode());
+}
+
+
+void RecordWriteStub::CheckNeedsToInformIncrementalMarker(
+    MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
+    Mode mode) {
+  Label on_black;
+  Label need_incremental;
+  Label need_incremental_pop_scratch;
+
+  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
+  __ lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
+  __ and_(regs_.scratch0(), regs_.object(), r0);
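+  // scratch0 now holds the start of the object's page, i.e. its MemoryChunk
+  // header.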
+  __ LoadP(
+      regs_.scratch1(),
+      MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
+  __ subi(regs_.scratch1(), regs_.scratch1(), Operand(1));
+  __ StoreP(
+      regs_.scratch1(),
+      MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset));
+  __ cmpi(regs_.scratch1(), Operand::Zero());  // PPC, we could do better here
+  __ blt(&need_incremental);
+
+  // Let's look at the color of the object:  If it is not black we don't have
+  // to inform the incremental marker.
+  __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black);
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&on_black);
+
+  // Get the value from the slot.
+  __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0));
+
+  if (mode == INCREMENTAL_COMPACTION) {
+    Label ensure_not_white;
+
+    __ CheckPageFlag(regs_.scratch0(),  // Contains value.
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kEvacuationCandidateMask, eq,
+                     &ensure_not_white);
+
+    __ CheckPageFlag(regs_.object(),
+                     regs_.scratch1(),  // Scratch.
+                     MemoryChunk::kSkipEvacuationSlotsRecordingMask, eq,
+                     &need_incremental);
+
+    __ bind(&ensure_not_white);
+  }
+
+  // We need extra registers for this, so we push the object and the address
+  // register temporarily.
+  __ Push(regs_.object(), regs_.address());
+  __ EnsureNotWhite(regs_.scratch0(),  // The value.
+                    regs_.scratch1(),  // Scratch.
+                    regs_.object(),    // Scratch.
+                    regs_.address(),   // Scratch.
+                    &need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  regs_.Restore(masm);
+  if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) {
+    __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(),
+                           MacroAssembler::kReturnAtEnd);
+  } else {
+    __ Ret();
+  }
+
+  __ bind(&need_incremental_pop_scratch);
+  __ Pop(regs_.object(), regs_.address());
+
+  __ bind(&need_incremental);
+
+  // Fall through when we need to inform the incremental marker.
+}
+
+
+void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3    : element value to store
+  //  -- r6    : element index as smi
+  //  -- sp[0] : array literal index in function as smi
+  //  -- sp[4] : array literal
+  // clobbers r3, r5, r7
+  // -----------------------------------
+
+  Label element_done;
+  Label double_elements;
+  Label smi_element;
+  Label slow_elements;
+  Label fast_elements;
+
+  // Get array literal index, array literal and its map.
+  __ LoadP(r7, MemOperand(sp, 0 * kPointerSize));
+  __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
+  __ LoadP(r5, FieldMemOperand(r4, JSObject::kMapOffset));
+
+  __ CheckFastElements(r5, r8, &double_elements);
+  // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS
+  __ JumpIfSmi(r3, &smi_element);
+  __ CheckFastSmiElements(r5, r8, &fast_elements);
+
+  // Storing into the array literal requires an elements transition.  Call
+  // into the runtime.
+  __ bind(&slow_elements);
+  __ Push(r4, r6, r3);
+  __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadP(r8, FieldMemOperand(r8, JSFunction::kLiteralsOffset));
+  __ Push(r8, r7);
+  __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1);
+
+  // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object.
+  __ bind(&fast_elements);
+  __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
+  __ SmiToPtrArrayOffset(r9, r6);
+  __ add(r9, r8, r9);
+#if V8_TARGET_ARCH_PPC64
+  // Add due to the offset alignment requirements of StorePU.
+  __ addi(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ StoreP(r3, MemOperand(r9));
+#else
+  __ StorePU(r3, MemOperand(r9, FixedArray::kHeaderSize - kHeapObjectTag));
+#endif
+  // Update the write barrier for the array store.
+  __ RecordWrite(r8, r9, r3, kLRHasNotBeenSaved, kDontSaveFPRegs,
+                 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ Ret();
+
+  // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS,
+  // and value is Smi.
+  __ bind(&smi_element);
+  __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
+  __ SmiToPtrArrayOffset(r9, r6);
+  __ add(r9, r8, r9);
+  __ StoreP(r3, FieldMemOperand(r9, FixedArray::kHeaderSize), r0);
+  __ Ret();
+
+  // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS.
+  __ bind(&double_elements);
+  __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset));
+  __ StoreNumberToDoubleElements(r3, r6, r8, r9, d0, &slow_elements);
+  __ Ret();
+}
+
+
+void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
+  CEntryStub ces(isolate(), 1, kSaveFPRegs);
+  __ Call(ces.GetCode(), RelocInfo::CODE_TARGET);
+  int parameter_count_offset =
+      StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;
+  __ LoadP(r4, MemOperand(fp, parameter_count_offset));
+  if (function_mode() == JS_FUNCTION_STUB_MODE) {
+    __ addi(r4, r4, Operand(1));
+  }
+  masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE);
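+  // Drop the caller's stack parameters: r4 holds the count, shifted below
+  // to a byte offset.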
+  __ slwi(r4, r4, Operand(kPointerSizeLog2));
+  __ add(sp, sp, r4);
+  __ Ret();
+}
+
+
+void LoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorLoadStub stub(isolate(), state());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) {
+  EmitLoadTypeFeedbackVector(masm, VectorLoadICDescriptor::VectorRegister());
+  VectorKeyedLoadStub stub(isolate());
+  __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) {
+  if (masm->isolate()->function_entry_hook() != NULL) {
+    PredictableCodeSizeScope predictable(masm,
+#if V8_TARGET_ARCH_PPC64
+                                         14 * Assembler::kInstrSize);
+#else
+                                         11 * Assembler::kInstrSize);
+#endif
+    ProfileEntryHookStub stub(masm->isolate());
+    __ mflr(r0);
+    __ Push(r0, ip);
+    __ CallStub(&stub);
+    __ Pop(r0, ip);
+    __ mtlr(r0);
+  }
+}
+
+
+void ProfileEntryHookStub::Generate(MacroAssembler* masm) {
+  // The entry hook is a "push lr, ip" instruction, followed by a call.
+  const int32_t kReturnAddressDistanceFromFunctionStart =
+      Assembler::kCallTargetAddressOffset + 3 * Assembler::kInstrSize;
+
+  // This should contain all kJSCallerSaved registers.
+  const RegList kSavedRegs = kJSCallerSaved |  // Caller saved registers.
+                             r15.bit();        // Saved stack pointer.
+
+  // We also save lr, so the count here is one higher than the mask indicates.
+  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2;
+
+  // Save all caller-save registers as this may be called from anywhere.
+  __ mflr(ip);
+  __ MultiPush(kSavedRegs | ip.bit());
+
+  // Compute the function's address for the first argument.
+  __ subi(r3, ip, Operand(kReturnAddressDistanceFromFunctionStart));
+
+  // The caller's return address is two slots above the saved temporaries.
+  // Grab that for the second argument to the hook.
+  __ addi(r4, sp, Operand((kNumSavedRegs + 1) * kPointerSize));
+
+  // Align the stack if necessary.
+  int frame_alignment = masm->ActivationFrameAlignment();
+  if (frame_alignment > kPointerSize) {
+    __ mr(r15, sp);
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+    __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+  }
+
+#if !defined(USE_SIMULATOR)
+  uintptr_t entry_hook =
+      reinterpret_cast<uintptr_t>(isolate()->function_entry_hook());
+  __ mov(ip, Operand(entry_hook));
+
+#if ABI_USES_FUNCTION_DESCRIPTORS
+  // Function descriptor
+  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize));
+  __ LoadP(ip, MemOperand(ip, 0));
+#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+// ip set above, so nothing to do.
+#endif
+
+  // PPC LINUX ABI:
+  __ li(r0, Operand::Zero());
+  __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
+#else
+  // Under the simulator we need to indirect the entry hook through a
+  // trampoline function at a known address.
+  // It additionally takes an isolate as a third parameter
+  __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+
+  ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline));
+  __ mov(ip, Operand(ExternalReference(
+                 &dispatcher, ExternalReference::BUILTIN_CALL, isolate())));
+#endif
+  __ Call(ip);
+
+#if !defined(USE_SIMULATOR)
+  __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize));
+#endif
+
+  // Restore the stack pointer if needed.
+  if (frame_alignment > kPointerSize) {
+    __ mr(sp, r15);
+  }
+
+  // Also pop lr to get Ret(0).
+  __ MultiPop(kSavedRegs | ip.bit());
+  __ mtlr(ip);
+  __ Ret();
+}
+
+
+template <class T>
+static void CreateArrayDispatch(MacroAssembler* masm,
+                                AllocationSiteOverrideMode mode) {
+  if (mode == DISABLE_ALLOCATION_SITES) {
+    T stub(masm->isolate(), GetInitialFastElementsKind(), mode);
+    __ TailCallStub(&stub);
+  } else if (mode == DONT_OVERRIDE) {
+    int last_index =
+        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+    for (int i = 0; i <= last_index; ++i) {
+      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+      __ Cmpi(r6, Operand(kind), r0);
+      T stub(masm->isolate(), kind);
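+      // The conditional TailCallStub only jumps when r6 matched this kind.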
+      __ TailCallStub(&stub, eq);
+    }
+
+    // If we reached this point there is a problem.
+    __ Abort(kUnexpectedElementsKindInArrayConstructor);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+static void CreateArrayDispatchOneArgument(MacroAssembler* masm,
+                                           AllocationSiteOverrideMode mode) {
+  // r5 - allocation site (if mode != DISABLE_ALLOCATION_SITES)
+  // r6 - kind (if mode != DISABLE_ALLOCATION_SITES)
+  // r3 - number of arguments
+  // r4 - constructor?
+  // sp[0] - last argument
+  Label normal_sequence;
+  if (mode == DONT_OVERRIDE) {
+    DCHECK(FAST_SMI_ELEMENTS == 0);
+    DCHECK(FAST_HOLEY_SMI_ELEMENTS == 1);
+    DCHECK(FAST_ELEMENTS == 2);
+    DCHECK(FAST_HOLEY_ELEMENTS == 3);
+    DCHECK(FAST_DOUBLE_ELEMENTS == 4);
+    DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5);
+
+    // Is the low bit set?  If so, the kind is holey, which is what we want.
+    __ andi(r0, r6, Operand(1));
+    __ bne(&normal_sequence, cr0);
+  }
+
+  // Look at the first argument.
+  __ LoadP(r8, MemOperand(sp, 0));
+  __ cmpi(r8, Operand::Zero());
+  __ beq(&normal_sequence);
+
+  if (mode == DISABLE_ALLOCATION_SITES) {
+    ElementsKind initial = GetInitialFastElementsKind();
+    ElementsKind holey_initial = GetHoleyElementsKind(initial);
+
+    ArraySingleArgumentConstructorStub stub_holey(
+        masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES);
+    __ TailCallStub(&stub_holey);
+
+    __ bind(&normal_sequence);
+    ArraySingleArgumentConstructorStub stub(masm->isolate(), initial,
+                                            DISABLE_ALLOCATION_SITES);
+    __ TailCallStub(&stub);
+  } else if (mode == DONT_OVERRIDE) {
+    // We are going to create a holey array, but our kind is non-holey.
+    // Fix kind and retry (only if we have an allocation site in the slot).
+    __ addi(r6, r6, Operand(1));
+
+    if (FLAG_debug_code) {
+      __ LoadP(r8, FieldMemOperand(r5, 0));
+      __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex);
+      __ Assert(eq, kExpectedAllocationSite);
+    }
+
+    // Save the resulting elements kind in type info. We can't just store r6
+    // in the AllocationSite::transition_info field because elements kind is
+    // restricted to a portion of the field...upper bits need to be left alone.
+    STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+    __ LoadP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+    __ AddSmiLiteral(r7, r7, Smi::FromInt(kFastElementsKindPackedToHoley), r0);
+    __ StoreP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset),
+              r0);
+
+    __ bind(&normal_sequence);
+    int last_index =
+        GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+    for (int i = 0; i <= last_index; ++i) {
+      ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+      __ mov(r0, Operand(kind));
+      __ cmp(r6, r0);
+      ArraySingleArgumentConstructorStub stub(masm->isolate(), kind);
+      __ TailCallStub(&stub, eq);
+    }
+
+    // If we reached this point there is a problem.
+    __ Abort(kUnexpectedElementsKindInArrayConstructor);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+template <class T>
+static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) {
+  int to_index =
+      GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND);
+  for (int i = 0; i <= to_index; ++i) {
+    ElementsKind kind = GetFastElementsKindFromSequenceIndex(i);
+    T stub(isolate, kind);
+    stub.GetCode();
+    if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) {
+      T stub1(isolate, kind, DISABLE_ALLOCATION_SITES);
+      stub1.GetCode();
+    }
+  }
+}
+
+
+void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) {
+  ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>(
+      isolate);
+  ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>(
+      isolate);
+  ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>(
+      isolate);
+}
+
+
+void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime(
+    Isolate* isolate) {
+  ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS};
+  for (int i = 0; i < 2; i++) {
+    // For internal arrays we only need a few things
+    InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]);
+    stubh1.GetCode();
+    InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]);
+    stubh2.GetCode();
+    InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]);
+    stubh3.GetCode();
+  }
+}
+
+
+void ArrayConstructorStub::GenerateDispatchToArrayStub(
+    MacroAssembler* masm, AllocationSiteOverrideMode mode) {
+  if (argument_count() == ANY) {
+    Label not_zero_case, not_one_case;
+    __ cmpi(r3, Operand::Zero());
+    __ bne(&not_zero_case);
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+
+    __ bind(&not_zero_case);
+    __ cmpi(r3, Operand(1));
+    __ bgt(&not_one_case);
+    CreateArrayDispatchOneArgument(masm, mode);
+
+    __ bind(&not_one_case);
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+  } else if (argument_count() == NONE) {
+    CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode);
+  } else if (argument_count() == ONE) {
+    CreateArrayDispatchOneArgument(masm, mode);
+  } else if (argument_count() == MORE_THAN_ONE) {
+    CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void ArrayConstructorStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : argc (only if argument_count() == ANY)
+  //  -- r4 : constructor
+  //  -- r5 : AllocationSite or undefined
+  //  -- sp[0] : return address
+  //  -- sp[4] : last argument
+  // -----------------------------------
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the global and natives
+    // builtin Array functions which always have maps.
+
+    // Initial map for the builtin Array function should be a map.
+    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+    // This test will catch both a NULL and a Smi.
+    __ TestIfSmi(r7, r0);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+    __ CompareObjectType(r7, r7, r8, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+
+    // We should either have undefined in r5 or a valid AllocationSite
+    __ AssertUndefinedOrAllocationSite(r5, r7);
+  }
+
+  Label no_info;
+  // Get the elements kind and case on that.
+  __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
+  __ beq(&no_info);
+
+  __ LoadP(r6, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset));
+  __ SmiUntag(r6);
+  STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0);
+  __ And(r6, r6, Operand(AllocationSite::ElementsKindBits::kMask));
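+  // r6 now holds the elements kind recorded in the AllocationSite.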
+  GenerateDispatchToArrayStub(masm, DONT_OVERRIDE);
+
+  __ bind(&no_info);
+  GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES);
+}
+
+
+void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm,
+                                                ElementsKind kind) {
+  __ cmpli(r3, Operand(1));
+
+  InternalArrayNoArgumentConstructorStub stub0(isolate(), kind);
+  __ TailCallStub(&stub0, lt);
+
+  InternalArrayNArgumentsConstructorStub stubN(isolate(), kind);
+  __ TailCallStub(&stubN, gt);
+
+  if (IsFastPackedElementsKind(kind)) {
+    // We might need to create a holey array; look at the first argument.
+    __ LoadP(r6, MemOperand(sp, 0));
+    __ cmpi(r6, Operand::Zero());
+
+    InternalArraySingleArgumentConstructorStub stub1_holey(
+        isolate(), GetHoleyElementsKind(kind));
+    __ TailCallStub(&stub1_holey, ne);
+  }
+
+  InternalArraySingleArgumentConstructorStub stub1(isolate(), kind);
+  __ TailCallStub(&stub1);
+}
+
+
+void InternalArrayConstructorStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3 : argc
+  //  -- r4 : constructor
+  //  -- sp[0] : return address
+  //  -- sp[4] : last argument
+  // -----------------------------------
+
+  if (FLAG_debug_code) {
+    // The array construct code is only set for the global and natives
+    // builtin Array functions which always have maps.
+
+    // Initial map for the builtin Array function should be a map.
+    __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+    // This test will catch both a NULL and a Smi.
+    __ TestIfSmi(r6, r0);
+    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0);
+    __ CompareObjectType(r6, r6, r7, MAP_TYPE);
+    __ Assert(eq, kUnexpectedInitialMapForArrayFunction);
+  }
+
+  // Figure out the right elements kind
+  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset));
+  // Load the map's "bit field 2" into |result|.
+  __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset));
+  // Retrieve elements_kind from bit field 2.
+  __ DecodeField<Map::ElementsKindBits>(r6);
+
+  if (FLAG_debug_code) {
+    Label done;
+    __ cmpi(r6, Operand(FAST_ELEMENTS));
+    __ beq(&done);
+    __ cmpi(r6, Operand(FAST_HOLEY_ELEMENTS));
+    __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray);
+    __ bind(&done);
+  }
+
+  Label fast_elements_case;
+  __ cmpi(r6, Operand(FAST_ELEMENTS));
+  __ beq(&fast_elements_case);
+  GenerateCase(masm, FAST_HOLEY_ELEMENTS);
+
+  __ bind(&fast_elements_case);
+  GenerateCase(masm, FAST_ELEMENTS);
+}
+
+
+void CallApiFunctionStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- r3                  : callee
+  //  -- r7                  : call_data
+  //  -- r5                  : holder
+  //  -- r4                  : api_function_address
+  //  -- cp                  : context
+  //  --
+  //  -- sp[0]               : last argument
+  //  -- ...
+  //  -- sp[(argc - 1)* 4]   : first argument
+  //  -- sp[argc * 4]        : receiver
+  // -----------------------------------
+
+  Register callee = r3;
+  Register call_data = r7;
+  Register holder = r5;
+  Register api_function_address = r4;
+  Register context = cp;
+
+  int argc = this->argc();
+  bool is_store = this->is_store();
+  bool call_data_undefined = this->call_data_undefined();
+
+  typedef FunctionCallbackArguments FCA;
+
+  STATIC_ASSERT(FCA::kContextSaveIndex == 6);
+  STATIC_ASSERT(FCA::kCalleeIndex == 5);
+  STATIC_ASSERT(FCA::kDataIndex == 4);
+  STATIC_ASSERT(FCA::kReturnValueOffset == 3);
+  STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2);
+  STATIC_ASSERT(FCA::kIsolateIndex == 1);
+  STATIC_ASSERT(FCA::kHolderIndex == 0);
+  STATIC_ASSERT(FCA::kArgsLength == 7);
+
+  // context save
+  __ push(context);
+  // load context from callee
+  __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset));
+
+  // callee
+  __ push(callee);
+
+  // call data
+  __ push(call_data);
+
+  Register scratch = call_data;
+  if (!call_data_undefined) {
+    __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  }
+  // return value
+  __ push(scratch);
+  // return value default
+  __ push(scratch);
+  // isolate
+  __ mov(scratch, Operand(ExternalReference::isolate_address(isolate())));
+  __ push(scratch);
+  // holder
+  __ push(holder);
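+  // The seven pushes above (context, callee, call data, return value,
+  // return value default, isolate, holder) lay out FunctionCallbackArguments
+  // on the stack, matching the STATIC_ASSERTs: sp[0] is the holder slot and
+  // sp[6 * kPointerSize] the context-save slot.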
+
+  // Prepare arguments.
+  __ mr(scratch, sp);
+
+  // Allocate the v8::Arguments structure in the arguments' space since
+  // it's not controlled by GC.
+  // PPC LINUX ABI:
+  //
+  // Create 5 extra slots on stack:
+  //    [0] space for DirectCEntryStub's LR save
+  //    [1-4] FunctionCallbackInfo
+  const int kApiStackSpace = 5;
+
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ EnterExitFrame(false, kApiStackSpace);
+
+  DCHECK(!api_function_address.is(r3) && !scratch.is(r3));
+  // r3 = FunctionCallbackInfo&
+  // Arguments is after the return address.
+  __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+  // FunctionCallbackInfo::implicit_args_
+  __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize));
+  // FunctionCallbackInfo::values_
+  __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize));
+  __ StoreP(ip, MemOperand(r3, 1 * kPointerSize));
+  // FunctionCallbackInfo::length_ = argc
+  __ li(ip, Operand(argc));
+  __ stw(ip, MemOperand(r3, 2 * kPointerSize));
+  // FunctionCallbackInfo::is_construct_call = 0
+  __ li(ip, Operand::Zero());
+  __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize));
+
+  const int kStackUnwindSpace = argc + FCA::kArgsLength + 1;
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_function_callback(isolate());
+
+  AllowExternalCallThatCantCauseGC scope(masm);
+  MemOperand context_restore_operand(
+      fp, (2 + FCA::kContextSaveIndex) * kPointerSize);
+  // Stores return the first js argument
+  int return_value_offset = 0;
+  if (is_store) {
+    return_value_offset = 2 + FCA::kArgsLength;
+  } else {
+    return_value_offset = 2 + FCA::kReturnValueOffset;
+  }
+  MemOperand return_value_operand(fp, return_value_offset * kPointerSize);
+
+  __ CallApiFunctionAndReturn(api_function_address, thunk_ref,
+                              kStackUnwindSpace, return_value_operand,
+                              &context_restore_operand);
+}
+
+
+void CallApiGetterStub::Generate(MacroAssembler* masm) {
+  // ----------- S t a t e -------------
+  //  -- sp[0]                  : name
+  //  -- sp[4 - kArgsLength*4]  : PropertyCallbackArguments object
+  //  -- ...
+  //  -- r5                     : api_function_address
+  // -----------------------------------
+
+  Register api_function_address = ApiGetterDescriptor::function_address();
+  DCHECK(api_function_address.is(r5));
+
+  __ mr(r3, sp);                               // r3 = Handle<Name>
+  __ addi(r4, r3, Operand(1 * kPointerSize));  // r4 = PCA
+
+// If ABI passes Handles (pointer-sized struct) in a register:
+//
+// Create 2 extra slots on stack:
+//    [0] space for DirectCEntryStub's LR save
+//    [1] AccessorInfo&
+//
+// Otherwise:
+//
+// Create 3 extra slots on stack:
+//    [0] space for DirectCEntryStub's LR save
+//    [1] copy of Handle (first arg)
+//    [2] AccessorInfo&
+#if ABI_PASSES_HANDLES_IN_REGS
+  const int kAccessorInfoSlot = kStackFrameExtraParamSlot + 1;
+  const int kApiStackSpace = 2;
+#else
+  const int kArg0Slot = kStackFrameExtraParamSlot + 1;
+  const int kAccessorInfoSlot = kArg0Slot + 1;
+  const int kApiStackSpace = 3;
+#endif
+
+  FrameScope frame_scope(masm, StackFrame::MANUAL);
+  __ EnterExitFrame(false, kApiStackSpace);
+
+#if !ABI_PASSES_HANDLES_IN_REGS
+  // pass 1st arg by reference
+  __ StoreP(r3, MemOperand(sp, kArg0Slot * kPointerSize));
+  __ addi(r3, sp, Operand(kArg0Slot * kPointerSize));
+#endif
+
+  // Create PropertyAccessorInfo instance on the stack above the exit frame with
+  // r4 (internal::Object** args_) as the data.
+  __ StoreP(r4, MemOperand(sp, kAccessorInfoSlot * kPointerSize));
+  // r4 = AccessorInfo&
+  __ addi(r4, sp, Operand(kAccessorInfoSlot * kPointerSize));
+
+  const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1;
+
+  ExternalReference thunk_ref =
+      ExternalReference::invoke_accessor_getter_callback(isolate());
+  __ CallApiFunctionAndReturn(api_function_address, thunk_ref,
+                              kStackUnwindSpace,
+                              MemOperand(fp, 6 * kPointerSize), NULL);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/code-stubs-ppc.h b/deps/v8/src/ppc/code-stubs-ppc.h
new file mode 100644 (file)
index 0000000..a9d06fb
--- /dev/null
@@ -0,0 +1,325 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_CODE_STUBS_PPC_H_
+#define V8_PPC_CODE_STUBS_PPC_H_
+
+namespace v8 {
+namespace internal {
+
+
+void ArrayNativeCode(MacroAssembler* masm, Label* call_generic_code);
+
+
+class StringHelper : public AllStatic {
+ public:
+  // Generate code for copying a large number of characters. This function
+  // is allowed to spend extra time setting up conditions to make copying
+  // faster. Copying of overlapping regions is not supported.
+  // Dest register ends at the position after the last character written.
+  static void GenerateCopyCharacters(MacroAssembler* masm, Register dest,
+                                     Register src, Register count,
+                                     Register scratch,
+                                     String::Encoding encoding);
+
+  // Compares two flat one-byte strings and returns result in r0.
+  static void GenerateCompareFlatOneByteStrings(MacroAssembler* masm,
+                                                Register left, Register right,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3);
+
+  // Compares two flat one-byte strings for equality and returns result in r0.
+  static void GenerateFlatOneByteStringEquals(MacroAssembler* masm,
+                                              Register left, Register right,
+                                              Register scratch1,
+                                              Register scratch2);
+
+ private:
+  static void GenerateOneByteCharsCompareLoop(MacroAssembler* masm,
+                                              Register left, Register right,
+                                              Register length,
+                                              Register scratch1,
+                                              Label* chars_not_equal);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(StringHelper);
+};
+
+
+class StoreRegistersStateStub : public PlatformCodeStub {
+ public:
+  explicit StoreRegistersStateStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
+
+  static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(StoreRegistersState, PlatformCodeStub);
+};
+
+
+class RestoreRegistersStateStub : public PlatformCodeStub {
+ public:
+  explicit RestoreRegistersStateStub(Isolate* isolate)
+      : PlatformCodeStub(isolate) {}
+
+  static void GenerateAheadOfTime(Isolate* isolate);
+
+ private:
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(RestoreRegistersState, PlatformCodeStub);
+};
+
+
+class RecordWriteStub : public PlatformCodeStub {
+ public:
+  RecordWriteStub(Isolate* isolate, Register object, Register value,
+                  Register address, RememberedSetAction remembered_set_action,
+                  SaveFPRegsMode fp_mode)
+      : PlatformCodeStub(isolate),
+        regs_(object,   // An input reg.
+              address,  // An input reg.
+              value) {  // One scratch reg.
+    minor_key_ = ObjectBits::encode(object.code()) |
+                 ValueBits::encode(value.code()) |
+                 AddressBits::encode(address.code()) |
+                 RememberedSetActionBits::encode(remembered_set_action) |
+                 SaveFPRegsModeBits::encode(fp_mode);
+  }
+
+  RecordWriteStub(uint32_t key, Isolate* isolate)
+      : PlatformCodeStub(key, isolate), regs_(object(), address(), value()) {}
+
+  enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };
+
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+  static void PatchBranchIntoNop(MacroAssembler* masm, int pos) {
+    // Consider adding DCHECK here to catch bad patching
+    masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BT);
+  }
+
+  static void PatchNopIntoBranch(MacroAssembler* masm, int pos) {
+    // Consider adding DCHECK here to catch bad patching
+    masm->instr_at_put(pos, (masm->instr_at(pos) & ~kBOfieldMask) | BF);
+  }
+
+  static Mode GetMode(Code* stub) {
+    Instr first_instruction =
+        Assembler::instr_at(stub->instruction_start() + Assembler::kInstrSize);
+    Instr second_instruction = Assembler::instr_at(stub->instruction_start() +
+                                                   (Assembler::kInstrSize * 2));
+
+    // Consider adding DCHECK here to catch unexpected instruction sequence
+    if (BF == (first_instruction & kBOfieldMask)) {
+      return INCREMENTAL;
+    }
+
+    if (BF == (second_instruction & kBOfieldMask)) {
+      return INCREMENTAL_COMPACTION;
+    }
+
+    return STORE_BUFFER_ONLY;
+  }
+
+  static void Patch(Code* stub, Mode mode) {
+    MacroAssembler masm(NULL, stub->instruction_start(),
+                        stub->instruction_size());
+    switch (mode) {
+      case STORE_BUFFER_ONLY:
+        DCHECK(GetMode(stub) == INCREMENTAL ||
+               GetMode(stub) == INCREMENTAL_COMPACTION);
+
+        PatchBranchIntoNop(&masm, Assembler::kInstrSize);
+        PatchBranchIntoNop(&masm, Assembler::kInstrSize * 2);
+        break;
+      case INCREMENTAL:
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, Assembler::kInstrSize);
+        break;
+      case INCREMENTAL_COMPACTION:
+        DCHECK(GetMode(stub) == STORE_BUFFER_ONLY);
+        PatchNopIntoBranch(&masm, Assembler::kInstrSize * 2);
+        break;
+    }
+    DCHECK(GetMode(stub) == mode);
+    CpuFeatures::FlushICache(stub->instruction_start() + Assembler::kInstrSize,
+                             2 * Assembler::kInstrSize);
+  }
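+  // For reference, the mode is encoded entirely in the two patchable
+  // instructions after the stub entry (see GetMode above): a BF-type
+  // conditional branch in the first slot means INCREMENTAL, in the second
+  // slot INCREMENTAL_COMPACTION, and two nops mean STORE_BUFFER_ONLY.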
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+
+ private:
+  // This is a helper class for freeing up 3 scratch registers.  The input is
+  // two registers that must be preserved and one scratch register provided by
+  // the caller.
+  class RegisterAllocation {
+   public:
+    RegisterAllocation(Register object, Register address, Register scratch0)
+        : object_(object), address_(address), scratch0_(scratch0) {
+      DCHECK(!AreAliased(scratch0, object, address, no_reg));
+      scratch1_ = GetRegisterThatIsNotOneOf(object_, address_, scratch0_);
+    }
+
+    void Save(MacroAssembler* masm) {
+      DCHECK(!AreAliased(object_, address_, scratch1_, scratch0_));
+      // We don't have to save scratch0_ because it was given to us as
+      // a scratch register.
+      masm->push(scratch1_);
+    }
+
+    void Restore(MacroAssembler* masm) { masm->pop(scratch1_); }
+
+    // If we have to call into C then we need to save and restore all caller-
+    // saved registers that were not already preserved.  The scratch registers
+    // will be restored by other means so we don't bother pushing them here.
+    void SaveCallerSaveRegisters(MacroAssembler* masm, SaveFPRegsMode mode) {
+      masm->mflr(r0);
+      masm->push(r0);
+      masm->MultiPush(kJSCallerSaved & ~scratch1_.bit());
+      if (mode == kSaveFPRegs) {
+        // Save all volatile FP registers except d0.
+        masm->SaveFPRegs(sp, 1, DoubleRegister::kNumVolatileRegisters - 1);
+      }
+    }
+
+    inline void RestoreCallerSaveRegisters(MacroAssembler* masm,
+                                           SaveFPRegsMode mode) {
+      if (mode == kSaveFPRegs) {
+        // Restore all volatile FP registers except d0.
+        masm->RestoreFPRegs(sp, 1, DoubleRegister::kNumVolatileRegisters - 1);
+      }
+      masm->MultiPop(kJSCallerSaved & ~scratch1_.bit());
+      masm->pop(r0);
+      masm->mtlr(r0);
+    }
+
+    inline Register object() { return object_; }
+    inline Register address() { return address_; }
+    inline Register scratch0() { return scratch0_; }
+    inline Register scratch1() { return scratch1_; }
+
+   private:
+    Register object_;
+    Register address_;
+    Register scratch0_;
+    Register scratch1_;
+
+    friend class RecordWriteStub;
+  };
+
+  enum OnNoNeedToInformIncrementalMarker {
+    kReturnOnNoNeedToInformIncrementalMarker,
+    kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
+  };
+
+  inline Major MajorKey() const FINAL { return RecordWrite; }
+
+  void Generate(MacroAssembler* masm) OVERRIDE;
+  void GenerateIncremental(MacroAssembler* masm, Mode mode);
+  void CheckNeedsToInformIncrementalMarker(
+      MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need,
+      Mode mode);
+  void InformIncrementalMarker(MacroAssembler* masm);
+
+  void Activate(Code* code) {
+    code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
+  }
+
+  Register object() const {
+    return Register::from_code(ObjectBits::decode(minor_key_));
+  }
+
+  Register value() const {
+    return Register::from_code(ValueBits::decode(minor_key_));
+  }
+
+  Register address() const {
+    return Register::from_code(AddressBits::decode(minor_key_));
+  }
+
+  RememberedSetAction remembered_set_action() const {
+    return RememberedSetActionBits::decode(minor_key_);
+  }
+
+  SaveFPRegsMode save_fp_regs_mode() const {
+    return SaveFPRegsModeBits::decode(minor_key_);
+  }
+
+  class ObjectBits : public BitField<int, 0, 5> {};
+  class ValueBits : public BitField<int, 5, 5> {};
+  class AddressBits : public BitField<int, 10, 5> {};
+  class RememberedSetActionBits : public BitField<RememberedSetAction, 15, 1> {
+  };
+  class SaveFPRegsModeBits : public BitField<SaveFPRegsMode, 16, 1> {};
+
+  Label slow_;
+  RegisterAllocation regs_;
+
+  DISALLOW_COPY_AND_ASSIGN(RecordWriteStub);
+};
+
+
+// Trampoline stub to call into native code. To call safely into native code
+// in the presence of compacting GC (which can move code objects), we need to
+// keep the code which called into native pinned in memory. Currently the
+// simplest approach is to generate such a stub early enough that it can never
+// be moved by the GC.
+class DirectCEntryStub : public PlatformCodeStub {
+ public:
+  explicit DirectCEntryStub(Isolate* isolate) : PlatformCodeStub(isolate) {}
+  void GenerateCall(MacroAssembler* masm, Register target);
+
+ private:
+  bool NeedsImmovableCode() { return true; }
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(DirectCEntry, PlatformCodeStub);
+};
+
+
+class NameDictionaryLookupStub : public PlatformCodeStub {
+ public:
+  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
+
+  NameDictionaryLookupStub(Isolate* isolate, LookupMode mode)
+      : PlatformCodeStub(isolate) {
+    minor_key_ = LookupModeBits::encode(mode);
+  }
+
+  static void GenerateNegativeLookup(MacroAssembler* masm, Label* miss,
+                                     Label* done, Register receiver,
+                                     Register properties, Handle<Name> name,
+                                     Register scratch0);
+
+  static void GeneratePositiveLookup(MacroAssembler* masm, Label* miss,
+                                     Label* done, Register elements,
+                                     Register name, Register r0, Register r1);
+
+  virtual bool SometimesSetsUpAFrame() { return false; }
+
+ private:
+  static const int kInlinedProbes = 4;
+  static const int kTotalProbes = 20;
+
+  static const int kCapacityOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kCapacityIndex * kPointerSize;
+
+  static const int kElementsStartOffset =
+      NameDictionary::kHeaderSize +
+      NameDictionary::kElementsStartIndex * kPointerSize;
+
+  LookupMode mode() const { return LookupModeBits::decode(minor_key_); }
+
+  class LookupModeBits : public BitField<LookupMode, 0, 1> {};
+
+  DEFINE_NULL_CALL_INTERFACE_DESCRIPTOR();
+  DEFINE_PLATFORM_CODE_STUB(NameDictionaryLookup, PlatformCodeStub);
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_PPC_CODE_STUBS_PPC_H_
diff --git a/deps/v8/src/ppc/codegen-ppc.cc b/deps/v8/src/ppc/codegen-ppc.cc
new file mode 100644 (file)
index 0000000..1074e87
--- /dev/null
@@ -0,0 +1,700 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/macro-assembler.h"
+#include "src/ppc/simulator-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+
+#define __ masm.
+
+
+#if defined(USE_SIMULATOR)
+byte* fast_exp_ppc_machine_code = NULL;
+double fast_exp_simulator(double x) {
+  return Simulator::current(Isolate::Current())
+      ->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
+}
+#endif
+
+
+UnaryMathFunction CreateExpFunction() {
+  if (!FLAG_fast_math) return &std::exp;
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+  if (buffer == NULL) return &std::exp;
+  ExternalReference::InitializeMathExpData();
+
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+  {
+    DoubleRegister input = d1;
+    DoubleRegister result = d2;
+    DoubleRegister double_scratch1 = d3;
+    DoubleRegister double_scratch2 = d4;
+    Register temp1 = r7;
+    Register temp2 = r8;
+    Register temp3 = r9;
+
+// Called from C
+#if ABI_USES_FUNCTION_DESCRIPTORS
+    __ function_descriptor();
+#endif
+
+    __ Push(temp3, temp2, temp1);
+    MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
+                                  double_scratch2, temp1, temp2, temp3);
+    __ Pop(temp3, temp2, temp1);
+    __ fmr(d1, result);
+    __ Ret();
+  }
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+#if !ABI_USES_FUNCTION_DESCRIPTORS
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
+#endif
+
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
+
+#if !defined(USE_SIMULATOR)
+  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#else
+  fast_exp_ppc_machine_code = buffer;
+  return &fast_exp_simulator;
+#endif
+}
+
+
+UnaryMathFunction CreateSqrtFunction() {
+#if defined(USE_SIMULATOR)
+  return &std::sqrt;
+#else
+  size_t actual_size;
+  byte* buffer =
+      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
+  if (buffer == NULL) return &std::sqrt;
+
+  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
+
+// Called from C
+#if ABI_USES_FUNCTION_DESCRIPTORS
+  __ function_descriptor();
+#endif
+
+  __ MovFromFloatParameter(d1);
+  __ fsqrt(d1, d1);
+  __ MovToFloatResult(d1);
+  __ Ret();
+
+  CodeDesc desc;
+  masm.GetCode(&desc);
+#if !ABI_USES_FUNCTION_DESCRIPTORS
+  DCHECK(!RelocInfo::RequiresRelocation(desc));
+#endif
+
+  CpuFeatures::FlushICache(buffer, actual_size);
+  base::OS::ProtectCode(buffer, actual_size);
+  return FUNCTION_CAST<UnaryMathFunction>(buffer);
+#endif
+}
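+// Illustrative usage of the factories above (a sketch, not part of this
+// patch): the returned pointer is invoked like any C function, e.g.
+//   UnaryMathFunction fast_sqrt = CreateSqrtFunction();
+//   double r = fast_sqrt(2.0);
+// Both factories fall back to the C++ library routine (&std::exp or
+// &std::sqrt) when no executable buffer is available or, for exp, when
+// --fast-math is off.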
+
+#undef __
+
+
+// -------------------------------------------------------------------------
+// Platform-specific RuntimeCallHelper functions.
+
+void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
+  masm->EnterFrame(StackFrame::INTERNAL);
+  DCHECK(!masm->has_frame());
+  masm->set_has_frame(true);
+}
+
+
+void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
+  masm->LeaveFrame(StackFrame::INTERNAL);
+  DCHECK(masm->has_frame());
+  masm->set_has_frame(false);
+}
+
+
+// -------------------------------------------------------------------------
+// Code generators
+
+#define __ ACCESS_MASM(masm)
+
+void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
+    MacroAssembler* masm, Register receiver, Register key, Register value,
+    Register target_map, AllocationSiteMode mode,
+    Label* allocation_memento_found) {
+  Register scratch_elements = r7;
+  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
+
+  if (mode == TRACK_ALLOCATION_SITE) {
+    DCHECK(allocation_memento_found != NULL);
+    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements,
+                                         allocation_memento_found);
+  }
+
+  // Set transitioned map.
+  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+void ElementsTransitionGenerator::GenerateSmiToDouble(
+    MacroAssembler* masm, Register receiver, Register key, Register value,
+    Register target_map, AllocationSiteMode mode, Label* fail) {
+  // lr contains the return address
+  Label loop, entry, convert_hole, gc_required, only_change_map, done;
+  Register elements = r7;
+  Register length = r8;
+  Register array = r9;
+  Register array_end = array;
+
+  // target_map parameter can be clobbered.
+  Register scratch1 = target_map;
+  Register scratch2 = r11;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
+                     scratch2));
+
+  if (mode == TRACK_ALLOCATION_SITE) {
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+  }
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+  __ beq(&only_change_map);
+
+  // Preserve lr and use r17 as a temporary register.
+  __ mflr(r0);
+  __ Push(r0);
+
+  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // length: number of elements (smi-tagged)
+
+  // Allocate new FixedDoubleArray.
+  __ SmiToDoubleArrayOffset(r17, length);
+  __ addi(r17, r17, Operand(FixedDoubleArray::kHeaderSize));
+  __ Allocate(r17, array, r10, scratch2, &gc_required, DOUBLE_ALIGNMENT);
+
+  // Set destination FixedDoubleArray's length and map.
+  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
+  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
+
+  // Update receiver's map.
+  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
+                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  // Replace receiver's backing store with newly created FixedDoubleArray.
+  __ addi(scratch1, array, Operand(kHeapObjectTag));
+  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
+  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
+                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  // Prepare for conversion loop.
+  __ addi(target_map, elements,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ addi(r10, array, Operand(FixedDoubleArray::kHeaderSize));
+  __ SmiToDoubleArrayOffset(array, length);
+  __ add(array_end, r10, array);
+// Repurpose registers no longer in use.
+#if V8_TARGET_ARCH_PPC64
+  Register hole_int64 = elements;
+  __ mov(hole_int64, Operand(kHoleNanInt64));
+#else
+  Register hole_lower = elements;
+  Register hole_upper = length;
+  __ mov(hole_lower, Operand(kHoleNanLower32));
+  __ mov(hole_upper, Operand(kHoleNanUpper32));
+#endif
+  // scratch1: begin of source FixedArray element fields, not tagged
+  // hole_lower: kHoleNanLower32 OR hole_int64
+  // hole_upper: kHoleNanUpper32
+  // array_end: end of destination FixedDoubleArray, not tagged
+  // r10: begin of destination FixedDoubleArray element fields, not tagged
+
+  __ b(&entry);
+
+  __ bind(&only_change_map);
+  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ b(&done);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ Pop(r0);
+  __ mtlr(r0);
+  __ b(fail);
+
+  // Convert and copy elements.
+  __ bind(&loop);
+  __ LoadP(r11, MemOperand(scratch1));
+  __ addi(scratch1, scratch1, Operand(kPointerSize));
+  // r11: current element
+  __ UntagAndJumpIfNotSmi(r11, r11, &convert_hole);
+
+  // Normal smi, convert to double and store.
+  __ ConvertIntToDouble(r11, d0);
+  __ stfd(d0, MemOperand(r10, 0));
+  __ addi(r10, r10, Operand(8));
+
+  __ b(&entry);
+
+  // Hole found, store the-hole NaN.
+  __ bind(&convert_hole);
+  if (FLAG_debug_code) {
+    // Reload the current element; only the hole should take this path.
+    __ LoadP(r11, MemOperand(scratch1, -kPointerSize));
+    __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
+    __ Assert(eq, kObjectFoundInSmiOnlyArray);
+  }
+#if V8_TARGET_ARCH_PPC64
+  __ std(hole_int64, MemOperand(r10, 0));
+#else
+  __ stw(hole_upper, MemOperand(r10, Register::kExponentOffset));
+  __ stw(hole_lower, MemOperand(r10, Register::kMantissaOffset));
+#endif
+  __ addi(r10, r10, Operand(8));
+
+  __ bind(&entry);
+  __ cmp(r10, array_end);
+  __ blt(&loop);
+
+  __ Pop(r0);
+  __ mtlr(r0);
+  __ bind(&done);
+}
+
+
+void ElementsTransitionGenerator::GenerateDoubleToObject(
+    MacroAssembler* masm, Register receiver, Register key, Register value,
+    Register target_map, AllocationSiteMode mode, Label* fail) {
+  // Register lr contains the return address.
+  Label loop, convert_hole, gc_required, only_change_map;
+  Register elements = r7;
+  Register array = r9;
+  Register length = r8;
+  Register scratch = r11;
+
+  // Verify input registers don't conflict with locals.
+  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
+                     scratch));
+
+  if (mode == TRACK_ALLOCATION_SITE) {
+    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
+  }
+
+  // Check for empty arrays, which only require a map transition and no changes
+  // to the backing store.
+  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
+  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
+  __ beq(&only_change_map);
+
+  __ Push(target_map, receiver, key, value);
+  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
+  // elements: source FixedDoubleArray
+  // length: number of elements (smi-tagged)
+
+  // Allocate new FixedArray.
+  // Re-use value and target_map registers, as they have been saved on the
+  // stack.
+  Register array_size = value;
+  Register allocate_scratch = target_map;
+  __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
+  __ SmiToPtrArrayOffset(r0, length);
+  __ add(array_size, array_size, r0);
+  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
+              NO_ALLOCATION_FLAGS);
+  // array: destination FixedArray, not tagged as heap object
+  // Set destination FixedArray's length and map.
+  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
+  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
+  __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
+  __ addi(array, array, Operand(kHeapObjectTag));
+
+  // Prepare for conversion loop.
+  Register src_elements = elements;
+  Register dst_elements = target_map;
+  Register dst_end = length;
+  Register heap_number_map = scratch;
+  __ addi(src_elements, elements,
+          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
+  __ SmiToPtrArrayOffset(length, length);
+  __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
+
+  Label initialization_loop, loop_done;
+  __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
+  __ beq(&loop_done, cr0);
+
+  // Allocating heap numbers in the loop below can fail and cause a jump to
+  // gc_required. We can't leave a partly initialized FixedArray behind,
+  // so pessimistically fill it with holes now.
+  __ mtctr(r0);
+  __ addi(dst_elements, array,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
+  __ bind(&initialization_loop);
+  __ StorePU(r10, MemOperand(dst_elements, kPointerSize));
+  __ bdnz(&initialization_loop);
+
+  __ addi(dst_elements, array,
+          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ add(dst_end, dst_elements, length);
+  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  // Using offsetted addresses in src_elements to fully take advantage of
+  // post-indexing.
+  // dst_elements: begin of destination FixedArray element fields, not tagged
+  // src_elements: begin of source FixedDoubleArray element fields,
+  //               not tagged, +4
+  // dst_end: end of destination FixedArray, not tagged
+  // array: destination FixedArray
+  // r10: the-hole pointer
+  // heap_number_map: heap number map
+  __ b(&loop);
+
+  // Call into runtime if GC is required.
+  __ bind(&gc_required);
+  __ Pop(target_map, receiver, key, value);
+  __ b(fail);
+
+  // Replace the-hole NaN with the-hole pointer.
+  __ bind(&convert_hole);
+  __ StoreP(r10, MemOperand(dst_elements));
+  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
+  __ cmpl(dst_elements, dst_end);
+  __ bge(&loop_done);
+
+  __ bind(&loop);
+  Register upper_bits = key;
+  __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
+  __ addi(src_elements, src_elements, Operand(kDoubleSize));
+  // upper_bits: current element's upper 32 bit
+  // src_elements: address of next element's upper 32 bit
+  __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
+  __ beq(&convert_hole);
+
+  // Non-hole double, copy value into a heap number.
+  Register heap_number = receiver;
+  Register scratch2 = value;
+  __ AllocateHeapNumber(heap_number, scratch2, r11, heap_number_map,
+                        &gc_required);
+  // heap_number: new heap number
+#if V8_TARGET_ARCH_PPC64
+  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
+  // subtract tag for std
+  __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
+  __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
+#else
+  __ lwz(scratch2,
+         MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
+  __ lwz(upper_bits,
+         MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
+  __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
+  __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+#endif
+  __ mr(scratch2, dst_elements);
+  __ StoreP(heap_number, MemOperand(dst_elements));
+  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
+  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
+                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  __ cmpl(dst_elements, dst_end);
+  __ blt(&loop);
+  __ bind(&loop_done);
+
+  __ Pop(target_map, receiver, key, value);
+  // Replace receiver's backing store with newly created and filled FixedArray.
+  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
+  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+
+  __ bind(&only_change_map);
+  // Update receiver's map.
+  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
+                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+}
+
+
+// assume ip can be used as a scratch register below
+void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
+                                       Register index, Register result,
+                                       Label* call_runtime) {
+  // Fetch the instance type of the receiver into result register.
+  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // We need special handling for indirect strings.
+  Label check_sequential;
+  __ andi(r0, result, Operand(kIsIndirectStringMask));
+  __ beq(&check_sequential, cr0);
+
+  // Dispatch on the indirect string shape: slice or cons.
+  Label cons_string;
+  __ mov(ip, Operand(kSlicedNotConsMask));
+  __ and_(r0, result, ip, SetRC);
+  __ beq(&cons_string, cr0);
+
+  // Handle slices.
+  Label indirect_string_loaded;
+  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
+  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
+  __ SmiUntag(ip, result);
+  __ add(index, index, ip);
+  __ b(&indirect_string_loaded);
+
+  // Handle cons strings.
+  // Check whether the right hand side is the empty string (i.e. if
+  // this is really a flat string in a cons string). If that is not
+  // the case we would rather go to the runtime system now to flatten
+  // the string.
+  __ bind(&cons_string);
+  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
+  __ CompareRoot(result, Heap::kempty_stringRootIndex);
+  __ bne(call_runtime);
+  // Get the first of the two strings and load its instance type.
+  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
+
+  __ bind(&indirect_string_loaded);
+  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+
+  // Distinguish sequential and external strings. Only these two string
+  // representations can reach here (slices and flat cons strings have been
+  // reduced to the underlying sequential or external string).
+  Label external_string, check_encoding;
+  __ bind(&check_sequential);
+  STATIC_ASSERT(kSeqStringTag == 0);
+  __ andi(r0, result, Operand(kStringRepresentationMask));
+  __ bne(&external_string, cr0);
+
+  // Prepare sequential strings
+  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
+  __ addi(string, string,
+          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ b(&check_encoding);
+
+  // Handle external strings.
+  __ bind(&external_string);
+  if (FLAG_debug_code) {
+    // Assert that we do not have a cons or slice (indirect strings) here.
+    // Sequential strings have already been ruled out.
+    __ andi(r0, result, Operand(kIsIndirectStringMask));
+    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
+  }
+  // Rule out short external strings.
+  STATIC_ASSERT(kShortExternalStringTag != 0);
+  __ andi(r0, result, Operand(kShortExternalStringMask));
+  __ bne(call_runtime, cr0);
+  __ LoadP(string,
+           FieldMemOperand(string, ExternalString::kResourceDataOffset));
+
+  Label one_byte, done;
+  __ bind(&check_encoding);
+  STATIC_ASSERT(kTwoByteStringTag == 0);
+  __ andi(r0, result, Operand(kStringEncodingMask));
+  __ bne(&one_byte, cr0);
+  // Two-byte string.
+  __ ShiftLeftImm(result, index, Operand(1));
+  __ lhzx(result, MemOperand(string, result));
+  __ b(&done);
+  __ bind(&one_byte);
+  // One-byte string.
+  __ lbzx(result, MemOperand(string, index));
+  __ bind(&done);
+}
+
+
+static MemOperand ExpConstant(int index, Register base) {
+  return MemOperand(base, index * kDoubleSize);
+}
+
+
+void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
+                                   DoubleRegister result,
+                                   DoubleRegister double_scratch1,
+                                   DoubleRegister double_scratch2,
+                                   Register temp1, Register temp2,
+                                   Register temp3) {
+  DCHECK(!input.is(result));
+  DCHECK(!input.is(double_scratch1));
+  DCHECK(!input.is(double_scratch2));
+  DCHECK(!result.is(double_scratch1));
+  DCHECK(!result.is(double_scratch2));
+  DCHECK(!double_scratch1.is(double_scratch2));
+  DCHECK(!temp1.is(temp2));
+  DCHECK(!temp1.is(temp3));
+  DCHECK(!temp2.is(temp3));
+  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
+  DCHECK(!masm->serializer_enabled());  // External references not serializable.
+
+  Label zero, infinity, done;
+
+  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
+
+  __ lfd(double_scratch1, ExpConstant(0, temp3));
+  __ fcmpu(double_scratch1, input);
+  __ fmr(result, input);
+  __ bunordered(&done);
+  __ bge(&zero);
+
+  __ lfd(double_scratch2, ExpConstant(1, temp3));
+  __ fcmpu(input, double_scratch2);
+  __ bge(&infinity);
+
+  __ lfd(double_scratch1, ExpConstant(3, temp3));
+  __ lfd(result, ExpConstant(4, temp3));
+  __ fmul(double_scratch1, double_scratch1, input);
+  __ fadd(double_scratch1, double_scratch1, result);
+  __ MovDoubleLowToInt(temp2, double_scratch1);
+  __ fsub(double_scratch1, double_scratch1, result);
+  __ lfd(result, ExpConstant(6, temp3));
+  __ lfd(double_scratch2, ExpConstant(5, temp3));
+  __ fmul(double_scratch1, double_scratch1, double_scratch2);
+  __ fsub(double_scratch1, double_scratch1, input);
+  __ fsub(result, result, double_scratch1);
+  __ fmul(double_scratch2, double_scratch1, double_scratch1);
+  __ fmul(result, result, double_scratch2);
+  __ lfd(double_scratch2, ExpConstant(7, temp3));
+  __ fmul(result, result, double_scratch2);
+  __ fsub(result, result, double_scratch1);
+  __ lfd(double_scratch2, ExpConstant(8, temp3));
+  __ fadd(result, result, double_scratch2);
+  __ srwi(temp1, temp2, Operand(11));
+  __ andi(temp2, temp2, Operand(0x7ff));
+  __ addi(temp1, temp1, Operand(0x3ff));
+
+  // Must not call ExpConstant() after overwriting temp3!
+  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
+  __ slwi(temp2, temp2, Operand(3));
+#if V8_TARGET_ARCH_PPC64
+  __ ldx(temp2, MemOperand(temp3, temp2));
+  __ sldi(temp1, temp1, Operand(52));
+  __ orx(temp2, temp1, temp2);
+  __ MovInt64ToDouble(double_scratch1, temp2);
+#else
+  __ add(ip, temp3, temp2);
+  __ lwz(temp3, MemOperand(ip, Register::kExponentOffset));
+  __ lwz(temp2, MemOperand(ip, Register::kMantissaOffset));
+  __ slwi(temp1, temp1, Operand(20));
+  __ orx(temp3, temp1, temp3);
+  __ MovInt64ToDouble(double_scratch1, temp3, temp2);
+#endif
+
+  __ fmul(result, result, double_scratch1);
+  __ b(&done);
+
+  __ bind(&zero);
+  __ fmr(result, kDoubleRegZero);
+  __ b(&done);
+
+  __ bind(&infinity);
+  __ lfd(result, ExpConstant(2, temp3));
+
+  __ bind(&done);
+}
+
+#undef __
+
+CodeAgingHelper::CodeAgingHelper() {
+  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
+  // Since patcher is a large object, allocate it dynamically when needed,
+  // to avoid overloading the stack in stress conditions.
+  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
+  // the process, before the PPC simulator ICache is set up.
+  SmartPointer<CodePatcher> patcher(new CodePatcher(
+      young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
+      CodePatcher::DONT_FLUSH));
+  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
+  patcher->masm()->PushFixedFrame(r4);
+  patcher->masm()->addi(fp, sp,
+                        Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+  for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
+    patcher->masm()->nop();
+  }
+}
+
+
+#ifdef DEBUG
+bool CodeAgingHelper::IsOld(byte* candidate) const {
+  return Assembler::IsNop(Assembler::instr_at(candidate));
+}
+#endif
+
+
+bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
+  bool result = isolate->code_aging_helper()->IsYoung(sequence);
+  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
+  return result;
+}
+
+
+void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
+                               MarkingParity* parity) {
+  if (IsYoungSequence(isolate, sequence)) {
+    *age = kNoAgeCodeAge;
+    *parity = NO_MARKING_PARITY;
+  } else {
+    ConstantPoolArray* constant_pool = NULL;
+    Address target_address = Assembler::target_address_at(
+        sequence + kCodeAgingTargetDelta, constant_pool);
+    Code* stub = GetCodeFromTargetAddress(target_address);
+    GetCodeAgeAndParity(stub, age, parity);
+  }
+}
+
+
+void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
+                                MarkingParity parity) {
+  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
+  if (age == kNoAgeCodeAge) {
+    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
+    CpuFeatures::FlushICache(sequence, young_length);
+  } else {
+    // FIXED_SEQUENCE
+    Code* stub = GetCodeAgeStub(isolate, age, parity);
+    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
+    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
+    // Don't use Call -- we need to preserve ip and lr.
+    // GenerateMakeCodeYoungAgainCommon for the stub code.
+    patcher.masm()->nop();  // marker to detect sequence (see IsOld)
+    patcher.masm()->mov(r3, Operand(target));
+    patcher.masm()->Jump(r3);
+    for (int i = 0; i < kCodeAgingSequenceNops; i++) {
+      patcher.masm()->nop();
+    }
+  }
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/codegen-ppc.h b/deps/v8/src/ppc/codegen-ppc.h
new file mode 100644 (file)
index 0000000..500bf60
--- /dev/null
@@ -0,0 +1,44 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_CODEGEN_PPC_H_
+#define V8_PPC_CODEGEN_PPC_H_
+
+#include "src/ast.h"
+#include "src/macro-assembler.h"
+
+namespace v8 {
+namespace internal {
+
+
+enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+
+
+class StringCharLoadGenerator : public AllStatic {
+ public:
+  // Generates the code for handling different string types and loading the
+  // indexed character into |result|.  We expect |index| as untagged input and
+  // |result| as untagged output.
+  static void Generate(MacroAssembler* masm, Register string, Register index,
+                       Register result, Label* call_runtime);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(StringCharLoadGenerator);
+};
+
+class MathExpGenerator : public AllStatic {
+ public:
+  // Register input isn't modified. All other registers are clobbered.
+  static void EmitMathExp(MacroAssembler* masm, DoubleRegister input,
+                          DoubleRegister result, DoubleRegister double_scratch1,
+                          DoubleRegister double_scratch2, Register temp1,
+                          Register temp2, Register temp3);
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(MathExpGenerator);
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_PPC_CODEGEN_PPC_H_
diff --git a/deps/v8/src/ppc/constants-ppc.cc b/deps/v8/src/ppc/constants-ppc.cc
new file mode 100644 (file)
index 0000000..f32f25a
--- /dev/null
@@ -0,0 +1,91 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/ppc/constants-ppc.h"
+
+
+namespace v8 {
+namespace internal {
+
+// These register names are defined in a way to match the native disassembler
+// formatting. See for example the command "objdump -d <binary file>".
+const char* Registers::names_[kNumRegisters] = {
+    "r0",  "sp",  "r2",  "r3",  "r4",  "r5",  "r6",  "r7",  "r8",  "r9",  "r10",
+    "r11", "r12", "r13", "r14", "r15", "r16", "r17", "r18", "r19", "r20", "r21",
+    "r22", "r23", "r24", "r25", "r26", "r27", "r28", "r29", "r30", "fp"};
+
+
+// List of alias names which can be used when referring to PPC registers.
+const Registers::RegisterAlias Registers::aliases_[] = {{10, "sl"},
+                                                        {11, "r11"},
+                                                        {12, "r12"},
+                                                        {13, "r13"},
+                                                        {14, "r14"},
+                                                        {15, "r15"},
+                                                        {kNoRegister, NULL}};
+
+
+const char* Registers::Name(int reg) {
+  const char* result;
+  if ((0 <= reg) && (reg < kNumRegisters)) {
+    result = names_[reg];
+  } else {
+    result = "noreg";
+  }
+  return result;
+}
+
+
+const char* FPRegisters::names_[kNumFPRegisters] = {
+    "d0",  "d1",  "d2",  "d3",  "d4",  "d5",  "d6",  "d7",  "d8",  "d9",  "d10",
+    "d11", "d12", "d13", "d14", "d15", "d16", "d17", "d18", "d19", "d20", "d21",
+    "d22", "d23", "d24", "d25", "d26", "d27", "d28", "d29", "d30", "d31"};
+
+
+const char* FPRegisters::Name(int reg) {
+  DCHECK((0 <= reg) && (reg < kNumFPRegisters));
+  return names_[reg];
+}
+
+
+int FPRegisters::Number(const char* name) {
+  for (int i = 0; i < kNumFPRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // No register with the requested name found.
+  return kNoRegister;
+}
+
+
+int Registers::Number(const char* name) {
+  // Look through the canonical names.
+  for (int i = 0; i < kNumRegisters; i++) {
+    if (strcmp(names_[i], name) == 0) {
+      return i;
+    }
+  }
+
+  // Look through the alias names.
+  int i = 0;
+  while (aliases_[i].reg != kNoRegister) {
+    if (strcmp(aliases_[i].name, name) == 0) {
+      return aliases_[i].reg;
+    }
+    i++;
+  }
+
+  // No register with the requested name found.
+  return kNoRegister;
+}
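+// For example (behaviour implied by the tables above): Number("sp") returns
+// 1, Number("sl") resolves through the alias table to 10, and an unknown
+// name yields kNoRegister.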
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/constants-ppc.h b/deps/v8/src/ppc/constants-ppc.h
new file mode 100644 (file)
index 0000000..9434b8f
--- /dev/null
@@ -0,0 +1,600 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_CONSTANTS_PPC_H_
+#define V8_PPC_CONSTANTS_PPC_H_
+
+namespace v8 {
+namespace internal {
+
+// Number of registers
+const int kNumRegisters = 32;
+
+// FP support.
+const int kNumFPDoubleRegisters = 32;
+const int kNumFPRegisters = kNumFPDoubleRegisters;
+
+const int kNoRegister = -1;
+
+// sign-extend the least significant 16-bits of value <imm>
+#define SIGN_EXT_IMM16(imm) ((static_cast<int>(imm) << 16) >> 16)
+
+// sign-extend the least significant 26-bits of value <imm>
+#define SIGN_EXT_IMM26(imm) ((static_cast<int>(imm) << 6) >> 6)
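+// For example, SIGN_EXT_IMM16(0xFFFF) evaluates to -1 and
+// SIGN_EXT_IMM16(0x7FFF) to 32767: the left shift places bit 15 in the sign
+// position and the arithmetic right shift replicates it across the upper
+// half of the word.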
+
+// -----------------------------------------------------------------------------
+// Conditions.
+
+// Defines constants and accessor classes to assemble, disassemble and
+// simulate PPC instructions.
+//
+// Section references in the code refer to the "PowerPC Microprocessor
+// Family: The Programmer's Reference Guide" from 10/95
+// https://www-01.ibm.com/chips/techlib/techlib.nsf/techdocs/852569B20050FF778525699600741775/$file/prg.pdf
+//
+
+// Constants for specific fields are defined in their respective named enums.
+// General constants are in an anonymous enum in class Instr.
+enum Condition {
+  kNoCondition = -1,
+  eq = 0,         // Equal.
+  ne = 1,         // Not equal.
+  ge = 2,         // Greater or equal.
+  lt = 3,         // Less than.
+  gt = 4,         // Greater than.
+  le = 5,         // Less than or equal.
+  unordered = 6,  // Floating-point unordered
+  ordered = 7,
+  overflow = 8,  // Summary overflow
+  nooverflow = 9,
+  al = 10  // Always.
+};
+
+
+inline Condition NegateCondition(Condition cond) {
+  DCHECK(cond != al);
+  return static_cast<Condition>(cond ^ ne);
+}
+
+
+// Commute a condition such that {a cond b == b cond' a}.
+inline Condition CommuteCondition(Condition cond) {
+  switch (cond) {
+    case lt:
+      return gt;
+    case gt:
+      return lt;
+    case ge:
+      return le;
+    case le:
+      return ge;
+    default:
+      return cond;
+  }
+}
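+// For example, CommuteCondition(lt) == gt, so "a lt b" may be emitted as
+// "b gt a" when operands are swapped; eq and ne commute to themselves via
+// the default case. NegateCondition simply flips the low bit, pairing
+// eq/ne, ge/lt and gt/le.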
+
+// -----------------------------------------------------------------------------
+// Instructions encoding.
+
+// Instr is merely used by the Assembler to distinguish 32-bit integers
+// representing instructions from usual 32-bit values.
+// Instruction objects are pointers to 32-bit values, and provide methods to
+// access the various ISA fields.
+typedef int32_t Instr;
+
+// Opcodes as defined in section 4.2 table 34 (32-bit PowerPC)
+enum Opcode {
+  TWI = 3 << 26,       // Trap Word Immediate
+  MULLI = 7 << 26,     // Multiply Low Immediate
+  SUBFIC = 8 << 26,    // Subtract from Immediate Carrying
+  CMPLI = 10 << 26,    // Compare Logical Immediate
+  CMPI = 11 << 26,     // Compare Immediate
+  ADDIC = 12 << 26,    // Add Immediate Carrying
+  ADDICx = 13 << 26,   // Add Immediate Carrying and Record
+  ADDI = 14 << 26,     // Add Immediate
+  ADDIS = 15 << 26,    // Add Immediate Shifted
+  BCX = 16 << 26,      // Branch Conditional
+  SC = 17 << 26,       // System Call
+  BX = 18 << 26,       // Branch
+  EXT1 = 19 << 26,     // Extended code set 1
+  RLWIMIX = 20 << 26,  // Rotate Left Word Immediate then Mask Insert
+  RLWINMX = 21 << 26,  // Rotate Left Word Immediate then AND with Mask
+  RLWNMX = 23 << 26,   // Rotate Left Word then AND with Mask
+  ORI = 24 << 26,      // OR Immediate
+  ORIS = 25 << 26,     // OR Immediate Shifted
+  XORI = 26 << 26,     // XOR Immediate
+  XORIS = 27 << 26,    // XOR Immediate Shifted
+  ANDIx = 28 << 26,    // AND Immediate
+  ANDISx = 29 << 26,   // AND Immediate Shifted
+  EXT5 = 30 << 26,     // Extended code set 5 - 64bit only
+  EXT2 = 31 << 26,     // Extended code set 2
+  LWZ = 32 << 26,      // Load Word and Zero
+  LWZU = 33 << 26,     // Load Word with Zero Update
+  LBZ = 34 << 26,      // Load Byte and Zero
+  LBZU = 35 << 26,     // Load Byte and Zero with Update
+  STW = 36 << 26,      // Store Word
+  STWU = 37 << 26,     // Store Word with Update
+  STB = 38 << 26,      // Store Byte
+  STBU = 39 << 26,     // Store Byte with Update
+  LHZ = 40 << 26,      // Load Half and Zero
+  LHZU = 41 << 26,     // Load Half and Zero with Update
+  LHA = 42 << 26,      // Load Half Algebraic
+  LHAU = 43 << 26,     // Load Half Algebraic with Update
+  STH = 44 << 26,      // Store Half
+  STHU = 45 << 26,     // Store Half with Update
+  LMW = 46 << 26,      // Load Multiple Word
+  STMW = 47 << 26,     // Store Multiple Word
+  LFS = 48 << 26,      // Load Floating-Point Single
+  LFSU = 49 << 26,     // Load Floating-Point Single with Update
+  LFD = 50 << 26,      // Load Floating-Point Double
+  LFDU = 51 << 26,     // Load Floating-Point Double with Update
+  STFS = 52 << 26,     // Store Floating-Point Single
+  STFSU = 53 << 26,    // Store Floating-Point Single with Update
+  STFD = 54 << 26,     // Store Floating-Point Double
+  STFDU = 55 << 26,    // Store Floating-Point Double with Update
+  LD = 58 << 26,       // Load Double Word
+  EXT3 = 59 << 26,     // Extended code set 3
+  STD = 62 << 26,      // Store Double Word (optionally with Update)
+  EXT4 = 63 << 26      // Extended code set 4
+};
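+// A D-form instruction is assembled by OR-ing a primary opcode with its
+// operand fields. For example (a sketch; field positions per the PowerPC ISA,
+// with B21, B16 and kImm16Mask defined below):
+//   Instr addi =
+//       ADDI | rt.code() * B21 | ra.code() * B16 | (imm & kImm16Mask);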
+
+// Bits 10-1
+enum OpcodeExt1 {
+  MCRF = 0 << 1,      // Move Condition Register Field
+  BCLRX = 16 << 1,    // Branch Conditional Link Register
+  CRNOR = 33 << 1,    // Condition Register NOR
+  RFI = 50 << 1,      // Return from Interrupt
+  CRANDC = 129 << 1,  // Condition Register AND with Complement
+  ISYNC = 150 << 1,   // Instruction Synchronize
+  CRXOR = 193 << 1,   // Condition Register XOR
+  CRNAND = 225 << 1,  // Condition Register NAND
+  CRAND = 257 << 1,   // Condition Register AND
+  CREQV = 289 << 1,   // Condition Register Equivalent
+  CRORC = 417 << 1,   // Condition Register OR with Complement
+  CROR = 449 << 1,    // Condition Register OR
+  BCCTRX = 528 << 1   // Branch Conditional to Count Register
+};
+
+// Bits 9-1 or 10-1
+enum OpcodeExt2 {
+  CMP = 0 << 1,
+  TW = 4 << 1,
+  SUBFCX = 8 << 1,
+  ADDCX = 10 << 1,
+  MULHWUX = 11 << 1,
+  MFCR = 19 << 1,
+  LWARX = 20 << 1,
+  LDX = 21 << 1,
+  LWZX = 23 << 1,  // load word zero w/ x-form
+  SLWX = 24 << 1,
+  CNTLZWX = 26 << 1,
+  SLDX = 27 << 1,
+  ANDX = 28 << 1,
+  CMPL = 32 << 1,
+  SUBFX = 40 << 1,
+  MFVSRD = 51 << 1,  // Move From VSR Doubleword
+  LDUX = 53 << 1,
+  DCBST = 54 << 1,
+  LWZUX = 55 << 1,  // load word zero w/ update x-form
+  CNTLZDX = 58 << 1,
+  ANDCX = 60 << 1,
+  MULHWX = 75 << 1,
+  DCBF = 86 << 1,
+  LBZX = 87 << 1,  // load byte zero w/ x-form
+  NEGX = 104 << 1,
+  MFVSRWZ = 115 << 1,  // Move From VSR Word And Zero
+  LBZUX = 119 << 1,    // load byte zero w/ update x-form
+  NORX = 124 << 1,
+  SUBFEX = 136 << 1,
+  ADDEX = 138 << 1,
+  STDX = 149 << 1,
+  STWX = 151 << 1,    // store word w/ x-form
+  MTVSRD = 179 << 1,  // Move To VSR Doubleword
+  STDUX = 181 << 1,
+  STWUX = 183 << 1,  // store word w/ update x-form
+  // Not encoded here: MTCRF, MTMSR, STWCXx, SUBFZEX.
+  ADDZEX = 202 << 1,  // Add to Zero Extended
+  // Not encoded here: MTSR.
+  MTVSRWA = 211 << 1,  // Move To VSR Word Algebraic
+  STBX = 215 << 1,     // store byte w/ x-form
+  MULLD = 233 << 1,    // Multiply Low Double Word
+  MULLW = 235 << 1,    // Multiply Low Word
+  MTVSRWZ = 243 << 1,  // Move To VSR Word And Zero
+  STBUX = 247 << 1,    // store byte w/ update x-form
+  ADDX = 266 << 1,     // Add
+  LHZX = 279 << 1,     // load half-word zero w/ x-form
+  LHZUX = 311 << 1,    // load half-word zero w/ update x-form
+  LHAX = 343 << 1,     // load half-word algebraic w/ x-form
+  LHAUX = 375 << 1,    // load half-word algebraic w/ update x-form
+  XORX = 316 << 1,     // Exclusive OR
+  MFSPR = 339 << 1,    // Move from Special-Purpose-Register
+  STHX = 407 << 1,     // store half-word w/ x-form
+  STHUX = 439 << 1,    // store half-word w/ update x-form
+  ORX = 444 << 1,      // Or
+  MTSPR = 467 << 1,    // Move to Special-Purpose-Register
+  DIVD = 489 << 1,     // Divide Double Word
+  DIVW = 491 << 1,     // Divide Word
+
+  // Below represent bits 10-1  (any value >= 512)
+  LFSX = 535 << 1,    // load float-single w/ x-form
+  SRWX = 536 << 1,    // Shift Right Word
+  SRDX = 539 << 1,    // Shift Right Double Word
+  LFSUX = 567 << 1,   // load float-single w/ update x-form
+  SYNC = 598 << 1,    // Synchronize
+  LFDX = 599 << 1,    // load float-double w/ x-form
+  LFDUX = 631 << 1,   // load float-double w/ update X-form
+  STFSX = 663 << 1,   // store float-single w/ x-form
+  STFSUX = 695 << 1,  // store float-single w/ update x-form
+  STFDX = 727 << 1,   // store float-double w/ x-form
+  STFDUX = 759 << 1,  // store float-double w/ update x-form
+  SRAW = 792 << 1,    // Shift Right Algebraic Word
+  SRAD = 794 << 1,    // Shift Right Algebraic Double Word
+  SRAWIX = 824 << 1,  // Shift Right Algebraic Word Immediate
+  SRADIX = 413 << 2,  // Shift Right Algebraic Double Word Immediate
+  EXTSH = 922 << 1,   // Extend Sign Halfword
+  EXTSB = 954 << 1,   // Extend Sign Byte
+  ICBI = 982 << 1,    // Instruction Cache Block Invalidate
+  EXTSW = 986 << 1    // Extend Sign Word
+};
+
+// Some use Bits 10-1 and other only 5-1 for the opcode
+enum OpcodeExt4 {
+  // Bits 5-1
+  FDIV = 18 << 1,   // Floating Divide
+  FSUB = 20 << 1,   // Floating Subtract
+  FADD = 21 << 1,   // Floating Add
+  FSQRT = 22 << 1,  // Floating Square Root
+  FSEL = 23 << 1,   // Floating Select
+  FMUL = 25 << 1,   // Floating Multiply
+  FMSUB = 28 << 1,  // Floating Multiply-Subtract
+  FMADD = 29 << 1,  // Floating Multiply-Add
+
+  // Bits 10-1
+  FCMPU = 0 << 1,     // Floating Compare Unordered
+  FRSP = 12 << 1,     // Floating-Point Rounding
+  FCTIW = 14 << 1,    // Floating Convert to Integer Word X-form
+  FCTIWZ = 15 << 1,   // Floating Convert to Integer Word with Round to Zero
+  FNEG = 40 << 1,     // Floating Negate
+  MCRFS = 64 << 1,    // Move to Condition Register from FPSCR
+  FMR = 72 << 1,      // Floating Move Register
+  MTFSFI = 134 << 1,  // Move to FPSCR Field Immediate
+  FABS = 264 << 1,    // Floating Absolute Value
+  FRIM = 488 << 1,    // Floating Round to Integer Minus
+  MFFS = 583 << 1,    // move from FPSCR x-form
+  MTFSF = 711 << 1,   // move to FPSCR fields XFL-form
+  FCTID = 814 << 1,   // Floating Convert to Integer Doubleword
+  FCTIDZ = 815 << 1,  // Floating Convert to Integer Doubleword w/ Round to Zero
+  FCFID = 846 << 1    // Floating Convert From Integer Doubleword
+};
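+
+// Example: fmadd frt, fra, frc, frb computes frt = (fra * frc) + frb. Because
+// FMADD uses only bits 5-1 for its opcode, bits 10-6 are free to carry the
+// FRC operand (cf. RCValue() in class Instruction below).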
+
+enum OpcodeExt5 {
+  // Bits 4-2
+  RLDICL = 0 << 1,  // Rotate Left Double Word Immediate then Clear Left
+  RLDICR = 2 << 1,  // Rotate Left Double Word Immediate then Clear Right
+  RLDIC = 4 << 1,   // Rotate Left Double Word Immediate then Clear
+  RLDIMI = 6 << 1,  // Rotate Left Double Word Immediate then Mask Insert
+  // Bits 4-1
+  RLDCL = 8 << 1,  // Rotate Left Double Word then Clear Left
+  RLDCR = 9 << 1   // Rotate Left Double Word then Clear Right
+};
+
+// Instruction encoding bits and masks.
+enum {
+  // Instruction encoding bit
+  B1 = 1 << 1,
+  B4 = 1 << 4,
+  B5 = 1 << 5,
+  B6 = 1 << 6,
+  B7 = 1 << 7,
+  B8 = 1 << 8,
+  B9 = 1 << 9,
+  B10 = 1 << 10,
+  B11 = 1 << 11,
+  B12 = 1 << 12,
+  B16 = 1 << 16,
+  B17 = 1 << 17,
+  B18 = 1 << 18,
+  B19 = 1 << 19,
+  B20 = 1 << 20,
+  B21 = 1 << 21,
+  B22 = 1 << 22,
+  B23 = 1 << 23,
+  B24 = 1 << 24,
+  B25 = 1 << 25,
+  B26 = 1 << 26,
+  B27 = 1 << 27,
+  B28 = 1 << 28,
+
+  // Instruction bit masks
+  kCondMask = 0x1F << 21,
+  kOff12Mask = (1 << 12) - 1,
+  kImm24Mask = (1 << 24) - 1,
+  kOff16Mask = (1 << 16) - 1,
+  kImm16Mask = (1 << 16) - 1,
+  kImm26Mask = (1 << 26) - 1,
+  kBOfieldMask = 0x1f << 21,
+  kOpcodeMask = 0x3f << 26,
+  kExt1OpcodeMask = 0x3ff << 1,
+  kExt2OpcodeMask = 0x1f << 1,
+  kExt5OpcodeMask = 0x3 << 2,
+  kBOMask = 0x1f << 21,
+  kBIMask = 0x1F << 16,
+  kBDMask = 0x14 << 2,
+  kAAMask = 0x01 << 1,
+  kLKMask = 0x01,
+  kRCMask = 0x01,
+  kTOMask = 0x1f << 21
+};
+
+// The following is used to differentiate the faked opcodes for the BOGUS PPC
+// instruction we invented (when bit 25 is 0) and to mark different stub code
+// (when bit 25 is 1):
+//   - primary opcode 1 denotes an undefined instruction
+//   - bit 25 indicates whether the opcode is a fake instr or a stub marker
+//   - the least significant 6 bits hold the FAKE_OPCODE_T or MARKER_T
+#define FAKE_OPCODE (1 << 26)
+#define MARKER_SUBOPCODE_BIT 25
+#define MARKER_SUBOPCODE (1 << MARKER_SUBOPCODE_BIT)
+#define FAKER_SUBOPCODE (0 << MARKER_SUBOPCODE_BIT)
+
+enum FAKE_OPCODE_T {
+  fBKPT = 14,
+  fLastFaker  // can't be more than 128 (2^7)
+};
+#define FAKE_OPCODE_HIGH_BIT 7  // fake opcode must fall within bits 0-7
+#define F_NEXT_AVAILABLE_STUB_MARKER 369  // must be less than 2^9 (512)
+#define STUB_MARKER_HIGH_BIT 9  // stub marker must fall within bits 0-9
+// -----------------------------------------------------------------------------
+// Addressing modes and instruction variants.
+
+// Overflow Exception
+enum OEBit {
+  SetOE = 1 << 10,   // Set overflow exception
+  LeaveOE = 0 << 10  // No overflow exception
+};
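+
+// Example: addo (add with SetOE) records any overflow in XER[OV] and XER[SO];
+// with LeaveOE the XER bits are left untouched.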
+
+// Record bit
+enum RCBit {   // Bit 0
+  SetRC = 1,   // LT,GT,EQ,SO
+  LeaveRC = 0  // None
+};
+
+// Link bit
+enum LKBit {   // Bit 0
+  SetLK = 1,   // Load effective address of next instruction
+  LeaveLK = 0  // No action
+};
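+
+// Example: a branch with SetLK (e.g. bl) places the address of the next
+// instruction in the link register, while LeaveLK (plain b) leaves LR
+// untouched.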
+
+enum BOfield {        // Bits 25-21
+  DCBNZF = 0 << 21,   // Decrement CTR; branch if CTR != 0 and condition false
+  DCBEZF = 2 << 21,   // Decrement CTR; branch if CTR == 0 and condition false
+  BF = 4 << 21,       // Branch if condition false
+  DCBNZT = 8 << 21,   // Decrement CTR; branch if CTR != 0 and condition true
+  DCBEZT = 10 << 21,  // Decrement CTR; branch if CTR == 0 and condition true
+  BT = 12 << 21,      // Branch if condition true
+  DCBNZ = 16 << 21,   // Decrement CTR; branch if CTR != 0
+  DCBEZ = 18 << 21,   // Decrement CTR; branch if CTR == 0
+  BA = 20 << 21       // Branch always
+};
+
+#if V8_OS_AIX
+#undef CR_LT
+#undef CR_GT
+#undef CR_EQ
+#undef CR_SO
+#endif
+
+enum CRBit { CR_LT = 0, CR_GT = 1, CR_EQ = 2, CR_SO = 3, CR_FU = 3 };
+
+#define CRWIDTH 4
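+
+// Illustrative use: in the BI field of a conditional branch, the CR_EQ bit of
+// condition field n is encoded as n * CRWIDTH + CR_EQ.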
+
+// -----------------------------------------------------------------------------
+// Supervisor Call (svc) specific support.
+
+// Special Software Interrupt codes when used in the presence of the PPC
+// simulator.
+// svc (formerly swi) provides a 24-bit immediate value. Use bits 22:0 for
+// standard SoftwareInterruptCodes. Bit 23 is reserved for the stop feature.
+enum SoftwareInterruptCodes {
+  // transition to C code
+  kCallRtRedirected = 0x10,
+  // break point
+  kBreakpoint = 0x821008,  // bits23-0 of 0x7d821008 = twge r2, r2
+  // stop
+  kStopCode = 1 << 23,
+  // info
+  kInfo = 0x9ff808  // bits23-0 of 0x7d9ff808 = twge r31, r31
+};
+const uint32_t kStopCodeMask = kStopCode - 1;
+const uint32_t kMaxStopCode = kStopCode - 1;
+const int32_t kDefaultStopCode = -1;
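+
+// Example (illustrative): a stop with code 5 is encoded as (kStopCode | 5);
+// the simulator recovers the code with (svc & kStopCodeMask).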
+
+// FP rounding modes.
+enum FPRoundingMode {
+  RN = 0,  // Round to Nearest.
+  RZ = 1,  // Round towards zero.
+  RP = 2,  // Round towards Plus Infinity.
+  RM = 3,  // Round towards Minus Infinity.
+
+  // Aliases.
+  kRoundToNearest = RN,
+  kRoundToZero = RZ,
+  kRoundToPlusInf = RP,
+  kRoundToMinusInf = RM
+};
+
+const uint32_t kFPRoundingModeMask = 3;
+
+enum CheckForInexactConversion {
+  kCheckForInexactConversion,
+  kDontCheckForInexactConversion
+};
+
+// -----------------------------------------------------------------------------
+// Specific instructions, constants, and masks.
+// These constants are declared in assembler-ppc.cc, as they use named
+// registers and other constants.
+
+
+// Instruction pattern for popping the stack pointer (aka Pop()).
+extern const Instr kPopInstruction;
+
+// Instruction pattern for pushing a register (aka push(r));
+// register r is not encoded.
+extern const Instr kPushRegPattern;
+
+// Instruction pattern for popping into a register (aka pop(r));
+// register r is not encoded.
+extern const Instr kPopRegPattern;
+
+// Use TWI to indicate a redirected call in simulation mode.
+const Instr rtCallRedirInstr = TWI;
+
+// -----------------------------------------------------------------------------
+// Instruction abstraction.
+
+// The class Instruction enables access to individual fields defined in the PPC
+// architecture instruction set encoding.
+// Note that the Assembler uses typedef int32_t Instr.
+//
+// Example: Test whether the instruction at ptr sets the record (RC) bit,
+// i.e. updates condition register cr0 (a sketch; RC is bit 0 on PPC):
+//
+// bool InstructionSetsConditionCodes(byte* ptr) {
+//   Instruction* instr = Instruction::At(ptr);
+//   return instr->Bit(0) == 1;
+// }
+//
+class Instruction {
+ public:
+  enum { kInstrSize = 4, kInstrSizeLog2 = 2, kPCReadOffset = 8 };
+
+// Helper macro to define static accessors.
+// We use the cast to char* trick to bypass the strict anti-aliasing rules.
+#define DECLARE_STATIC_TYPED_ACCESSOR(return_type, Name) \
+  static inline return_type Name(Instr instr) {          \
+    char* temp = reinterpret_cast<char*>(&instr);        \
+    return reinterpret_cast<Instruction*>(temp)->Name(); \
+  }
+
+#define DECLARE_STATIC_ACCESSOR(Name) DECLARE_STATIC_TYPED_ACCESSOR(int, Name)
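+
+// Expansion sketch: DECLARE_STATIC_ACCESSOR(RAValue) defines
+//   static inline int RAValue(Instr instr)
+// which reinterprets instr as an Instruction and forwards to the member
+// RAValue() below.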
+
+  // Get the raw instruction bits.
+  inline Instr InstructionBits() const {
+    return *reinterpret_cast<const Instr*>(this);
+  }
+
+  // Set the raw instruction bits to value.
+  inline void SetInstructionBits(Instr value) {
+    *reinterpret_cast<Instr*>(this) = value;
+  }
+
+  // Read one particular bit out of the instruction bits.
+  inline int Bit(int nr) const { return (InstructionBits() >> nr) & 1; }
+
+  // Read a bit field's value out of the instruction bits.
+  inline int Bits(int hi, int lo) const {
+    return (InstructionBits() >> lo) & ((2 << (hi - lo)) - 1);
+  }
+
+  // Read a bit field out of the instruction bits.
+  inline int BitField(int hi, int lo) const {
+    return InstructionBits() & (((2 << (hi - lo)) - 1) << lo);
+  }
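+
+  // Example (a sketch): for add r1, r2, r3 (0x7C221A14), Bits(25, 21) == 1
+  // extracts the RT field and Bits(20, 16) == 2 extracts RA.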
+
+  // Static support.
+
+  // Read one particular bit out of the instruction bits.
+  static inline int Bit(Instr instr, int nr) { return (instr >> nr) & 1; }
+
+  // Read the value of a bit field out of the instruction bits.
+  static inline int Bits(Instr instr, int hi, int lo) {
+    return (instr >> lo) & ((2 << (hi - lo)) - 1);
+  }
+
+
+  // Read a bit field out of the instruction bits.
+  static inline int BitField(Instr instr, int hi, int lo) {
+    return instr & (((2 << (hi - lo)) - 1) << lo);
+  }
+
+  inline int RSValue() const { return Bits(25, 21); }
+  inline int RTValue() const { return Bits(25, 21); }
+  inline int RAValue() const { return Bits(20, 16); }
+  DECLARE_STATIC_ACCESSOR(RAValue);
+  inline int RBValue() const { return Bits(15, 11); }
+  DECLARE_STATIC_ACCESSOR(RBValue);
+  inline int RCValue() const { return Bits(10, 6); }
+  DECLARE_STATIC_ACCESSOR(RCValue);
+
+  inline int OpcodeValue() const { return static_cast<Opcode>(Bits(31, 26)); }
+  inline Opcode OpcodeField() const {
+    return static_cast<Opcode>(BitField(31, 26));
+  }
+
+  // Fields used in Software interrupt instructions
+  inline SoftwareInterruptCodes SvcValue() const {
+    return static_cast<SoftwareInterruptCodes>(Bits(23, 0));
+  }
+
+  // Instructions are read out of a code stream. The only way to get a
+  // reference to an instruction is to convert a pointer. There is no way
+  // to allocate or create instances of class Instruction.
+  // Use the At(pc) function to create references to Instruction.
+  static Instruction* At(byte* pc) {
+    return reinterpret_cast<Instruction*>(pc);
+  }
+
+
+ private:
+  // We need to prevent the creation of instances of class Instruction.
+  DISALLOW_IMPLICIT_CONSTRUCTORS(Instruction);
+};
+
+
+// Helper functions for converting between register numbers and names.
+class Registers {
+ public:
+  // Return the name of the register.
+  static const char* Name(int reg);
+
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+  struct RegisterAlias {
+    int reg;
+    const char* name;
+  };
+
+ private:
+  static const char* names_[kNumRegisters];
+  static const RegisterAlias aliases_[];
+};
+
+// Helper functions for converting between FP register numbers and names.
+class FPRegisters {
+ public:
+  // Return the name of the register.
+  static const char* Name(int reg);
+
+  // Lookup the register number for the name provided.
+  static int Number(const char* name);
+
+ private:
+  static const char* names_[kNumFPRegisters];
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_PPC_CONSTANTS_PPC_H_
diff --git a/deps/v8/src/ppc/cpu-ppc.cc b/deps/v8/src/ppc/cpu-ppc.cc
new file mode 100644 (file)
index 0000000..d42420c
--- /dev/null
@@ -0,0 +1,63 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// CPU-specific code for PPC independent of the OS goes here.
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/assembler.h"
+#include "src/macro-assembler.h"
+#include "src/simulator.h"  // for cache flushing.
+
+namespace v8 {
+namespace internal {
+
+void CpuFeatures::FlushICache(void* buffer, size_t size) {
+  // Nothing to do when flushing zero instructions.
+  if (size == 0) {
+    return;
+  }
+
+#if defined(USE_SIMULATOR)
+  // Not generating PPC instructions for C-code. This means that we are
+  // building a PPC emulator-based target. We should notify the simulator
+  // that the Icache was flushed.
+  // None of this code ends up in the snapshot, so there are no issues
+  // around whether or not to generate the code when building snapshots.
+  Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), buffer, size);
+#else
+
+  if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
+    __asm__ __volatile__(
+        "sync \n"
+        "icbi 0, %0  \n"
+        "isync  \n"
+        : /* no output */
+        : "r"(buffer)
+        : "memory");
+    return;
+  }
+
+  const int kCacheLineSize = CpuFeatures::cache_line_size();
+  intptr_t mask = kCacheLineSize - 1;
+  byte *start =
+      reinterpret_cast<byte *>(reinterpret_cast<intptr_t>(buffer) & ~mask);
+  byte *end = static_cast<byte *>(buffer) + size;
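+  // Illustrative example: with a 128-byte cache line, mask == 0x7F, so a
+  // buffer at 0x1234 yields start == 0x1200, and the loop below then flushes
+  // one cache line per iteration.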
+  for (byte *pointer = start; pointer < end; pointer += kCacheLineSize) {
+    __asm__(
+        "dcbf 0, %0  \n"
+        "sync        \n"
+        "icbi 0, %0  \n"
+        "isync       \n"
+        : /* no output */
+        : "r"(pointer));
+  }
+
+#endif  // USE_SIMULATOR
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/debug-ppc.cc b/deps/v8/src/ppc/debug-ppc.cc
new file mode 100644 (file)
index 0000000..8106853
--- /dev/null
@@ -0,0 +1,343 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/codegen.h"
+#include "src/debug.h"
+
+namespace v8 {
+namespace internal {
+
+bool BreakLocationIterator::IsDebugBreakAtReturn() {
+  return Debug::IsDebugBreakAtReturn(rinfo());
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtReturn() {
+  // Patch the code changing the return from JS function sequence from
+  //
+  //   LeaveFrame
+  //   blr
+  //
+  // to a call to the debug break return code.
+  // This uses a FIXED_SEQUENCE to load an address constant:
+  //
+  //   mov r0, <address>
+  //   mtctr r0
+  //   bctrl
+  //   bkpt
+  //
+  CodePatcher patcher(rinfo()->pc(), Assembler::kJSReturnSequenceInstructions);
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
+  patcher.masm()->mov(
+      v8::internal::r0,
+      Operand(reinterpret_cast<intptr_t>(debug_info_->GetIsolate()
+                                             ->builtins()
+                                             ->Return_DebugBreak()
+                                             ->entry())));
+  patcher.masm()->mtctr(v8::internal::r0);
+  patcher.masm()->bctrl();
+  patcher.masm()->bkpt(0);
+}
+
+
+// Restore the JS frame exit code.
+void BreakLocationIterator::ClearDebugBreakAtReturn() {
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Assembler::kJSReturnSequenceInstructions);
+}
+
+
+// A debug break in the frame exit code is identified by the JS frame exit code
+// having been patched with a call instruction.
+bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
+  DCHECK(RelocInfo::IsJSReturn(rinfo->rmode()));
+  return rinfo->IsPatchedReturnSequence();
+}
+
+
+bool BreakLocationIterator::IsDebugBreakAtSlot() {
+  DCHECK(IsDebugBreakSlot());
+  // Check whether the debug break slot instructions have been patched.
+  return rinfo()->IsPatchedDebugBreakSlotSequence();
+}
+
+
+void BreakLocationIterator::SetDebugBreakAtSlot() {
+  DCHECK(IsDebugBreakSlot());
+  // Patch the code changing the debug break slot code from
+  //
+  //   ori r3, r3, 0
+  //   ori r3, r3, 0
+  //   ori r3, r3, 0
+  //   ori r3, r3, 0
+  //   ori r3, r3, 0
+  //
+  // to a call to the debug break code, using a FIXED_SEQUENCE:
+  //
+  //   mov r0, <address>
+  //   mtctr r0
+  //   bctrl
+  //
+  CodePatcher patcher(rinfo()->pc(), Assembler::kDebugBreakSlotInstructions);
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
+  patcher.masm()->mov(
+      v8::internal::r0,
+      Operand(reinterpret_cast<intptr_t>(
+          debug_info_->GetIsolate()->builtins()->Slot_DebugBreak()->entry())));
+  patcher.masm()->mtctr(v8::internal::r0);
+  patcher.masm()->bctrl();
+}
+
+
+void BreakLocationIterator::ClearDebugBreakAtSlot() {
+  DCHECK(IsDebugBreakSlot());
+  rinfo()->PatchCode(original_rinfo()->pc(),
+                     Assembler::kDebugBreakSlotInstructions);
+}
+
+
+#define __ ACCESS_MASM(masm)
+
+
+static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
+                                          RegList object_regs,
+                                          RegList non_object_regs) {
+  {
+    FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
+
+    // Load padding words on stack.
+    __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingValue));
+    for (int i = 0; i < LiveEdit::kFramePaddingInitialSize; i++) {
+      __ push(ip);
+    }
+    __ LoadSmiLiteral(ip, Smi::FromInt(LiveEdit::kFramePaddingInitialSize));
+    __ push(ip);
+
+    // Store the registers containing live values on the expression stack to
+    // make sure that these are correctly updated during GC. Non-object values
+    // are stored as smis, which the GC leaves untouched.
+    DCHECK((object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((non_object_regs & ~kJSCallerSaved) == 0);
+    DCHECK((object_regs & non_object_regs) == 0);
+    if ((object_regs | non_object_regs) != 0) {
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = {r};
+        if ((non_object_regs & (1 << r)) != 0) {
+          if (FLAG_debug_code) {
+            __ TestUnsignedSmiCandidate(reg, r0);
+            __ Assert(eq, kUnableToEncodeValueAsSmi, cr0);
+          }
+          __ SmiTag(reg);
+        }
+      }
+      __ MultiPush(object_regs | non_object_regs);
+    }
+
+#ifdef DEBUG
+    __ RecordComment("// Calling from debug break to runtime - come in - over");
+#endif
+    __ mov(r3, Operand::Zero());  // no arguments
+    __ mov(r4, Operand(ExternalReference::debug_break(masm->isolate())));
+
+    CEntryStub ceb(masm->isolate(), 1);
+    __ CallStub(&ceb);
+
+    // Restore the register values from the expression stack.
+    if ((object_regs | non_object_regs) != 0) {
+      __ MultiPop(object_regs | non_object_regs);
+      for (int i = 0; i < kNumJSCallerSaved; i++) {
+        int r = JSCallerSavedCode(i);
+        Register reg = {r};
+        if ((non_object_regs & (1 << r)) != 0) {
+          __ SmiUntag(reg);
+        }
+        if (FLAG_debug_code &&
+            (((object_regs | non_object_regs) & (1 << r)) == 0)) {
+          __ mov(reg, Operand(kDebugZapValue));
+        }
+      }
+    }
+
+    // Don't bother removing padding bytes pushed on the stack
+    // as the frame is going to be restored right away.
+
+    // Leave the internal frame.
+  }
+
+  // Now that the break point has been handled, resume normal execution by
+  // jumping to the target address intended by the caller, which was
+  // overwritten by the address of DebugBreakXXX.
+  ExternalReference after_break_target =
+      ExternalReference::debug_after_break_target_address(masm->isolate());
+  __ mov(ip, Operand(after_break_target));
+  __ LoadP(ip, MemOperand(ip));
+  __ JumpToJSEntry(ip);
+}
+
+
+void DebugCodegen::GenerateCallICStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallICStub
+  // ----------- S t a t e -------------
+  //  -- r4 : function
+  //  -- r6 : slot in feedback array (smi)
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, r4.bit() | r6.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateLoadICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC load (from ic-ppc.cc).
+  Register receiver = LoadDescriptor::ReceiverRegister();
+  Register name = LoadDescriptor::NameRegister();
+  RegList regs = receiver.bit() | name.bit();
+  if (FLAG_vector_ics) {
+    regs |= VectorLoadICTrampolineDescriptor::SlotRegister().bit();
+  }
+  Generate_DebugBreakCallHelper(masm, regs, 0);
+}
+
+
+void DebugCodegen::GenerateStoreICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC store (from ic-ppc.cc).
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit() | value.bit(),
+                                0);
+}
+
+
+void DebugCodegen::GenerateKeyedLoadICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for keyed IC load (from ic-ppc.cc).
+  GenerateLoadICDebugBreak(masm);
+}
+
+
+void DebugCodegen::GenerateKeyedStoreICDebugBreak(MacroAssembler* masm) {
+  // Calling convention for IC keyed store call (from ic-ppc.cc).
+  Register receiver = StoreDescriptor::ReceiverRegister();
+  Register name = StoreDescriptor::NameRegister();
+  Register value = StoreDescriptor::ValueRegister();
+  Generate_DebugBreakCallHelper(masm, receiver.bit() | name.bit() | value.bit(),
+                                0);
+}
+
+
+void DebugCodegen::GenerateCompareNilICDebugBreak(MacroAssembler* masm) {
+  // Register state for CompareNil IC
+  // ----------- S t a t e -------------
+  //  -- r3    : value
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, r3.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateReturnDebugBreak(MacroAssembler* masm) {
+  // In places other than IC call sites it is expected that r3 is TOS which
+  // is an object - this is not generally the case so this should be used with
+  // care.
+  Generate_DebugBreakCallHelper(masm, r3.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCallFunctionStubDebugBreak(MacroAssembler* masm) {
+  // Register state for CallFunctionStub (from code-stubs-ppc.cc).
+  // ----------- S t a t e -------------
+  //  -- r4 : function
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, r4.bit(), 0);
+}
+
+
+void DebugCodegen::GenerateCallConstructStubDebugBreak(MacroAssembler* masm) {
+  // Calling convention for CallConstructStub (from code-stubs-ppc.cc)
+  // ----------- S t a t e -------------
+  //  -- r3     : number of arguments (not smi)
+  //  -- r4     : constructor function
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, r4.bit(), r3.bit());
+}
+
+
+void DebugCodegen::GenerateCallConstructStubRecordDebugBreak(
+    MacroAssembler* masm) {
+  // Calling convention for CallConstructStub (from code-stubs-ppc.cc)
+  // ----------- S t a t e -------------
+  //  -- r3     : number of arguments (not smi)
+  //  -- r4     : constructor function
+  //  -- r5     : feedback array
+  //  -- r6     : feedback slot (smi)
+  // -----------------------------------
+  Generate_DebugBreakCallHelper(masm, r4.bit() | r5.bit() | r6.bit(), r3.bit());
+}
+
+
+void DebugCodegen::GenerateSlot(MacroAssembler* masm) {
+  // Generate enough nops to make space for a call instruction. Avoid emitting
+  // the trampoline pool in the debug break slot code.
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm);
+  Label check_codesize;
+  __ bind(&check_codesize);
+  __ RecordDebugBreakSlot();
+  for (int i = 0; i < Assembler::kDebugBreakSlotInstructions; i++) {
+    __ nop(MacroAssembler::DEBUG_BREAK_NOP);
+  }
+  DCHECK_EQ(Assembler::kDebugBreakSlotInstructions,
+            masm->InstructionsGeneratedSince(&check_codesize));
+}
+
+
+void DebugCodegen::GenerateSlotDebugBreak(MacroAssembler* masm) {
+  // In the places where a debug break slot is inserted, no registers can
+  // contain object pointers.
+  Generate_DebugBreakCallHelper(masm, 0, 0);
+}
+
+
+void DebugCodegen::GeneratePlainReturnLiveEdit(MacroAssembler* masm) {
+  __ Ret();
+}
+
+
+void DebugCodegen::GenerateFrameDropperLiveEdit(MacroAssembler* masm) {
+  ExternalReference restarter_frame_function_slot =
+      ExternalReference::debug_restarter_frame_function_pointer_address(
+          masm->isolate());
+  __ mov(ip, Operand(restarter_frame_function_slot));
+  __ li(r4, Operand::Zero());
+  __ StoreP(r4, MemOperand(ip, 0));
+
+  // Load the function pointer off of our current stack frame.
+  __ LoadP(r4, MemOperand(fp, StandardFrameConstants::kConstantPoolOffset -
+                                  kPointerSize));
+
+  // Pop return address, frame and constant pool pointer (if
+  // FLAG_enable_ool_constant_pool).
+  __ LeaveFrame(StackFrame::INTERNAL);
+
+  // Load context from the function.
+  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+  // Get function code.
+  __ LoadP(ip, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(ip, FieldMemOperand(ip, SharedFunctionInfo::kCodeOffset));
+  __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
+
+  // Re-run JSFunction, r4 is function, cp is context.
+  __ Jump(ip);
+}
+
+
+const bool LiveEdit::kFrameDropperSupported = true;
+
+#undef __
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/deoptimizer-ppc.cc b/deps/v8/src/ppc/deoptimizer-ppc.cc
new file mode 100644 (file)
index 0000000..58e9e93
--- /dev/null
@@ -0,0 +1,359 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/codegen.h"
+#include "src/deoptimizer.h"
+#include "src/full-codegen.h"
+#include "src/safepoint-table.h"
+
+namespace v8 {
+namespace internal {
+
+const int Deoptimizer::table_entry_size_ = 8;
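+
+// Each table entry is two instructions -- li ip, <id>; b done -- i.e.
+// 2 * Assembler::kInstrSize bytes (see TableEntryGenerator::GeneratePrologue).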
+
+
+int Deoptimizer::patch_size() {
+#if V8_TARGET_ARCH_PPC64
+  const int kCallInstructionSizeInWords = 7;
+#else
+  const int kCallInstructionSizeInWords = 4;
+#endif
+  return kCallInstructionSizeInWords * Assembler::kInstrSize;
+}
+
+
+void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
+  Address code_start_address = code->instruction_start();
+
+  // Invalidate the relocation information, as it will become invalid by the
+  // code patching below, and is not needed any more.
+  code->InvalidateRelocation();
+
+  if (FLAG_zap_code_space) {
+    // Fail hard and early if we enter this code object again.
+    byte* pointer = code->FindCodeAgeSequence();
+    if (pointer != NULL) {
+      pointer += kNoCodeAgeSequenceLength;
+    } else {
+      pointer = code->instruction_start();
+    }
+    CodePatcher patcher(pointer, 1);
+    patcher.masm()->bkpt(0);
+
+    DeoptimizationInputData* data =
+        DeoptimizationInputData::cast(code->deoptimization_data());
+    int osr_offset = data->OsrPcOffset()->value();
+    if (osr_offset > 0) {
+      CodePatcher osr_patcher(code->instruction_start() + osr_offset, 1);
+      osr_patcher.masm()->bkpt(0);
+    }
+  }
+
+  DeoptimizationInputData* deopt_data =
+      DeoptimizationInputData::cast(code->deoptimization_data());
+#ifdef DEBUG
+  Address prev_call_address = NULL;
+#endif
+  // For each LLazyBailout instruction insert a call to the corresponding
+  // deoptimization entry.
+  for (int i = 0; i < deopt_data->DeoptCount(); i++) {
+    if (deopt_data->Pc(i)->value() == -1) continue;
+    Address call_address = code_start_address + deopt_data->Pc(i)->value();
+    Address deopt_entry = GetDeoptimizationEntry(isolate, i, LAZY);
+    // We need calls to have a predictable size in the unoptimized code, but
+    // this is optimized code, so we don't have to have a predictable size.
+    int call_size_in_bytes = MacroAssembler::CallSizeNotPredictableCodeSize(
+        deopt_entry, kRelocInfo_NONEPTR);
+    int call_size_in_words = call_size_in_bytes / Assembler::kInstrSize;
+    DCHECK(call_size_in_bytes % Assembler::kInstrSize == 0);
+    DCHECK(call_size_in_bytes <= patch_size());
+    CodePatcher patcher(call_address, call_size_in_words);
+    patcher.masm()->Call(deopt_entry, kRelocInfo_NONEPTR);
+    DCHECK(prev_call_address == NULL ||
+           call_address >= prev_call_address + patch_size());
+    DCHECK(call_address + patch_size() <= code->instruction_end());
+#ifdef DEBUG
+    prev_call_address = call_address;
+#endif
+  }
+}
+
+
+void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
+  // Set the register values. The values are not important as there are no
+  // callee saved registers in JavaScript frames, so all registers are
+  // spilled. Registers fp and sp are set to the correct values though.
+
+  for (int i = 0; i < Register::kNumRegisters; i++) {
+    input_->SetRegister(i, i * 4);
+  }
+  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
+  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); i++) {
+    input_->SetDoubleRegister(i, 0.0);
+  }
+
+  // Fill the frame content from the actual data on the frame.
+  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
+    input_->SetFrameSlot(
+        i, reinterpret_cast<intptr_t>(Memory::Address_at(tos + i)));
+  }
+}
+
+
+void Deoptimizer::SetPlatformCompiledStubRegisters(
+    FrameDescription* output_frame, CodeStubDescriptor* descriptor) {
+  ApiFunction function(descriptor->deoptimization_handler());
+  ExternalReference xref(&function, ExternalReference::BUILTIN_CALL, isolate_);
+  intptr_t handler = reinterpret_cast<intptr_t>(xref.address());
+  int params = descriptor->GetHandlerParameterCount();
+  output_frame->SetRegister(r3.code(), params);
+  output_frame->SetRegister(r4.code(), handler);
+}
+
+
+void Deoptimizer::CopyDoubleRegisters(FrameDescription* output_frame) {
+  for (int i = 0; i < DoubleRegister::kMaxNumRegisters; ++i) {
+    double double_value = input_->GetDoubleRegister(i);
+    output_frame->SetDoubleRegister(i, double_value);
+  }
+}
+
+
+bool Deoptimizer::HasAlignmentPadding(JSFunction* function) {
+  // There is no dynamic alignment padding on PPC in the input frame.
+  return false;
+}
+
+
+#define __ masm()->
+
+// This code tries to be close to ia32 code so that any changes can be
+// easily ported.
+void Deoptimizer::EntryGenerator::Generate() {
+  GeneratePrologue();
+
+  // Unlike on ARM we don't save all the registers, just the useful ones.
+  // For the rest, there are gaps on the stack, so the offsets remain the same.
+  const int kNumberOfRegisters = Register::kNumRegisters;
+
+  RegList restored_regs = kJSCallerSaved | kCalleeSaved;
+  RegList saved_regs = restored_regs | sp.bit();
+
+  const int kDoubleRegsSize =
+      kDoubleSize * DoubleRegister::kMaxNumAllocatableRegisters;
+
+  // Save all FPU registers before messing with them.
+  __ subi(sp, sp, Operand(kDoubleRegsSize));
+  for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
+    DoubleRegister fpu_reg = DoubleRegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ stfd(fpu_reg, MemOperand(sp, offset));
+  }
+
+  // Push saved_regs (needed to populate FrameDescription::registers_).
+  // Leave gaps for other registers.
+  __ subi(sp, sp, Operand(kNumberOfRegisters * kPointerSize));
+  for (int16_t i = kNumberOfRegisters - 1; i >= 0; i--) {
+    if ((saved_regs & (1 << i)) != 0) {
+      __ StoreP(ToRegister(i), MemOperand(sp, kPointerSize * i));
+    }
+  }
+
+  const int kSavedRegistersAreaSize =
+      (kNumberOfRegisters * kPointerSize) + kDoubleRegsSize;
+
+  // Get the bailout id from the stack.
+  __ LoadP(r5, MemOperand(sp, kSavedRegistersAreaSize));
+
+  // Get the address of the location in the code object (r6) (return
+  // address for lazy deoptimization) and compute the fp-to-sp delta in
+  // register r7.
+  __ mflr(r6);
+  // Correct one word for bailout id.
+  __ addi(r7, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+  __ sub(r7, fp, r7);
+
+  // Allocate a new deoptimizer object.
+  // Pass six arguments in r3 to r8.
+  __ PrepareCallCFunction(6, r8);
+  __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ li(r4, Operand(type()));  // bailout type,
+  // r5: bailout id already loaded.
+  // r6: code address or 0 already loaded.
+  // r7: Fp-to-sp delta.
+  __ mov(r8, Operand(ExternalReference::isolate_address(isolate())));
+  // Call Deoptimizer::New().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate()), 6);
+  }
+
+  // Preserve "deoptimizer" object in register r3 and get the input
+  // frame descriptor pointer to r4 (deoptimizer->input_);
+  __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+
+  // Copy core registers into FrameDescription::registers_[kNumRegisters].
+  DCHECK(Register::kNumRegisters == kNumberOfRegisters);
+  for (int i = 0; i < kNumberOfRegisters; i++) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    __ LoadP(r5, MemOperand(sp, i * kPointerSize));
+    __ StoreP(r5, MemOperand(r4, offset));
+  }
+
+  int double_regs_offset = FrameDescription::double_registers_offset();
+  // Copy double FP registers to
+  // double_registers_[DoubleRegister::kNumAllocatableRegisters]
+  for (int i = 0; i < DoubleRegister::NumAllocatableRegisters(); ++i) {
+    int dst_offset = i * kDoubleSize + double_regs_offset;
+    int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
+    __ lfd(d0, MemOperand(sp, src_offset));
+    __ stfd(d0, MemOperand(r4, dst_offset));
+  }
+
+  // Remove the bailout id and the saved registers from the stack.
+  __ addi(sp, sp, Operand(kSavedRegistersAreaSize + (1 * kPointerSize)));
+
+  // Compute a pointer to the unwinding limit in register r5; that is
+  // the first stack slot not part of the input frame.
+  __ LoadP(r5, MemOperand(r4, FrameDescription::frame_size_offset()));
+  __ add(r5, r5, sp);
+
+  // Unwind the stack down to - but not including - the unwinding
+  // limit and copy the contents of the activation frame to the input
+  // frame description.
+  __ addi(r6, r4, Operand(FrameDescription::frame_content_offset()));
+  Label pop_loop;
+  Label pop_loop_header;
+  __ b(&pop_loop_header);
+  __ bind(&pop_loop);
+  __ pop(r7);
+  __ StoreP(r7, MemOperand(r6, 0));
+  __ addi(r6, r6, Operand(kPointerSize));
+  __ bind(&pop_loop_header);
+  __ cmp(r5, sp);
+  __ bne(&pop_loop);
+
+  // Compute the output frame in the deoptimizer.
+  __ push(r3);  // Preserve deoptimizer object across call.
+  // r3: deoptimizer object; r4: scratch.
+  __ PrepareCallCFunction(1, r4);
+  // Call Deoptimizer::ComputeOutputFrames().
+  {
+    AllowExternalCallThatCantCauseGC scope(masm());
+    __ CallCFunction(
+        ExternalReference::compute_output_frames_function(isolate()), 1);
+  }
+  __ pop(r3);  // Restore deoptimizer object (class Deoptimizer).
+
+  // Replace the current (input) frame with the output frames.
+  Label outer_push_loop, inner_push_loop, outer_loop_header, inner_loop_header;
+  // Outer loop state: r7 = current "FrameDescription** output_",
+  // r4 = one past the last FrameDescription**.
+  __ lwz(r4, MemOperand(r3, Deoptimizer::output_count_offset()));
+  __ LoadP(r7, MemOperand(r3, Deoptimizer::output_offset()));  // r7 is output_.
+  __ ShiftLeftImm(r4, r4, Operand(kPointerSizeLog2));
+  __ add(r4, r7, r4);
+  __ b(&outer_loop_header);
+
+  __ bind(&outer_push_loop);
+  // Inner loop state: r5 = current FrameDescription*, r6 = loop index.
+  __ LoadP(r5, MemOperand(r7, 0));  // output_[ix]
+  __ LoadP(r6, MemOperand(r5, FrameDescription::frame_size_offset()));
+  __ b(&inner_loop_header);
+
+  __ bind(&inner_push_loop);
+  __ addi(r6, r6, Operand(-sizeof(intptr_t)));
+  __ add(r9, r5, r6);
+  __ LoadP(r9, MemOperand(r9, FrameDescription::frame_content_offset()));
+  __ push(r9);
+
+  __ bind(&inner_loop_header);
+  __ cmpi(r6, Operand::Zero());
+  __ bne(&inner_push_loop);  // test for gt?
+
+  __ addi(r7, r7, Operand(kPointerSize));
+  __ bind(&outer_loop_header);
+  __ cmp(r7, r4);
+  __ blt(&outer_push_loop);
+
+  __ LoadP(r4, MemOperand(r3, Deoptimizer::input_offset()));
+  for (int i = 0; i < DoubleRegister::kMaxNumAllocatableRegisters; ++i) {
+    const DoubleRegister dreg = DoubleRegister::FromAllocationIndex(i);
+    int src_offset = i * kDoubleSize + double_regs_offset;
+    __ lfd(dreg, MemOperand(r4, src_offset));
+  }
+
+  // Push state, pc, and continuation from the last output frame.
+  __ LoadP(r9, MemOperand(r5, FrameDescription::state_offset()));
+  __ push(r9);
+  __ LoadP(r9, MemOperand(r5, FrameDescription::pc_offset()));
+  __ push(r9);
+  __ LoadP(r9, MemOperand(r5, FrameDescription::continuation_offset()));
+  __ push(r9);
+
+  // Restore the registers from the last output frame.
+  DCHECK(!(ip.bit() & restored_regs));
+  __ mr(ip, r5);
+  for (int i = kNumberOfRegisters - 1; i >= 0; i--) {
+    int offset = (i * kPointerSize) + FrameDescription::registers_offset();
+    if ((restored_regs & (1 << i)) != 0) {
+      __ LoadP(ToRegister(i), MemOperand(ip, offset));
+    }
+  }
+
+  __ InitializeRootRegister();
+
+  __ pop(ip);  // get continuation, leave pc on stack
+  __ pop(r0);
+  __ mtlr(r0);
+  __ Jump(ip);
+  __ stop("Unreachable.");
+}
+
+
+void Deoptimizer::TableEntryGenerator::GeneratePrologue() {
+  Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm());
+
+  // Create a sequence of deoptimization entries.
+  // Note that registers are still live when jumping to an entry.
+  Label done;
+  for (int i = 0; i < count(); i++) {
+    int start = masm()->pc_offset();
+    USE(start);
+    __ li(ip, Operand(i));
+    __ b(&done);
+    DCHECK(masm()->pc_offset() - start == table_entry_size_);
+  }
+  __ bind(&done);
+  __ push(ip);
+}
+
+
+void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
+  SetFrameSlot(offset, value);
+}
+
+
+void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
+#if V8_OOL_CONSTANT_POOL
+  DCHECK(FLAG_enable_ool_constant_pool);
+  SetFrameSlot(offset, value);
+#else
+  // No out-of-line constant pool support.
+  UNREACHABLE();
+#endif
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
diff --git a/deps/v8/src/ppc/disasm-ppc.cc b/deps/v8/src/ppc/disasm-ppc.cc
new file mode 100644 (file)
index 0000000..63cec8c
--- /dev/null
@@ -0,0 +1,1353 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A Disassembler object is used to disassemble a block of code instruction by
+// instruction. The default implementation of the NameConverter object can be
+// overridden to modify register names or to do symbol lookup on addresses.
+//
+// The example below will disassemble a block of code and print it to stdout.
+//
+//   NameConverter converter;
+//   Disassembler d(converter);
+//   for (byte* pc = begin; pc < end;) {
+//     v8::internal::EmbeddedVector<char, 256> buffer;
+//     byte* prev_pc = pc;
+//     pc += d.InstructionDecode(buffer, pc);
+//     printf("%p    %08x      %s\n",
+//            prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer);
+//   }
+//
+// The Disassembler class also has a convenience method to disassemble a block
+// of code into a FILE*, meaning that the above functionality could also be
+// achieved by just calling Disassembler::Disassemble(stdout, begin, end);
+
+
+#include <assert.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/base/platform/platform.h"
+#include "src/disasm.h"
+#include "src/macro-assembler.h"
+#include "src/ppc/constants-ppc.h"
+
+
+namespace v8 {
+namespace internal {
+
+
+//------------------------------------------------------------------------------
+
+// Decoder decodes and disassembles instructions into an output buffer.
+// It uses the converter to convert register names and call destinations into
+// more informative description.
+class Decoder {
+ public:
+  Decoder(const disasm::NameConverter& converter, Vector<char> out_buffer)
+      : converter_(converter), out_buffer_(out_buffer), out_buffer_pos_(0) {
+    out_buffer_[out_buffer_pos_] = '\0';
+  }
+
+  ~Decoder() {}
+
+  // Writes one disassembled instruction into 'buffer' (0-terminated).
+  // Returns the length of the disassembled machine instruction in bytes.
+  int InstructionDecode(byte* instruction);
+
+ private:
+  // Bottleneck functions to print into the out_buffer.
+  void PrintChar(const char ch);
+  void Print(const char* str);
+
+  // Printing of common values.
+  void PrintRegister(int reg);
+  void PrintDRegister(int reg);
+  int FormatFPRegister(Instruction* instr, const char* format);
+  void PrintSoftwareInterrupt(SoftwareInterruptCodes svc);
+
+  // Handle formatting of instructions and their options.
+  int FormatRegister(Instruction* instr, const char* option);
+  int FormatOption(Instruction* instr, const char* option);
+  void Format(Instruction* instr, const char* format);
+  void Unknown(Instruction* instr);
+  void UnknownFormat(Instruction* instr, const char* opcname);
+  void MarkerFormat(Instruction* instr, const char* opcname, int id);
+
+  void DecodeExt1(Instruction* instr);
+  void DecodeExt2(Instruction* instr);
+  void DecodeExt4(Instruction* instr);
+  void DecodeExt5(Instruction* instr);
+
+  const disasm::NameConverter& converter_;
+  Vector<char> out_buffer_;
+  int out_buffer_pos_;
+
+  DISALLOW_COPY_AND_ASSIGN(Decoder);
+};
+
+
+// Support for assertions in the Decoder formatting functions.
+#define STRING_STARTS_WITH(string, compare_string) \
+  (strncmp(string, compare_string, strlen(compare_string)) == 0)
+
+
+// Append the ch to the output buffer.
+void Decoder::PrintChar(const char ch) { out_buffer_[out_buffer_pos_++] = ch; }
+
+
+// Append the str to the output buffer.
+void Decoder::Print(const char* str) {
+  char cur = *str++;
+  while (cur != '\0' && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    PrintChar(cur);
+    cur = *str++;
+  }
+  out_buffer_[out_buffer_pos_] = 0;
+}
+
+
+// Print the register name according to the active name converter.
+void Decoder::PrintRegister(int reg) {
+  Print(converter_.NameOfCPURegister(reg));
+}
+
+
+// Print the double FP register name according to the active name converter.
+void Decoder::PrintDRegister(int reg) { Print(FPRegisters::Name(reg)); }
+
+
+// Print SoftwareInterrupt codes. Factoring this out reduces the complexity of
+// the FormatOption method.
+void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes svc) {
+  switch (svc) {
+    case kCallRtRedirected:
+      Print("call rt redirected");
+      return;
+    case kBreakpoint:
+      Print("breakpoint");
+      return;
+    default:
+      if (svc >= kStopCode) {
+        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d - 0x%x",
+                                    svc & kStopCodeMask, svc & kStopCodeMask);
+      } else {
+        out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", svc);
+      }
+      return;
+  }
+}
+
+
+// Handle all register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatRegister(Instruction* instr, const char* format) {
+  DCHECK(format[0] == 'r');
+
+  if ((format[1] == 't') || (format[1] == 's')) {  // 'rt & 'rs register
+    int reg = instr->RTValue();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 'a') {  // 'ra: RA register
+    int reg = instr->RAValue();
+    PrintRegister(reg);
+    return 2;
+  } else if (format[1] == 'b') {  // 'rb: RB register
+    int reg = instr->RBValue();
+    PrintRegister(reg);
+    return 2;
+  }
+
+  UNREACHABLE();
+  return -1;
+}
+
+
+// Handle all FP register based formatting in this function to reduce the
+// complexity of FormatOption.
+int Decoder::FormatFPRegister(Instruction* instr, const char* format) {
+  DCHECK(format[0] == 'D');
+
+  int retval = 2;
+  int reg = -1;
+  if (format[1] == 't') {
+    reg = instr->RTValue();
+  } else if (format[1] == 'a') {
+    reg = instr->RAValue();
+  } else if (format[1] == 'b') {
+    reg = instr->RBValue();
+  } else if (format[1] == 'c') {
+    reg = instr->RCValue();
+  } else {
+    UNREACHABLE();
+  }
+
+  PrintDRegister(reg);
+
+  return retval;
+}
+
+
+// FormatOption takes a formatting string and interprets it based on
+// the current instructions. The format string points to the first
+// character of the option string (the option escape has already been
+// consumed by the caller.)  FormatOption returns the number of
+// characters that were consumed from the formatting string.
+int Decoder::FormatOption(Instruction* instr, const char* format) {
+  switch (format[0]) {
+    case 'o': {
+      if (instr->Bit(10) == 1) {
+        Print("o");
+      }
+      return 1;
+    }
+    case '.': {
+      if (instr->Bit(0) == 1) {
+        Print(".");
+      } else {
+        Print(" ");  // ensure consistent spacing
+      }
+      return 1;
+    }
+    case 'r': {
+      return FormatRegister(instr, format);
+    }
+    case 'D': {
+      return FormatFPRegister(instr, format);
+    }
+    case 'i': {  // int16
+      int32_t value = (instr->Bits(15, 0) << 16) >> 16;
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+      return 5;
+    }
+    case 'u': {  // uint16
+      int32_t value = instr->Bits(15, 0);
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+      return 6;
+    }
+    case 'l': {
+      // Link (LK) Bit 0
+      if (instr->Bit(0) == 1) {
+        Print("l");
+      }
+      return 1;
+    }
+    case 'a': {
+      // Absolute Address Bit 1
+      if (instr->Bit(1) == 1) {
+        Print("a");
+      }
+      return 1;
+    }
+    case 't': {  // 'target: target of branch instructions
+      // target26 or target16
+      DCHECK(STRING_STARTS_WITH(format, "target"));
+      if ((format[6] == '2') && (format[7] == '6')) {
+        int off = ((instr->Bits(25, 2)) << 8) >> 6;
+        out_buffer_pos_ += SNPrintF(
+            out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
+            converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+        return 8;
+      } else if ((format[6] == '1') && (format[7] == '6')) {
+        int off = ((instr->Bits(15, 2)) << 18) >> 16;
+        out_buffer_pos_ += SNPrintF(
+            out_buffer_ + out_buffer_pos_, "%+d -> %s", off,
+            converter_.NameOfAddress(reinterpret_cast<byte*>(instr) + off));
+        return 8;
+      }
+      break;
+    }
+    case 's': {
+      DCHECK(format[1] == 'h');
+      int32_t value = 0;
+      int32_t opcode = instr->OpcodeValue() << 26;
+      int32_t sh = instr->Bits(15, 11);
+      if (opcode == EXT5 ||
+          (opcode == EXT2 && instr->Bits(10, 2) << 2 == SRADIX)) {
+        // SH Bits 1 and 15-11 (split field)
+        value = (sh | (instr->Bit(1) << 5));
+      } else {
+        // SH Bits 15-11
+        value = (sh << 26) >> 26;
+      }
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+      return 2;
+    }
+    case 'm': {
+      int32_t value = 0;
+      if (format[1] == 'e') {
+        if (instr->OpcodeValue() << 26 != EXT5) {
+          // ME Bits 10-6
+          value = (instr->Bits(10, 6) << 26) >> 26;
+        } else {
+          // ME Bits 5 and 10-6 (split field)
+          value = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+        }
+      } else if (format[1] == 'b') {
+        if (instr->OpcodeValue() << 26 != EXT5) {
+          // MB Bits 5-1
+          value = (instr->Bits(5, 1) << 26) >> 26;
+        } else {
+          // MB Bits 5 and 10-6 (split field)
+          value = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+        }
+      } else {
+        UNREACHABLE();  // bad format
+      }
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+      return 2;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case 'd': {  // ds value for offset
+      int32_t value = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
+      out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", value);
+      return 1;
+    }
+#endif
+    default: {
+      UNREACHABLE();
+      break;
+    }
+  }
+
+  UNREACHABLE();
+  return -1;
+}
+
+
+// Format takes a formatting string for a whole instruction and prints it into
+// the output buffer. All escaped options are handed to FormatOption to be
+// parsed further.
+void Decoder::Format(Instruction* instr, const char* format) {
+  char cur = *format++;
+  while ((cur != 0) && (out_buffer_pos_ < (out_buffer_.length() - 1))) {
+    if (cur == '\'') {  // Single quote is used as the formatting escape.
+      format += FormatOption(instr, format);
+    } else {
+      out_buffer_[out_buffer_pos_++] = cur;
+    }
+    cur = *format++;
+  }
+  out_buffer_[out_buffer_pos_] = '\0';
+}
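+
+// Usage sketch: Format(instr, "add'o     'rt, 'ra, 'rb") copies the literal
+// characters and hands each quoted option ('o, 'rt, 'ra, 'rb) to
+// FormatOption, producing output like "add     r4, r3, r5".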
+
+
+// The disassembler may end up decoding data inlined in the code. We do not
+// want it to crash if the data does not resemble any known instruction.
+#define VERIFY(condition) \
+  if (!(condition)) {     \
+    Unknown(instr);       \
+    return;               \
+  }
+
+
+// For currently unimplemented decodings the disassembler calls Unknown(instr),
+// which will just print "unknown" in place of the instruction bits.
+void Decoder::Unknown(Instruction* instr) { Format(instr, "unknown"); }
+
+
+// For currently unimplemented decodings the disassembler calls
+// UnknownFormat(instr), which will just print the opcode name of the
+// instruction.
+void Decoder::UnknownFormat(Instruction* instr, const char* name) {
+  char buffer[100];
+  snprintf(buffer, sizeof(buffer), "%s (unknown-format)", name);
+  Format(instr, buffer);
+}
+
+
+void Decoder::MarkerFormat(Instruction* instr, const char* name, int id) {
+  char buffer[100];
+  snprintf(buffer, sizeof(buffer), "%s %d", name, id);
+  Format(instr, buffer);
+}
+
+
+void Decoder::DecodeExt1(Instruction* instr) {
+  switch (instr->Bits(10, 1) << 1) {
+    case MCRF: {
+      UnknownFormat(instr, "mcrf");  // not used by V8
+      break;
+    }
+    case BCLRX: {
+      switch (instr->Bits(25, 21) << 21) {
+        case DCBNZF: {
+          UnknownFormat(instr, "bclrx-dcbnzf");
+          break;
+        }
+        case DCBEZF: {
+          UnknownFormat(instr, "bclrx-dcbezf");
+          break;
+        }
+        case BF: {
+          UnknownFormat(instr, "bclrx-bf");
+          break;
+        }
+        case DCBNZT: {
+          UnknownFormat(instr, "bclrx-dcbnzt");
+          break;
+        }
+        case DCBEZT: {
+          UnknownFormat(instr, "bclrx-dcbezt");
+          break;
+        }
+        case BT: {
+          UnknownFormat(instr, "bclrx-bt");
+          break;
+        }
+        case DCBNZ: {
+          UnknownFormat(instr, "bclrx-dcbnz");
+          break;
+        }
+        case DCBEZ: {
+          UnknownFormat(instr, "bclrx-dcbez");  // not used by V8
+          break;
+        }
+        case BA: {
+          if (instr->Bit(0) == 1) {
+            Format(instr, "blrl");
+          } else {
+            Format(instr, "blr");
+          }
+          break;
+        }
+      }
+      break;
+    }
+    case BCCTRX: {
+      switch (instr->Bits(25, 21) << 21) {
+        case DCBNZF: {
+          UnknownFormat(instr, "bcctrx-dcbnzf");
+          break;
+        }
+        case DCBEZF: {
+          UnknownFormat(instr, "bcctrx-dcbezf");
+          break;
+        }
+        case BF: {
+          UnknownFormat(instr, "bcctrx-bf");
+          break;
+        }
+        case DCBNZT: {
+          UnknownFormat(instr, "bcctrx-dcbnzt");
+          break;
+        }
+        case DCBEZT: {
+          UnknownFormat(instr, "bcctrx-dcbezf");
+          break;
+        }
+        case BT: {
+          UnknownFormat(instr, "bcctrx-bt");
+          break;
+        }
+        case DCBNZ: {
+          UnknownFormat(instr, "bcctrx-dcbnz");
+          break;
+        }
+        case DCBEZ: {
+          UnknownFormat(instr, "bcctrx-dcbez");
+          break;
+        }
+        case BA: {
+          if (instr->Bit(0) == 1) {
+            Format(instr, "bctrl");
+          } else {
+            Format(instr, "bctr");
+          }
+          break;
+        }
+        default: { UNREACHABLE(); }
+      }
+      break;
+    }
+    case CRNOR: {
+      Format(instr, "crnor (stuff)");
+      break;
+    }
+    case RFI: {
+      Format(instr, "rfi (stuff)");
+      break;
+    }
+    case CRANDC: {
+      Format(instr, "crandc (stuff)");
+      break;
+    }
+    case ISYNC: {
+      Format(instr, "isync (stuff)");
+      break;
+    }
+    case CRXOR: {
+      Format(instr, "crxor (stuff)");
+      break;
+    }
+    case CRNAND: {
+      UnknownFormat(instr, "crnand");
+      break;
+    }
+    case CRAND: {
+      UnknownFormat(instr, "crand");
+      break;
+    }
+    case CREQV: {
+      UnknownFormat(instr, "creqv");
+      break;
+    }
+    case CRORC: {
+      UnknownFormat(instr, "crorc");
+      break;
+    }
+    case CROR: {
+      UnknownFormat(instr, "cror");
+      break;
+    }
+    default: {
+      Unknown(instr);  // not used by V8
+    }
+  }
+}
+
+
+void Decoder::DecodeExt2(Instruction* instr) {
+  // Some encodings use bits 10-1; handle those first.
+  switch (instr->Bits(10, 1) << 1) {
+    case SRWX: {
+      Format(instr, "srw'.    'ra, 'rs, 'rb");
+      return;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case SRDX: {
+      Format(instr, "srd'.    'ra, 'rs, 'rb");
+      return;
+    }
+#endif
+    case SRAW: {
+      Format(instr, "sraw'.   'ra, 'rs, 'rb");
+      return;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case SRAD: {
+      Format(instr, "srad'.   'ra, 'rs, 'rb");
+      return;
+    }
+#endif
+    case SRAWIX: {
+      Format(instr, "srawi'.  'ra,'rs,'sh");
+      return;
+    }
+    case EXTSH: {
+      Format(instr, "extsh'.  'ra, 'rs");
+      return;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case EXTSW: {
+      Format(instr, "extsw'.  'ra, 'rs");
+      return;
+    }
+#endif
+    case EXTSB: {
+      Format(instr, "extsb'.  'ra, 'rs");
+      return;
+    }
+    case LFSX: {
+      Format(instr, "lfsx    'rt, 'ra, 'rb");
+      return;
+    }
+    case LFSUX: {
+      Format(instr, "lfsux   'rt, 'ra, 'rb");
+      return;
+    }
+    case LFDX: {
+      Format(instr, "lfdx    'rt, 'ra, 'rb");
+      return;
+    }
+    case LFDUX: {
+      Format(instr, "lfdux   'rt, 'ra, 'rb");
+      return;
+    }
+    case STFSX: {
+      Format(instr, "stfsx    'rs, 'ra, 'rb");
+      return;
+    }
+    case STFSUX: {
+      Format(instr, "stfsux   'rs, 'ra, 'rb");
+      return;
+    }
+    case STFDX: {
+      Format(instr, "stfdx    'rs, 'ra, 'rb");
+      return;
+    }
+    case STFDUX: {
+      Format(instr, "stfdux   'rs, 'ra, 'rb");
+      return;
+    }
+  }
+
+  switch (instr->Bits(10, 2) << 2) {
+    case SRADIX: {
+      Format(instr, "sradi'.  'ra,'rs,'sh");
+      return;
+    }
+  }
+
+  // ?? are all of these xo_form?
+  switch (instr->Bits(9, 1) << 1) {
+    case CMP: {
+#if V8_TARGET_ARCH_PPC64
+      if (instr->Bit(21)) {
+#endif
+        Format(instr, "cmp     'ra, 'rb");
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        Format(instr, "cmpw    'ra, 'rb");
+      }
+#endif
+      break;
+    }
+    case SLWX: {
+      Format(instr, "slw'.   'ra, 'rs, 'rb");
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case SLDX: {
+      Format(instr, "sld'.   'ra, 'rs, 'rb");
+      break;
+    }
+#endif
+    case SUBFCX: {
+      Format(instr, "subfc'. 'rt, 'ra, 'rb");
+      break;
+    }
+    case ADDCX: {
+      Format(instr, "addc'.   'rt, 'ra, 'rb");
+      break;
+    }
+    case CNTLZWX: {
+      Format(instr, "cntlzw'. 'ra, 'rs");
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case CNTLZDX: {
+      Format(instr, "cntlzd'. 'ra, 'rs");
+      break;
+    }
+#endif
+    case ANDX: {
+      Format(instr, "and'.    'ra, 'rs, 'rb");
+      break;
+    }
+    case ANDCX: {
+      Format(instr, "andc'.   'ra, 'rs, 'rb");
+      break;
+    }
+    case CMPL: {
+#if V8_TARGET_ARCH_PPC64
+      if (instr->Bit(21)) {
+#endif
+        Format(instr, "cmpl    'ra, 'rb");
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        Format(instr, "cmplw   'ra, 'rb");
+      }
+#endif
+      break;
+    }
+    case NEGX: {
+      Format(instr, "neg'.    'rt, 'ra");
+      break;
+    }
+    case NORX: {
+      Format(instr, "nor'.    'rt, 'ra, 'rb");
+      break;
+    }
+    case SUBFX: {
+      Format(instr, "subf'.   'rt, 'ra, 'rb");
+      break;
+    }
+    case MULHWX: {
+      Format(instr, "mulhw'o'.  'rt, 'ra, 'rb");
+      break;
+    }
+    case ADDZEX: {
+      Format(instr, "addze'.   'rt, 'ra");
+      break;
+    }
+    case MULLW: {
+      Format(instr, "mullw'o'.  'rt, 'ra, 'rb");
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case MULLD: {
+      Format(instr, "mulld'o'.  'rt, 'ra, 'rb");
+      break;
+    }
+#endif
+    case DIVW: {
+      Format(instr, "divw'o'.   'rt, 'ra, 'rb");
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case DIVD: {
+      Format(instr, "divd'o'.   'rt, 'ra, 'rb");
+      break;
+    }
+#endif
+    case ADDX: {
+      Format(instr, "add'o     'rt, 'ra, 'rb");
+      break;
+    }
+    case XORX: {
+      Format(instr, "xor'.    'ra, 'rs, 'rb");
+      break;
+    }
+    case ORX: {
+      if (instr->RTValue() == instr->RBValue()) {
+        Format(instr, "mr      'ra, 'rb");
+      } else {
+        Format(instr, "or      'ra, 'rs, 'rb");
+      }
+      break;
+    }
+    case MFSPR: {
+      int spr = instr->Bits(20, 11);
+      if (256 == spr) {
+        Format(instr, "mflr    'rt");
+      } else {
+        Format(instr, "mfspr   'rt ??");
+      }
+      break;
+    }
+    case MTSPR: {
+      int spr = instr->Bits(20, 11);
+      if (256 == spr) {
+        Format(instr, "mtlr    'rt");
+      } else if (288 == spr) {
+        Format(instr, "mtctr   'rt");
+      } else {
+        Format(instr, "mtspr   'rt ??");
+      }
+      break;
+    }
+    case MFCR: {
+      Format(instr, "mfcr    'rt");
+      break;
+    }
+    case STWX: {
+      Format(instr, "stwx    'rs, 'ra, 'rb");
+      break;
+    }
+    case STWUX: {
+      Format(instr, "stwux   'rs, 'ra, 'rb");
+      break;
+    }
+    case STBX: {
+      Format(instr, "stbx    'rs, 'ra, 'rb");
+      break;
+    }
+    case STBUX: {
+      Format(instr, "stbux   'rs, 'ra, 'rb");
+      break;
+    }
+    case STHX: {
+      Format(instr, "sthx    'rs, 'ra, 'rb");
+      break;
+    }
+    case STHUX: {
+      Format(instr, "sthux   'rs, 'ra, 'rb");
+      break;
+    }
+    case LWZX: {
+      Format(instr, "lwzx    'rt, 'ra, 'rb");
+      break;
+    }
+    case LWZUX: {
+      Format(instr, "lwzux   'rt, 'ra, 'rb");
+      break;
+    }
+    case LBZX: {
+      Format(instr, "lbzx    'rt, 'ra, 'rb");
+      break;
+    }
+    case LBZUX: {
+      Format(instr, "lbzux   'rt, 'ra, 'rb");
+      break;
+    }
+    case LHZX: {
+      Format(instr, "lhzx    'rt, 'ra, 'rb");
+      break;
+    }
+    case LHZUX: {
+      Format(instr, "lhzux   'rt, 'ra, 'rb");
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case LDX: {
+      Format(instr, "ldx     'rt, 'ra, 'rb");
+      break;
+    }
+    case LDUX: {
+      Format(instr, "ldux    'rt, 'ra, 'rb");
+      break;
+    }
+    case STDX: {
+      Format(instr, "stdx    'rt, 'ra, 'rb");
+      break;
+    }
+    case STDUX: {
+      Format(instr, "stdux   'rt, 'ra, 'rb");
+      break;
+    }
+    case MFVSRD: {
+      Format(instr, "mffprd  'ra, 'Dt");
+      break;
+    }
+    case MFVSRWZ: {
+      Format(instr, "mffprwz 'ra, 'Dt");
+      break;
+    }
+    case MTVSRD: {
+      Format(instr, "mtfprd  'Dt, 'ra");
+      break;
+    }
+    case MTVSRWA: {
+      Format(instr, "mtfprwa 'Dt, 'ra");
+      break;
+    }
+    case MTVSRWZ: {
+      Format(instr, "mtfprwz 'Dt, 'ra");
+      break;
+    }
+#endif
+    default: {
+      Unknown(instr);  // not used by V8
+    }
+  }
+}
+
+
+void Decoder::DecodeExt4(Instruction* instr) {
+  switch (instr->Bits(5, 1) << 1) {
+    case FDIV: {
+      Format(instr, "fdiv'.   'Dt, 'Da, 'Db");
+      return;
+    }
+    case FSUB: {
+      Format(instr, "fsub'.   'Dt, 'Da, 'Db");
+      return;
+    }
+    case FADD: {
+      Format(instr, "fadd'.   'Dt, 'Da, 'Db");
+      return;
+    }
+    case FSQRT: {
+      Format(instr, "fsqrt'.  'Dt, 'Db");
+      return;
+    }
+    case FSEL: {
+      Format(instr, "fsel'.   'Dt, 'Da, 'Dc, 'Db");
+      return;
+    }
+    case FMUL: {
+      Format(instr, "fmul'.   'Dt, 'Da, 'Dc");
+      return;
+    }
+    case FMSUB: {
+      Format(instr, "fmsub'.  'Dt, 'Da, 'Dc, 'Db");
+      return;
+    }
+    case FMADD: {
+      Format(instr, "fmadd'.  'Dt, 'Da, 'Dc, 'Db");
+      return;
+    }
+  }
+
+  switch (instr->Bits(10, 1) << 1) {
+    case FCMPU: {
+      Format(instr, "fcmpu   'Da, 'Db");
+      break;
+    }
+    case FRSP: {
+      Format(instr, "frsp'.   'Dt, 'Db");
+      break;
+    }
+    case FCFID: {
+      Format(instr, "fcfid'.  'Dt, 'Db");
+      break;
+    }
+    case FCTID: {
+      Format(instr, "fctid   'Dt, 'Db");
+      break;
+    }
+    case FCTIDZ: {
+      Format(instr, "fctidz  'Dt, 'Db");
+      break;
+    }
+    case FCTIW: {
+      Format(instr, "fctiw'. 'Dt, 'Db");
+      break;
+    }
+    case FCTIWZ: {
+      Format(instr, "fctiwz'. 'Dt, 'Db");
+      break;
+    }
+    case FMR: {
+      Format(instr, "fmr'.    'Dt, 'Db");
+      break;
+    }
+    case MTFSFI: {
+      Format(instr, "mtfsfi'.  ?,?");
+      break;
+    }
+    case MFFS: {
+      Format(instr, "mffs'.   'Dt");
+      break;
+    }
+    case MTFSF: {
+      Format(instr, "mtfsf'.  'Db ?,?,?");
+      break;
+    }
+    case FABS: {
+      Format(instr, "fabs'.   'Dt, 'Db");
+      break;
+    }
+    case FRIM: {
+      Format(instr, "frim    'Dt, 'Db");
+      break;
+    }
+    case FNEG: {
+      Format(instr, "fneg'.   'Dt, 'Db");
+      break;
+    }
+    default: {
+      Unknown(instr);  // not used by V8
+    }
+  }
+}
+
+
+void Decoder::DecodeExt5(Instruction* instr) {
+  switch (instr->Bits(4, 2) << 2) {
+    case RLDICL: {
+      Format(instr, "rldicl'. 'ra, 'rs, 'sh, 'mb");
+      return;
+    }
+    case RLDICR: {
+      Format(instr, "rldicr'. 'ra, 'rs, 'sh, 'me");
+      return;
+    }
+    case RLDIC: {
+      Format(instr, "rldic'.  'ra, 'rs, 'sh, 'mb");
+      return;
+    }
+    case RLDIMI: {
+      Format(instr, "rldimi'. 'ra, 'rs, 'sh, 'mb");
+      return;
+    }
+  }
+  switch (instr->Bits(4, 1) << 1) {
+    case RLDCL: {
+      Format(instr, "rldcl'.  'ra, 'rs, 'sb, 'mb");
+      return;
+    }
+  }
+  Unknown(instr);  // not used by V8
+}
+
+#undef VERIFIY
+
+// Disassemble the instruction at *instr_ptr into the output buffer.
+int Decoder::InstructionDecode(byte* instr_ptr) {
+  Instruction* instr = Instruction::At(instr_ptr);
+  // Print raw instruction bytes.
+  out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%08x       ",
+                              instr->InstructionBits());
+
+  switch (instr->OpcodeValue() << 26) {
+    case TWI: {
+      PrintSoftwareInterrupt(instr->SvcValue());
+      break;
+    }
+    case MULLI: {
+      UnknownFormat(instr, "mulli");
+      break;
+    }
+    case SUBFIC: {
+      Format(instr, "subfic  'rt, 'ra, 'int16");
+      break;
+    }
+    case CMPLI: {
+#if V8_TARGET_ARCH_PPC64
+      if (instr->Bit(21)) {
+#endif
+        Format(instr, "cmpli   'ra, 'uint16");
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        Format(instr, "cmplwi  'ra, 'uint16");
+      }
+#endif
+      break;
+    }
+    case CMPI: {
+#if V8_TARGET_ARCH_PPC64
+      if (instr->Bit(21)) {
+#endif
+        Format(instr, "cmpi    'ra, 'int16");
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        Format(instr, "cmpwi   'ra, 'int16");
+      }
+#endif
+      break;
+    }
+    case ADDIC: {
+      Format(instr, "addic   'rt, 'ra, 'int16");
+      break;
+    }
+    case ADDICx: {
+      UnknownFormat(instr, "addicx");
+      break;
+    }
+    case ADDI: {
+      if (instr->RAValue() == 0) {
+        // With ra == 0, addi is the load-immediate (li) pseudo-op.
+        Format(instr, "li      'rt, 'int16");
+      } else {
+        Format(instr, "addi    'rt, 'ra, 'int16");
+      }
+      break;
+    }
+    case ADDIS: {
+      if (instr->RAValue() == 0) {
+        Format(instr, "lis     'rt, 'int16");
+      } else {
+        Format(instr, "addis   'rt, 'ra, 'int16");
+      }
+      break;
+    }
+    case BCX: {
+      int bo = instr->Bits(25, 21) << 21;
+      int bi = instr->Bits(20, 16);
+      switch (bi) {
+        case 2:
+        case 30:
+          if (BT == bo) {
+            Format(instr, "beq'l'a 'target16");
+            break;
+          }
+          if (BF == bo) {
+            Format(instr, "bne'l'a 'target16");
+            break;
+          }
+          Format(instr, "bc'l'a 'target16");
+          break;
+        case 29:
+          if (BT == bo) {
+            Format(instr, "bgt'l'a 'target16");
+            break;
+          }
+          if (BF == bo) {
+            Format(instr, "ble'l'a 'target16");
+            break;
+          }
+          Format(instr, "bc'l'a 'target16");
+          break;
+        case 28:
+          if (BT == bo) {
+            Format(instr, "blt'l'a 'target16");
+            break;
+          }
+          if (BF == bo) {
+            Format(instr, "bge'l'a 'target16");
+            break;
+          }
+          Format(instr, "bc'l'a 'target16");
+          break;
+        default:
+          Format(instr, "bc'l'a 'target16");
+          break;
+      }
+      break;
+    }
+    case SC: {
+      UnknownFormat(instr, "sc");
+      break;
+    }
+    case BX: {
+      Format(instr, "b'l'a 'target26");
+      break;
+    }
+    case EXT1: {
+      DecodeExt1(instr);
+      break;
+    }
+    case RLWIMIX: {
+      Format(instr, "rlwimi'. 'ra, 'rs, 'sh, 'me, 'mb");
+      break;
+    }
+    case RLWINMX: {
+      Format(instr, "rlwinm'. 'ra, 'rs, 'sh, 'me, 'mb");
+      break;
+    }
+    case RLWNMX: {
+      Format(instr, "rlwnm'.  'ra, 'rs, 'rb, 'me, 'mb");
+      break;
+    }
+    case ORI: {
+      Format(instr, "ori     'ra, 'rs, 'uint16");
+      break;
+    }
+    case ORIS: {
+      Format(instr, "oris    'ra, 'rs, 'uint16");
+      break;
+    }
+    case XORI: {
+      Format(instr, "xori    'ra, 'rs, 'uint16");
+      break;
+    }
+    case XORIS: {
+      Format(instr, "xoris   'ra, 'rs, 'uint16");
+      break;
+    }
+    case ANDIx: {
+      Format(instr, "andi.   'ra, 'rs, 'uint16");
+      break;
+    }
+    case ANDISx: {
+      Format(instr, "andis.  'ra, 'rs, 'uint16");
+      break;
+    }
+    case EXT2: {
+      DecodeExt2(instr);
+      break;
+    }
+    case LWZ: {
+      Format(instr, "lwz     'rt, 'int16('ra)");
+      break;
+    }
+    case LWZU: {
+      Format(instr, "lwzu    'rt, 'int16('ra)");
+      break;
+    }
+    case LBZ: {
+      Format(instr, "lbz     'rt, 'int16('ra)");
+      break;
+    }
+    case LBZU: {
+      Format(instr, "lbzu    'rt, 'int16('ra)");
+      break;
+    }
+    case STW: {
+      Format(instr, "stw     'rs, 'int16('ra)");
+      break;
+    }
+    case STWU: {
+      Format(instr, "stwu    'rs, 'int16('ra)");
+      break;
+    }
+    case STB: {
+      Format(instr, "stb     'rs, 'int16('ra)");
+      break;
+    }
+    case STBU: {
+      Format(instr, "stbu    'rs, 'int16('ra)");
+      break;
+    }
+    case LHZ: {
+      Format(instr, "lhz     'rt, 'int16('ra)");
+      break;
+    }
+    case LHZU: {
+      Format(instr, "lhzu    'rt, 'int16('ra)");
+      break;
+    }
+    case LHA: {
+      Format(instr, "lha     'rt, 'int16('ra)");
+      break;
+    }
+    case LHAU: {
+      Format(instr, "lhau    'rt, 'int16('ra)");
+      break;
+    }
+    case STH: {
+      Format(instr, "sth 'rs, 'int16('ra)");
+      break;
+    }
+    case STHU: {
+      Format(instr, "sthu 'rs, 'int16('ra)");
+      break;
+    }
+    case LMW: {
+      UnknownFormat(instr, "lmw");
+      break;
+    }
+    case STMW: {
+      UnknownFormat(instr, "stmw");
+      break;
+    }
+    case LFS: {
+      Format(instr, "lfs     'Dt, 'int16('ra)");
+      break;
+    }
+    case LFSU: {
+      Format(instr, "lfsu    'Dt, 'int16('ra)");
+      break;
+    }
+    case LFD: {
+      Format(instr, "lfd     'Dt, 'int16('ra)");
+      break;
+    }
+    case LFDU: {
+      Format(instr, "lfdu    'Dt, 'int16('ra)");
+      break;
+    }
+    case STFS: {
+      Format(instr, "stfs    'Dt, 'int16('ra)");
+      break;
+    }
+    case STFSU: {
+      Format(instr, "stfsu   'Dt, 'int16('ra)");
+      break;
+    }
+    case STFD: {
+      Format(instr, "stfd    'Dt, 'int16('ra)");
+      break;
+    }
+    case STFDU: {
+      Format(instr, "stfdu   'Dt, 'int16('ra)");
+      break;
+    }
+    case EXT3:
+    case EXT4: {
+      DecodeExt4(instr);
+      break;
+    }
+    case EXT5: {
+      DecodeExt5(instr);
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case LD: {
+      switch (instr->Bits(1, 0)) {
+        case 0:
+          Format(instr, "ld      'rt, 'd('ra)");
+          break;
+        case 1:
+          Format(instr, "ldu     'rt, 'd('ra)");
+          break;
+        case 2:
+          Format(instr, "lwa     'rt, 'd('ra)");
+          break;
+      }
+      break;
+    }
+    case STD: {  // could be STD or STDU
+      if (instr->Bit(0) == 0) {
+        Format(instr, "std     'rs, 'd('ra)");
+      } else {
+        Format(instr, "stdu    'rs, 'd('ra)");
+      }
+      break;
+    }
+#endif
+
+    case FAKE_OPCODE: {
+      if (instr->Bits(MARKER_SUBOPCODE_BIT, MARKER_SUBOPCODE_BIT) == 1) {
+        int marker_code = instr->Bits(STUB_MARKER_HIGH_BIT, 0);
+        DCHECK(marker_code < F_NEXT_AVAILABLE_STUB_MARKER);
+        MarkerFormat(instr, "stub-marker ", marker_code);
+      } else {
+        int fake_opcode = instr->Bits(FAKE_OPCODE_HIGH_BIT, 0);
+        MarkerFormat(instr, "faker-opcode ", fake_opcode);
+      }
+      break;
+    }
+    default: {
+      Unknown(instr);
+      break;
+    }
+  }
+
+  return Instruction::kInstrSize;
+}
+}
+}  // namespace v8::internal
+
+
+//------------------------------------------------------------------------------
+
+namespace disasm {
+
+
+const char* NameConverter::NameOfAddress(byte* addr) const {
+  v8::internal::SNPrintF(tmp_buffer_, "%p", addr);
+  return tmp_buffer_.start();
+}
+
+
+const char* NameConverter::NameOfConstant(byte* addr) const {
+  return NameOfAddress(addr);
+}
+
+
+const char* NameConverter::NameOfCPURegister(int reg) const {
+  return v8::internal::Registers::Name(reg);
+}
+
+const char* NameConverter::NameOfByteCPURegister(int reg) const {
+  UNREACHABLE();  // PPC does not have the concept of a byte register
+  return "nobytereg";
+}
+
+
+const char* NameConverter::NameOfXMMRegister(int reg) const {
+  UNREACHABLE();  // PPC does not have any XMM registers
+  return "noxmmreg";
+}
+
+const char* NameConverter::NameInCode(byte* addr) const {
+  // The default name converter is called for unknown code, so we do not try
+  // to access any memory.
+  return "";
+}
+
+
+//------------------------------------------------------------------------------
+
+Disassembler::Disassembler(const NameConverter& converter)
+    : converter_(converter) {}
+
+
+Disassembler::~Disassembler() {}
+
+
+int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
+                                    byte* instruction) {
+  v8::internal::Decoder d(converter_, buffer);
+  return d.InstructionDecode(instruction);
+}
+
+
+// The PPC assembler does not currently use constant pools, so -1 (meaning
+// "no constant pool here") is returned for every address.
+int Disassembler::ConstantPoolSizeAt(byte* instruction) { return -1; }
+
+
+void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
+  NameConverter converter;
+  Disassembler d(converter);
+  for (byte* pc = begin; pc < end;) {
+    v8::internal::EmbeddedVector<char, 128> buffer;
+    buffer[0] = '\0';
+    byte* prev_pc = pc;
+    pc += d.InstructionDecode(buffer, pc);
+    v8::internal::PrintF(f, "%p    %08x      %s\n", prev_pc,
+                         *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
+  }
+}
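+// Usage sketch (illustrative only): given 0x7c632214 ("add r3, r3, r4") at
+// pc, Disassemble(stdout, pc, pc + 4) prints the address, the raw instruction
+// bits, and the decoded text for that one instruction.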
+
+
+}  // namespace disasm
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/frames-ppc.cc b/deps/v8/src/ppc/frames-ppc.cc
new file mode 100644 (file)
index 0000000..4b52882
--- /dev/null
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/assembler.h"
+#include "src/frames.h"
+#include "src/macro-assembler.h"
+
+#include "src/ppc/assembler-ppc.h"
+#include "src/ppc/assembler-ppc-inl.h"
+#include "src/ppc/macro-assembler-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+
+Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
+Register JavaScriptFrame::context_register() { return cp; }
+Register JavaScriptFrame::constant_pool_pointer_register() {
+#if V8_OOL_CONSTANT_POOL
+  DCHECK(FLAG_enable_ool_constant_pool);
+  return kConstantPoolRegister;
+#else
+  UNREACHABLE();
+  return no_reg;
+#endif
+}
+
+
+Register StubFailureTrampolineFrame::fp_register() { return v8::internal::fp; }
+Register StubFailureTrampolineFrame::context_register() { return cp; }
+Register StubFailureTrampolineFrame::constant_pool_pointer_register() {
+#if V8_OOL_CONSTANT_POOL
+  DCHECK(FLAG_enable_ool_constant_pool);
+  return kConstantPoolRegister;
+#else
+  UNREACHABLE();
+  return no_reg;
+#endif
+}
+
+
+Object*& ExitFrame::constant_pool_slot() const {
+#if V8_OOL_CONSTANT_POOL
+  DCHECK(FLAG_enable_ool_constant_pool);
+  const int offset = ExitFrameConstants::kConstantPoolOffset;
+  return Memory::Object_at(fp() + offset);
+#else
+  UNREACHABLE();
+  return Memory::Object_at(NULL);
+#endif
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/frames-ppc.h b/deps/v8/src/ppc/frames-ppc.h
new file mode 100644 (file)
index 0000000..f00fa66
--- /dev/null
@@ -0,0 +1,202 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_FRAMES_PPC_H_
+#define V8_PPC_FRAMES_PPC_H_
+
+namespace v8 {
+namespace internal {
+
+
+// Register list in load/store instructions
+// Note that the bit values must match those used in actual instruction encoding
+const int kNumRegs = 32;
+
+
+// Caller-saved/arguments registers
+const RegList kJSCallerSaved = 1 << 3 |   // r3  a1
+                               1 << 4 |   // r4  a2
+                               1 << 5 |   // r5  a3
+                               1 << 6 |   // r6  a4
+                               1 << 7 |   // r7  a5
+                               1 << 8 |   // r8  a6
+                               1 << 9 |   // r9  a7
+                               1 << 10 |  // r10 a8
+                               1 << 11;  // r11
+
+const int kNumJSCallerSaved = 9;
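+// (Illustrative: kJSCallerSaved therefore has bits 3 through 11 set, i.e.
+// 0x00000ff8, and kNumJSCallerSaved counts those nine bits.)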
+
+// Return the code of the n-th caller-saved register available to JavaScript,
+// e.g. JSCallerSavedCode(0) returns r3.code() == 3.
+int JSCallerSavedCode(int n);
+
+
+// Callee-saved registers preserved when switching from C to JavaScript
+const RegList kCalleeSaved = 1 << 14 |  // r14
+                             1 << 15 |  // r15
+                             1 << 16 |  // r16
+                             1 << 17 |  // r17
+                             1 << 18 |  // r18
+                             1 << 19 |  // r19
+                             1 << 20 |  // r20
+                             1 << 21 |  // r21
+                             1 << 22 |  // r22
+                             1 << 23 |  // r23
+                             1 << 24 |  // r24
+                             1 << 25 |  // r25
+                             1 << 26 |  // r26
+                             1 << 27 |  // r27
+                             1 << 28 |  // r28
+                             1 << 29 |  // r29
+                             1 << 30 |  // r30
+                             1 << 31;   // r31
+
+
+const int kNumCalleeSaved = 18;
+
+// Number of registers for which space is reserved in safepoints. Must be a
+// multiple of 8.
+// TODO(regis): Only 8 registers may actually be sufficient. Revisit.
+const int kNumSafepointRegisters = 32;
+
+// Define the list of registers actually saved at safepoints.
+// Note that the number of saved registers may be smaller than the reserved
+// space, i.e. kNumSafepointSavedRegisters <= kNumSafepointRegisters.
+const RegList kSafepointSavedRegisters = kJSCallerSaved | kCalleeSaved;
+const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
+
+// The following constants describe the stack frame linkage area as
+// defined by the ABI.  Note that kNumRequiredStackFrameSlots must
+// satisfy alignment requirements (rounding up if required).
+#if V8_TARGET_ARCH_PPC64 && V8_TARGET_LITTLE_ENDIAN
+// [0] back chain
+// [1] condition register save area
+// [2] link register save area
+// [3] TOC save area
+// [4] Parameter1 save area
+// ...
+// [11] Parameter8 save area
+// [12] Parameter9 slot (if necessary)
+// ...
+const int kNumRequiredStackFrameSlots = 12;
+const int kStackFrameLRSlot = 2;
+const int kStackFrameExtraParamSlot = 12;
+#elif V8_OS_AIX || V8_TARGET_ARCH_PPC64
+// [0] back chain
+// [1] condition register save area
+// [2] link register save area
+// [3] reserved for compiler
+// [4] reserved by binder
+// [5] TOC save area
+// [6] Parameter1 save area
+// ...
+// [13] Parameter8 save area
+// [14] Parameter9 slot (if necessary)
+// ...
+#if V8_TARGET_ARCH_PPC64
+const int kNumRequiredStackFrameSlots = 14;
+#else
+const int kNumRequiredStackFrameSlots = 16;
+#endif
+const int kStackFrameLRSlot = 2;
+const int kStackFrameExtraParamSlot = 14;
+#else
+// [0] back chain
+// [1] link register save area
+// [2] Parameter9 slot (if necessary)
+// ...
+const int kNumRequiredStackFrameSlots = 4;
+const int kStackFrameLRSlot = 1;
+const int kStackFrameExtraParamSlot = 2;
+#endif
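+// For example (a sketch, not normative): on ELFv2 a call passing ten
+// arguments reserves kNumRequiredStackFrameSlots + 2 slots; the ninth
+// argument goes in slot kStackFrameExtraParamSlot and the tenth in the
+// slot after it.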
+
+// ----------------------------------------------------
+
+
+class EntryFrameConstants : public AllStatic {
+ public:
+  static const int kCallerFPOffset =
+      -(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize);
+};
+
+
+class ExitFrameConstants : public AllStatic {
+ public:
+#if V8_OOL_CONSTANT_POOL
+  static const int kFrameSize = 3 * kPointerSize;
+  static const int kConstantPoolOffset = -3 * kPointerSize;
+#else
+  static const int kFrameSize = 2 * kPointerSize;
+  static const int kConstantPoolOffset = 0;  // Not used.
+#endif
+  static const int kCodeOffset = -2 * kPointerSize;
+  static const int kSPOffset = -1 * kPointerSize;
+
+  // The caller fields are below the frame pointer on the stack.
+  static const int kCallerFPOffset = 0 * kPointerSize;
+  // The calling JS function is below FP.
+  static const int kCallerPCOffset = 1 * kPointerSize;
+
+  // FP-relative displacement of the caller's SP.  It points just
+  // below the saved PC.
+  static const int kCallerSPDisplacement = 2 * kPointerSize;
+};
+
+
+class JavaScriptFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
+  static const int kLastParameterOffset = +2 * kPointerSize;
+  static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
+
+  // Caller SP-relative.
+  static const int kParam0Offset = -2 * kPointerSize;
+  static const int kReceiverOffset = -1 * kPointerSize;
+};
+
+
+class ArgumentsAdaptorFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kLengthOffset = StandardFrameConstants::kExpressionsOffset;
+
+  static const int kFrameSize =
+      StandardFrameConstants::kFixedFrameSize + kPointerSize;
+};
+
+
+class ConstructFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kImplicitReceiverOffset = -6 * kPointerSize;
+  static const int kConstructorOffset = -5 * kPointerSize;
+  static const int kLengthOffset = -4 * kPointerSize;
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+
+  static const int kFrameSize =
+      StandardFrameConstants::kFixedFrameSize + 4 * kPointerSize;
+};
+
+
+class InternalFrameConstants : public AllStatic {
+ public:
+  // FP-relative.
+  static const int kCodeOffset = StandardFrameConstants::kExpressionsOffset;
+};
+
+
+inline Object* JavaScriptFrame::function_slot_object() const {
+  const int offset = JavaScriptFrameConstants::kFunctionOffset;
+  return Memory::Object_at(fp() + offset);
+}
+
+
+inline void StackHandler::SetFp(Address slot, Address fp) {
+  Memory::Address_at(slot) = fp;
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_PPC_FRAMES_PPC_H_
diff --git a/deps/v8/src/ppc/full-codegen-ppc.cc b/deps/v8/src/ppc/full-codegen-ppc.cc
new file mode 100644 (file)
index 0000000..1bb4f54
--- /dev/null
@@ -0,0 +1,5290 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/codegen.h"
+#include "src/compiler.h"
+#include "src/debug.h"
+#include "src/full-codegen.h"
+#include "src/ic/ic.h"
+#include "src/isolate-inl.h"
+#include "src/parser.h"
+#include "src/scopes.h"
+
+#include "src/ppc/code-stubs-ppc.h"
+#include "src/ppc/macro-assembler-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+#define __ ACCESS_MASM(masm_)
+
+// A patch site is a location in the code that can be patched later. This
+// class has methods to emit the patchable code and a method, EmitPatchInfo,
+// to record a marker back to the patchable code. The marker is a
+// cmpi rx, #yyy instruction, where x * 0x0000ffff + yyy (using the raw
+// 16-bit immediate value) is the delta from the pc to the first instruction
+// of the patchable code.
+// See PatchInlinedSmiCode in ic-ppc.cc for the code that patches it.
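+// For example (illustrative only): a delta of 0x1abcd would be recorded as
+// cmpi r1, #0xabce, since 0x1abcd == 1 * 0xffff + 0xabce.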
+class JumpPatchSite BASE_EMBEDDED {
+ public:
+  explicit JumpPatchSite(MacroAssembler* masm) : masm_(masm) {
+#ifdef DEBUG
+    info_emitted_ = false;
+#endif
+  }
+
+  ~JumpPatchSite() { DCHECK(patch_site_.is_bound() == info_emitted_); }
+
+  // When initially emitting this code, ensure that a jump is always generated
+  // to skip the inlined smi code.
+  void EmitJumpIfNotSmi(Register reg, Label* target) {
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    __ bind(&patch_site_);
+    __ cmp(reg, reg, cr0);
+    __ beq(target, cr0);  // Always taken before patched.
+  }
+
+  // When initially emitting this code, ensure that a jump is never generated
+  // to skip the inlined smi code.
+  void EmitJumpIfSmi(Register reg, Label* target) {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    DCHECK(!patch_site_.is_bound() && !info_emitted_);
+    __ bind(&patch_site_);
+    __ cmp(reg, reg, cr0);
+    __ bne(target, cr0);  // Never taken before patched.
+  }
+
+  void EmitPatchInfo() {
+    if (patch_site_.is_bound()) {
+      int delta_to_patch_site = masm_->InstructionsGeneratedSince(&patch_site_);
+      Register reg;
+      // I believe this is using reg as the high bits of the offset.
+      reg.set_code(delta_to_patch_site / kOff16Mask);
+      __ cmpi(reg, Operand(delta_to_patch_site % kOff16Mask));
+#ifdef DEBUG
+      info_emitted_ = true;
+#endif
+    } else {
+      __ nop();  // Signals no inlined code.
+    }
+  }
+
+ private:
+  MacroAssembler* masm_;
+  Label patch_site_;
+#ifdef DEBUG
+  bool info_emitted_;
+#endif
+};
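+// Typical pairing (sketch): bracket the inlined smi fast path with
+// patch_site.EmitJumpIfNotSmi(...), then call patch_site.EmitPatchInfo()
+// immediately after the CompareIC call, as VisitSwitchStatement does below.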
+
+
+// Generate code for a JS function.  On entry to the function the receiver
+// and arguments have been pushed on the stack left to right.  The actual
+// argument count matches the formal parameter count expected by the
+// function.
+//
+// The live registers are:
+//   o r4: the JS function object being called (i.e., ourselves)
+//   o cp: our context
+//   o fp: our caller's frame pointer (aka r31)
+//   o sp: stack pointer
+//   o lr: return address
+//   o ip: our own function entry (required by the prologue)
+//
+// The function builds a JS frame.  Please see JavaScriptFrameConstants in
+// frames-ppc.h for its layout.
+void FullCodeGenerator::Generate() {
+  CompilationInfo* info = info_;
+  handler_table_ =
+      isolate()->factory()->NewFixedArray(function()->handler_count(), TENURED);
+
+  profiling_counter_ = isolate()->factory()->NewCell(
+      Handle<Smi>(Smi::FromInt(FLAG_interrupt_budget), isolate()));
+  SetFunctionPosition(function());
+  Comment cmnt(masm_, "[ function compiled by full code generator");
+
+  ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+  if (strlen(FLAG_stop_at) > 0 &&
+      info->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+    __ stop("stop-at");
+  }
+#endif
+
+  // Sloppy mode functions and builtins need to replace the receiver with the
+  // global proxy when called as functions (without an explicit receiver
+  // object).
+  if (info->strict_mode() == SLOPPY && !info->is_native()) {
+    Label ok;
+    int receiver_offset = info->scope()->num_parameters() * kPointerSize;
+    __ LoadP(r5, MemOperand(sp, receiver_offset), r0);
+    __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
+    __ bne(&ok);
+
+    __ LoadP(r5, GlobalObjectOperand());
+    __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
+
+    __ StoreP(r5, MemOperand(sp, receiver_offset), r0);
+
+    __ bind(&ok);
+  }
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // MANUAL indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done below).
+  FrameScope frame_scope(masm_, StackFrame::MANUAL);
+  int prologue_offset = masm_->pc_offset();
+
+  if (prologue_offset) {
+    // Prologue logic requires its starting address in ip and the
+    // corresponding offset from the function entry.
+    prologue_offset += Instruction::kInstrSize;
+    __ addi(ip, ip, Operand(prologue_offset));
+  }
+  info->set_prologue_offset(prologue_offset);
+  __ Prologue(info->IsCodePreAgingActive(), prologue_offset);
+  info->AddNoFrameRange(0, masm_->pc_offset());
+
+  {
+    Comment cmnt(masm_, "[ Allocate locals");
+    int locals_count = info->scope()->num_stack_slots();
+    // Generators allocate locals, if any, in context slots.
+    DCHECK(!info->function()->is_generator() || locals_count == 0);
+    if (locals_count > 0) {
+      if (locals_count >= 128) {
+        Label ok;
+        __ Add(ip, sp, -(locals_count * kPointerSize), r0);
+        __ LoadRoot(r5, Heap::kRealStackLimitRootIndex);
+        __ cmpl(ip, r5);
+        __ bc_short(ge, &ok);
+        __ InvokeBuiltin(Builtins::STACK_OVERFLOW, CALL_FUNCTION);
+        __ bind(&ok);
+      }
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      int kMaxPushes = FLAG_optimize_for_size ? 4 : 32;
+      if (locals_count >= kMaxPushes) {
+        int loop_iterations = locals_count / kMaxPushes;
+        __ mov(r5, Operand(loop_iterations));
+        __ mtctr(r5);
+        Label loop_header;
+        __ bind(&loop_header);
+        // Do pushes.
+        for (int i = 0; i < kMaxPushes; i++) {
+          __ push(ip);
+        }
+        // Continue loop if not done.
+        __ bdnz(&loop_header);
+      }
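+      // (Illustrative: with locals_count == 70 and kMaxPushes == 32, the
+      // loop above runs twice, pushing 64 slots; 6 remain below.)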
+      int remaining = locals_count % kMaxPushes;
+      // Emit the remaining pushes.
+      for (int i = 0; i < remaining; i++) {
+        __ push(ip);
+      }
+    }
+  }
+
+  bool function_in_register = true;
+
+  // Possibly allocate a local context.
+  int heap_slots = info->scope()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    // Argument to NewContext is the function, which is still in r4.
+    Comment cmnt(masm_, "[ Allocate context");
+    bool need_write_barrier = true;
+    if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
+      __ push(r4);
+      __ Push(info->scope()->GetScopeInfo());
+      __ CallRuntime(Runtime::kNewScriptContext, 2);
+    } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), heap_slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(r4);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
+    }
+    function_in_register = false;
+    // Context is returned in r3.  It replaces the context passed to us.
+    // It's saved on the stack and kept live in cp.
+    __ mr(cp, r3);
+    __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = info->scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                               (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ LoadP(r3, MemOperand(fp, parameter_offset), r0);
+        // Store it in the context.
+        MemOperand target = ContextOperand(cp, var->index());
+        __ StoreP(r3, target, r0);
+
+        // Update the write barrier.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
+                                    kLRHasBeenSaved, kDontSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, r3, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+  }
+
+  Variable* arguments = scope()->arguments();
+  if (arguments != NULL) {
+    // Function uses arguments object.
+    Comment cmnt(masm_, "[ Allocate arguments object");
+    if (!function_in_register) {
+      // Load this again, if it's used by the local context below.
+      __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+    } else {
+      __ mr(r6, r4);
+    }
+    // Receiver is just before the parameters on the caller's stack.
+    int num_parameters = info->scope()->num_parameters();
+    int offset = num_parameters * kPointerSize;
+    __ addi(r5, fp, Operand(StandardFrameConstants::kCallerSPOffset + offset));
+    __ LoadSmiLiteral(r4, Smi::FromInt(num_parameters));
+    __ Push(r6, r5, r4);
+
+    // Arguments to ArgumentsAccessStub:
+    //   function, receiver address, parameter count.
+    // The stub will rewrite the receiver and parameter count if the previous
+    // stack frame was an arguments adapter frame.
+    ArgumentsAccessStub::Type type;
+    if (strict_mode() == STRICT) {
+      type = ArgumentsAccessStub::NEW_STRICT;
+    } else if (function()->has_duplicate_parameters()) {
+      type = ArgumentsAccessStub::NEW_SLOPPY_SLOW;
+    } else {
+      type = ArgumentsAccessStub::NEW_SLOPPY_FAST;
+    }
+    ArgumentsAccessStub stub(isolate(), type);
+    __ CallStub(&stub);
+
+    SetVar(arguments, r3, r4, r5);
+  }
+
+  if (FLAG_trace) {
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+
+  // Visit the declarations and body unless there is an illegal
+  // redeclaration.
+  if (scope()->HasIllegalRedeclaration()) {
+    Comment cmnt(masm_, "[ Declarations");
+    scope()->VisitIllegalRedeclaration(this);
+
+  } else {
+    PrepareForBailoutForId(BailoutId::FunctionEntry(), NO_REGISTERS);
+    {
+      Comment cmnt(masm_, "[ Declarations");
+      // For named function expressions, declare the function name as a
+      // constant.
+      if (scope()->is_function_scope() && scope()->function() != NULL) {
+        VariableDeclaration* function = scope()->function();
+        DCHECK(function->proxy()->var()->mode() == CONST ||
+               function->proxy()->var()->mode() == CONST_LEGACY);
+        DCHECK(function->proxy()->var()->location() != Variable::UNALLOCATED);
+        VisitVariableDeclaration(function);
+      }
+      VisitDeclarations(scope()->declarations());
+    }
+
+    {
+      Comment cmnt(masm_, "[ Stack check");
+      PrepareForBailoutForId(BailoutId::Declarations(), NO_REGISTERS);
+      Label ok;
+      __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+      __ cmpl(sp, ip);
+      __ bc_short(ge, &ok);
+      __ Call(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET);
+      __ bind(&ok);
+    }
+
+    {
+      Comment cmnt(masm_, "[ Body");
+      DCHECK(loop_depth() == 0);
+      VisitStatements(function()->body());
+      DCHECK(loop_depth() == 0);
+    }
+  }
+
+  // Always emit a 'return undefined' in case control fell off the end of
+  // the body.
+  {
+    Comment cmnt(masm_, "[ return <undefined>;");
+    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+  }
+  EmitReturnSequence();
+}
+
+
+void FullCodeGenerator::ClearAccumulator() {
+  __ LoadSmiLiteral(r3, Smi::FromInt(0));
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterDecrement(int delta) {
+  __ mov(r5, Operand(profiling_counter_));
+  __ LoadP(r6, FieldMemOperand(r5, Cell::kValueOffset));
+  __ SubSmiLiteral(r6, r6, Smi::FromInt(delta), r0);
+  __ StoreP(r6, FieldMemOperand(r5, Cell::kValueOffset), r0);
+}
+
+
+void FullCodeGenerator::EmitProfilingCounterReset() {
+  int reset_value = FLAG_interrupt_budget;
+  if (info_->is_debug()) {
+    // Detect debug break requests as soon as possible.
+    reset_value = FLAG_interrupt_budget >> 4;
+  }
+  __ mov(r5, Operand(profiling_counter_));
+  __ LoadSmiLiteral(r6, Smi::FromInt(reset_value));
+  __ StoreP(r6, FieldMemOperand(r5, Cell::kValueOffset), r0);
+}
+
+
+void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
+                                                Label* back_edge_target) {
+  Comment cmnt(masm_, "[ Back edge bookkeeping");
+  Label ok;
+
+  DCHECK(back_edge_target->is_bound());
+  int distance = masm_->SizeOfCodeGeneratedSince(back_edge_target) +
+                 kCodeSizeMultiplier / 2;
+  int weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
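+  // (Illustrative: a back edge roughly 4 * kCodeSizeMultiplier bytes away
+  // yields a weight of about 4, clamped to [1, kMaxBackEdgeWeight].)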
+  EmitProfilingCounterDecrement(weight);
+  {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    // BackEdgeTable::PatchAt manipulates this sequence.
+    __ cmpi(r6, Operand::Zero());
+    __ bc_short(ge, &ok);
+    __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+
+    // Record a mapping of this PC offset to the OSR id.  This is used to find
+    // the AST id from the unoptimized code in order to use it as a key into
+    // the deoptimization input data found in the optimized code.
+    RecordBackEdge(stmt->OsrEntryId());
+  }
+  EmitProfilingCounterReset();
+
+  __ bind(&ok);
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+  // Record a mapping of the OSR id to this PC.  This is used if the OSR
+  // entry becomes the target of a bailout.  We don't expect it to be, but
+  // we want it to work if it is.
+  PrepareForBailoutForId(stmt->OsrEntryId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::EmitReturnSequence() {
+  Comment cmnt(masm_, "[ Return sequence");
+  if (return_label_.is_bound()) {
+    __ b(&return_label_);
+  } else {
+    __ bind(&return_label_);
+    if (FLAG_trace) {
+      // Push the return value on the stack as the parameter.
+      // Runtime::TraceExit returns its parameter in r3
+      __ push(r3);
+      __ CallRuntime(Runtime::kTraceExit, 1);
+    }
+    // Pretend that the exit is a backwards jump to the entry.
+    int weight = 1;
+    if (info_->ShouldSelfOptimize()) {
+      weight = FLAG_interrupt_budget / FLAG_self_opt_count;
+    } else {
+      int distance = masm_->pc_offset() + kCodeSizeMultiplier / 2;
+      weight = Min(kMaxBackEdgeWeight, Max(1, distance / kCodeSizeMultiplier));
+    }
+    EmitProfilingCounterDecrement(weight);
+    Label ok;
+    __ cmpi(r6, Operand::Zero());
+    __ bge(&ok);
+    __ push(r3);
+    __ Call(isolate()->builtins()->InterruptCheck(), RelocInfo::CODE_TARGET);
+    __ pop(r3);
+    EmitProfilingCounterReset();
+    __ bind(&ok);
+
+#ifdef DEBUG
+    // Add a label for checking the size of the code used for returning.
+    Label check_exit_codesize;
+    __ bind(&check_exit_codesize);
+#endif
+    // Make sure that the trampoline pool is not emitted inside of the return
+    // sequence.
+    {
+      Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+      int32_t sp_delta = (info_->scope()->num_parameters() + 1) * kPointerSize;
+      CodeGenerator::RecordPositions(masm_, function()->end_position() - 1);
+      __ RecordJSReturn();
+      int no_frame_start = __ LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+#if V8_TARGET_ARCH_PPC64
+      // With 64-bit targets we may need nop() instructions to ensure there is
+      // enough space for SetDebugBreakAtReturn().
+      if (is_int16(sp_delta)) {
+#if !V8_OOL_CONSTANT_POOL
+        masm_->nop();
+#endif
+        masm_->nop();
+      }
+#endif
+      __ blr();
+      info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+    }
+
+#ifdef DEBUG
+    // Check that the size of the code used for returning is large enough
+    // for the debugger's requirements.
+    DCHECK(Assembler::kJSReturnSequenceInstructions <=
+           masm_->InstructionsGeneratedSince(&check_exit_codesize));
+#endif
+  }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Variable* var) const {
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(Variable* var) const {
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Variable* var) const {
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+  codegen()->GetVar(result_register(), var);
+  __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Variable* var) const {
+  DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+  // For simplicity we always test the accumulator register.
+  codegen()->GetVar(result_register(), var);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Heap::RootListIndex index) const {}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Heap::RootListIndex index) const {
+  __ LoadRoot(result_register(), index);
+  __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Heap::RootListIndex index) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+                                          false_label_);
+  if (index == Heap::kUndefinedValueRootIndex ||
+      index == Heap::kNullValueRootIndex ||
+      index == Heap::kFalseValueRootIndex) {
+    if (false_label_ != fall_through_) __ b(false_label_);
+  } else if (index == Heap::kTrueValueRootIndex) {
+    if (true_label_ != fall_through_) __ b(true_label_);
+  } else {
+    __ LoadRoot(result_register(), index);
+    codegen()->DoTest(this);
+  }
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Handle<Object> lit) const {}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Handle<Object> lit) const {
+  __ mov(result_register(), Operand(lit));
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(Handle<Object> lit) const {
+  // Immediates cannot be pushed directly.
+  __ mov(result_register(), Operand(lit));
+  __ push(result_register());
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Handle<Object> lit) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+                                          false_label_);
+  DCHECK(!lit->IsUndetectableObject());  // There are no undetectable literals.
+  if (lit->IsUndefined() || lit->IsNull() || lit->IsFalse()) {
+    if (false_label_ != fall_through_) __ b(false_label_);
+  } else if (lit->IsTrue() || lit->IsJSObject()) {
+    if (true_label_ != fall_through_) __ b(true_label_);
+  } else if (lit->IsString()) {
+    if (String::cast(*lit)->length() == 0) {
+      if (false_label_ != fall_through_) __ b(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ b(true_label_);
+    }
+  } else if (lit->IsSmi()) {
+    if (Smi::cast(*lit)->value() == 0) {
+      if (false_label_ != fall_through_) __ b(false_label_);
+    } else {
+      if (true_label_ != fall_through_) __ b(true_label_);
+    }
+  } else {
+    // For simplicity we always test the accumulator register.
+    __ mov(result_register(), Operand(lit));
+    codegen()->DoTest(this);
+  }
+}
+
+
+void FullCodeGenerator::EffectContext::DropAndPlug(int count,
+                                                   Register reg) const {
+  DCHECK(count > 0);
+  __ Drop(count);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::DropAndPlug(
+    int count, Register reg) const {
+  DCHECK(count > 0);
+  __ Drop(count);
+  __ Move(result_register(), reg);
+}
+
+
+void FullCodeGenerator::StackValueContext::DropAndPlug(int count,
+                                                       Register reg) const {
+  DCHECK(count > 0);
+  if (count > 1) __ Drop(count - 1);
+  __ StoreP(reg, MemOperand(sp, 0));
+}
+
+
+void FullCodeGenerator::TestContext::DropAndPlug(int count,
+                                                 Register reg) const {
+  DCHECK(count > 0);
+  // For simplicity we always test the accumulator register.
+  __ Drop(count);
+  __ Move(result_register(), reg);
+  codegen()->PrepareForBailoutBeforeSplit(condition(), false, NULL, NULL);
+  codegen()->DoTest(this);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(Label* materialize_true,
+                                            Label* materialize_false) const {
+  DCHECK(materialize_true == materialize_false);
+  __ bind(materialize_true);
+}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(
+    Label* materialize_true, Label* materialize_false) const {
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(result_register(), Heap::kTrueValueRootIndex);
+  __ b(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(result_register(), Heap::kFalseValueRootIndex);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(
+    Label* materialize_true, Label* materialize_false) const {
+  Label done;
+  __ bind(materialize_true);
+  __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+  __ b(&done);
+  __ bind(materialize_false);
+  __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+  __ bind(&done);
+  __ push(ip);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(Label* materialize_true,
+                                          Label* materialize_false) const {
+  DCHECK(materialize_true == true_label_);
+  DCHECK(materialize_false == false_label_);
+}
+
+
+void FullCodeGenerator::EffectContext::Plug(bool flag) const {}
+
+
+void FullCodeGenerator::AccumulatorValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(result_register(), value_root_index);
+}
+
+
+void FullCodeGenerator::StackValueContext::Plug(bool flag) const {
+  Heap::RootListIndex value_root_index =
+      flag ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex;
+  __ LoadRoot(ip, value_root_index);
+  __ push(ip);
+}
+
+
+void FullCodeGenerator::TestContext::Plug(bool flag) const {
+  codegen()->PrepareForBailoutBeforeSplit(condition(), true, true_label_,
+                                          false_label_);
+  if (flag) {
+    if (true_label_ != fall_through_) __ b(true_label_);
+  } else {
+    if (false_label_ != fall_through_) __ b(false_label_);
+  }
+}
+
+
+void FullCodeGenerator::DoTest(Expression* condition, Label* if_true,
+                               Label* if_false, Label* fall_through) {
+  Handle<Code> ic = ToBooleanStub::GetUninitialized(isolate());
+  CallIC(ic, condition->test_id());
+  __ cmpi(result_register(), Operand::Zero());
+  Split(ne, if_true, if_false, fall_through);
+}
+
+
+void FullCodeGenerator::Split(Condition cond, Label* if_true, Label* if_false,
+                              Label* fall_through, CRegister cr) {
+  if (if_false == fall_through) {
+    __ b(cond, if_true, cr);
+  } else if (if_true == fall_through) {
+    __ b(NegateCondition(cond), if_false, cr);
+  } else {
+    __ b(cond, if_true, cr);
+    __ b(if_false);
+  }
+}
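+// (E.g., Split(eq, if_true, if_false, if_true) emits only a "bne if_false",
+// since the true case falls through.)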
+
+
+MemOperand FullCodeGenerator::StackOperand(Variable* var) {
+  DCHECK(var->IsStackAllocated());
+  // Offset is negative because higher indexes are at lower addresses.
+  int offset = -var->index() * kPointerSize;
+  // Adjust by a (parameter or local) base offset.
+  if (var->IsParameter()) {
+    offset += (info_->scope()->num_parameters() + 1) * kPointerSize;
+  } else {
+    offset += JavaScriptFrameConstants::kLocal0Offset;
+  }
+  return MemOperand(fp, offset);
+}
+
+
+MemOperand FullCodeGenerator::VarOperand(Variable* var, Register scratch) {
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  if (var->IsContextSlot()) {
+    int context_chain_length = scope()->ContextChainLength(var->scope());
+    __ LoadContext(scratch, context_chain_length);
+    return ContextOperand(scratch, var->index());
+  } else {
+    return StackOperand(var);
+  }
+}
+
+
+void FullCodeGenerator::GetVar(Register dest, Variable* var) {
+  // Use destination as scratch.
+  MemOperand location = VarOperand(var, dest);
+  __ LoadP(dest, location, r0);
+}
+
+
+void FullCodeGenerator::SetVar(Variable* var, Register src, Register scratch0,
+                               Register scratch1) {
+  DCHECK(var->IsContextSlot() || var->IsStackAllocated());
+  DCHECK(!scratch0.is(src));
+  DCHECK(!scratch0.is(scratch1));
+  DCHECK(!scratch1.is(src));
+  MemOperand location = VarOperand(var, scratch0);
+  __ StoreP(src, location, r0);
+
+  // Emit the write barrier code if the location is in the heap.
+  if (var->IsContextSlot()) {
+    __ RecordWriteContextSlot(scratch0, location.offset(), src, scratch1,
+                              kLRHasBeenSaved, kDontSaveFPRegs);
+  }
+}
+
+
+void FullCodeGenerator::PrepareForBailoutBeforeSplit(Expression* expr,
+                                                     bool should_normalize,
+                                                     Label* if_true,
+                                                     Label* if_false) {
+  // Only prepare for bailouts before splits if we're in a test
+  // context. Otherwise, we let the Visit function deal with the
+  // preparation to avoid preparing with the same AST id twice.
+  if (!context()->IsTest() || !info_->IsOptimizable()) return;
+
+  Label skip;
+  if (should_normalize) __ b(&skip);
+  PrepareForBailout(expr, TOS_REG);
+  if (should_normalize) {
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(r3, ip);
+    Split(eq, if_true, if_false, NULL);
+    __ bind(&skip);
+  }
+}
+
+
+void FullCodeGenerator::EmitDebugCheckDeclarationContext(Variable* variable) {
+  // The variable in the declaration always resides in the current function
+  // context.
+  DCHECK_EQ(0, scope()->ContextChainLength(variable->scope()));
+  if (generate_debug_code_) {
+    // Check that we're not inside a with or catch context.
+    __ LoadP(r4, FieldMemOperand(cp, HeapObject::kMapOffset));
+    __ CompareRoot(r4, Heap::kWithContextMapRootIndex);
+    __ Check(ne, kDeclarationInWithContext);
+    __ CompareRoot(r4, Heap::kCatchContextMapRootIndex);
+    __ Check(ne, kDeclarationInCatchContext);
+  }
+}
+
+
+void FullCodeGenerator::VisitVariableDeclaration(
+    VariableDeclaration* declaration) {
+  // If it was not possible to allocate the variable at compile time, we
+  // need to "declare" it at runtime to make sure it actually exists in the
+  // local context.
+  VariableProxy* proxy = declaration->proxy();
+  VariableMode mode = declaration->mode();
+  Variable* variable = proxy->var();
+  bool hole_init = mode == LET || mode == CONST || mode == CONST_LEGACY;
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      globals_->Add(variable->name(), zone());
+      globals_->Add(variable->binding_needs_init()
+                        ? isolate()->factory()->the_hole_value()
+                        : isolate()->factory()->undefined_value(),
+                    zone());
+      break;
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+        __ StoreP(ip, StackOperand(variable));
+      }
+      break;
+
+    case Variable::CONTEXT:
+      if (hole_init) {
+        Comment cmnt(masm_, "[ VariableDeclaration");
+        EmitDebugCheckDeclarationContext(variable);
+        __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+        __ StoreP(ip, ContextOperand(cp, variable->index()), r0);
+        // No write barrier since the_hole_value is in old space.
+        PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      }
+      break;
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ VariableDeclaration");
+      __ mov(r5, Operand(variable->name()));
+      // Declaration nodes are always introduced in one of four modes.
+      DCHECK(IsDeclaredVariableMode(mode));
+      PropertyAttributes attr =
+          IsImmutableVariableMode(mode) ? READ_ONLY : NONE;
+      __ LoadSmiLiteral(r4, Smi::FromInt(attr));
+      // Push initial value, if any.
+      // Note: For variables we must not push an initial value (such as
+      // 'undefined') because we may have a (legal) redeclaration and we
+      // must not destroy the current value.
+      if (hole_init) {
+        __ LoadRoot(r3, Heap::kTheHoleValueRootIndex);
+        __ Push(cp, r5, r4, r3);
+      } else {
+        __ LoadSmiLiteral(r3, Smi::FromInt(0));  // Indicates no initial value.
+        __ Push(cp, r5, r4, r3);
+      }
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitFunctionDeclaration(
+    FunctionDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED: {
+      globals_->Add(variable->name(), zone());
+      Handle<SharedFunctionInfo> function =
+          Compiler::BuildFunctionInfo(declaration->fun(), script(), info_);
+      // Check for stack-overflow exception.
+      if (function.is_null()) return SetStackOverflow();
+      globals_->Add(function, zone());
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      VisitForAccumulatorValue(declaration->fun());
+      __ StoreP(result_register(), StackOperand(variable));
+      break;
+    }
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      VisitForAccumulatorValue(declaration->fun());
+      __ StoreP(result_register(), ContextOperand(cp, variable->index()), r0);
+      int offset = Context::SlotOffset(variable->index());
+      // We know that we have written a function, which is not a smi.
+      __ RecordWriteContextSlot(cp, offset, result_register(), r5,
+                                kLRHasBeenSaved, kDontSaveFPRegs,
+                                EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+      PrepareForBailoutForId(proxy->id(), NO_REGISTERS);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ FunctionDeclaration");
+      __ mov(r5, Operand(variable->name()));
+      __ LoadSmiLiteral(r4, Smi::FromInt(NONE));
+      __ Push(cp, r5, r4);
+      // Push initial value for function declaration.
+      VisitForStackValue(declaration->fun());
+      __ CallRuntime(Runtime::kDeclareLookupSlot, 4);
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
+  Variable* variable = declaration->proxy()->var();
+  DCHECK(variable->location() == Variable::CONTEXT);
+  DCHECK(variable->interface()->IsFrozen());
+
+  Comment cmnt(masm_, "[ ModuleDeclaration");
+  EmitDebugCheckDeclarationContext(variable);
+
+  // Load instance object.
+  __ LoadContext(r4, scope_->ContextChainLength(scope_->ScriptScope()));
+  __ LoadP(r4, ContextOperand(r4, variable->interface()->Index()));
+  __ LoadP(r4, ContextOperand(r4, Context::EXTENSION_INDEX));
+
+  // Assign it.
+  __ StoreP(r4, ContextOperand(cp, variable->index()), r0);
+  // We know that we have written a module, which is not a smi.
+  __ RecordWriteContextSlot(cp, Context::SlotOffset(variable->index()), r4, r6,
+                            kLRHasBeenSaved, kDontSaveFPRegs,
+                            EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
+  PrepareForBailoutForId(declaration->proxy()->id(), NO_REGISTERS);
+
+  // Traverse into body.
+  Visit(declaration->module());
+}
+
+
+void FullCodeGenerator::VisitImportDeclaration(ImportDeclaration* declaration) {
+  VariableProxy* proxy = declaration->proxy();
+  Variable* variable = proxy->var();
+  switch (variable->location()) {
+    case Variable::UNALLOCATED:
+      // TODO(rossberg)
+      break;
+
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, "[ ImportDeclaration");
+      EmitDebugCheckDeclarationContext(variable);
+      // TODO(rossberg)
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::LOOKUP:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitExportDeclaration(ExportDeclaration* declaration) {
+  // TODO(rossberg)
+}
+
+
+void FullCodeGenerator::DeclareGlobals(Handle<FixedArray> pairs) {
+  // Call the runtime to declare the globals.
+  // The context is the first argument.
+  __ mov(r4, Operand(pairs));
+  __ LoadSmiLiteral(r3, Smi::FromInt(DeclareGlobalsFlags()));
+  __ Push(cp, r4, r3);
+  __ CallRuntime(Runtime::kDeclareGlobals, 3);
+  // Return value is ignored.
+}
+
+
+void FullCodeGenerator::DeclareModules(Handle<FixedArray> descriptions) {
+  // Call the runtime to declare the modules.
+  __ Push(descriptions);
+  __ CallRuntime(Runtime::kDeclareModules, 1);
+  // Return value is ignored.
+}
+
+
+void FullCodeGenerator::VisitSwitchStatement(SwitchStatement* stmt) {
+  Comment cmnt(masm_, "[ SwitchStatement");
+  Breakable nested_statement(this, stmt);
+  SetStatementPosition(stmt);
+
+  // Keep the switch value on the stack until a case matches.
+  VisitForStackValue(stmt->tag());
+  PrepareForBailoutForId(stmt->EntryId(), NO_REGISTERS);
+
+  ZoneList<CaseClause*>* clauses = stmt->cases();
+  CaseClause* default_clause = NULL;  // Can occur anywhere in the list.
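+  // For example, in
+  //   switch (tag) { case a: ...; default: ...; case b: ... }
+  // the default clause is skipped during this pass and only used as the
+  // final fall-through target once every case test has failed.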
+
+  Label next_test;  // Recycled for each test.
+  // Compile all the tests with branches to their bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    CaseClause* clause = clauses->at(i);
+    clause->body_target()->Unuse();
+
+    // The default is not a test, but remember it as the final fall-through.
+    if (clause->is_default()) {
+      default_clause = clause;
+      continue;
+    }
+
+    Comment cmnt(masm_, "[ Case comparison");
+    __ bind(&next_test);
+    next_test.Unuse();
+
+    // Compile the label expression.
+    VisitForAccumulatorValue(clause->label());
+
+    // Perform the comparison as if via '==='.
+    __ LoadP(r4, MemOperand(sp, 0));  // Switch value.
+    bool inline_smi_code = ShouldInlineSmiCase(Token::EQ_STRICT);
+    JumpPatchSite patch_site(masm_);
+    if (inline_smi_code) {
+      Label slow_case;
+      __ orx(r5, r4, r3);
+      patch_site.EmitJumpIfNotSmi(r5, &slow_case);
+
+      __ cmp(r4, r3);
+      __ bne(&next_test);
+      __ Drop(1);  // Switch value is no longer needed.
+      __ b(clause->body_target());
+      __ bind(&slow_case);
+    }
+
+    // Record position before stub call for type feedback.
+    SetSourcePosition(clause->position());
+    Handle<Code> ic =
+        CodeFactory::CompareIC(isolate(), Token::EQ_STRICT).code();
+    CallIC(ic, clause->CompareId());
+    patch_site.EmitPatchInfo();
+
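+    // The block below is branched over in normal execution; it is the
+    // bailout continuation for the CompareIC call above, entered with the
+    // comparison result materialized as a boolean in r3.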
+    Label skip;
+    __ b(&skip);
+    PrepareForBailout(clause, TOS_REG);
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(r3, ip);
+    __ bne(&next_test);
+    __ Drop(1);
+    __ b(clause->body_target());
+    __ bind(&skip);
+
+    __ cmpi(r3, Operand::Zero());
+    __ bne(&next_test);
+    __ Drop(1);  // Switch value is no longer needed.
+    __ b(clause->body_target());
+  }
+
+  // Discard the test value and jump to the default if present, otherwise to
+  // the end of the statement.
+  __ bind(&next_test);
+  __ Drop(1);  // Switch value is no longer needed.
+  if (default_clause == NULL) {
+    __ b(nested_statement.break_label());
+  } else {
+    __ b(default_clause->body_target());
+  }
+
+  // Compile all the case bodies.
+  for (int i = 0; i < clauses->length(); i++) {
+    Comment cmnt(masm_, "[ Case body");
+    CaseClause* clause = clauses->at(i);
+    __ bind(clause->body_target());
+    PrepareForBailoutForId(clause->EntryId(), NO_REGISTERS);
+    VisitStatements(clause->statements());
+  }
+
+  __ bind(nested_statement.break_label());
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+}
+
+
+void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
+  Comment cmnt(masm_, "[ ForInStatement");
+  FeedbackVectorSlot slot = stmt->ForInFeedbackSlot();
+  SetStatementPosition(stmt);
+
+  Label loop, exit;
+  ForIn loop_statement(this, stmt);
+  increment_loop_depth();
+
+  // Get the object to enumerate over. If the object is null or undefined, skip
+  // over the loop.  See ECMA-262 version 5, section 12.6.4.
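+  // For example, 'for (var key in undefined) {}' runs its body zero times.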
+  VisitForAccumulatorValue(stmt->enumerable());
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r3, ip);
+  __ beq(&exit);
+  Register null_value = r7;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ cmp(r3, null_value);
+  __ beq(&exit);
+
+  PrepareForBailoutForId(stmt->PrepareId(), TOS_REG);
+
+  // Convert the object to a JS object.
+  Label convert, done_convert;
+  __ JumpIfSmi(r3, &convert);
+  __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+  __ bge(&done_convert);
+  __ bind(&convert);
+  __ push(r3);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ bind(&done_convert);
+  PrepareForBailoutForId(stmt->ToObjectId(), TOS_REG);
+  __ push(r3);
+
+  // Check for proxies.
+  Label call_runtime;
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
+  __ ble(&call_runtime);
+
+  // Check cache validity in generated code. This is a fast case for
+  // the JSObject::IsSimpleEnum cache validity checks. If we cannot
+  // guarantee cache validity, call the runtime system to check cache
+  // validity or get the property names in a fixed array.
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  // The enum cache is valid.  Load the map of the object being
+  // iterated over and use the cache for the iteration.
+  Label use_cache;
+  __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ b(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(r3);  // Duplicate the enumerable object on the stack.
+  __ CallRuntime(Runtime::kGetPropertyNamesFast, 1);
+  PrepareForBailoutForId(stmt->EnumId(), TOS_REG);
+
+  // If we got a map from the runtime call, we can do a fast
+  // modification check. Otherwise, we got a fixed array, and we have
+  // to do a slow check.
+  Label fixed_array;
+  __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+  __ cmp(r5, ip);
+  __ bne(&fixed_array);
+
+  // We got a map in register r3. Get the enumeration cache from it.
+  Label no_descriptors;
+  __ bind(&use_cache);
+
+  __ EnumLength(r4, r3);
+  __ CmpSmiLiteral(r4, Smi::FromInt(0), r0);
+  __ beq(&no_descriptors);
+
+  __ LoadInstanceDescriptors(r3, r5);
+  __ LoadP(r5, FieldMemOperand(r5, DescriptorArray::kEnumCacheOffset));
+  __ LoadP(r5,
+           FieldMemOperand(r5, DescriptorArray::kEnumCacheBridgeCacheOffset));
+
+  // Set up the four remaining stack slots.
+  __ push(r3);  // Map.
+  __ LoadSmiLiteral(r3, Smi::FromInt(0));
+  // Push enumeration cache, enumeration cache length (as smi) and zero.
+  __ Push(r5, r4, r3);
+  __ b(&loop);
+
+  __ bind(&no_descriptors);
+  __ Drop(1);
+  __ b(&exit);
+
+  // We got a fixed array in register r3. Iterate through that.
+  Label non_proxy;
+  __ bind(&fixed_array);
+
+  __ Move(r4, FeedbackVector());
+  __ mov(r5, Operand(TypeFeedbackVector::MegamorphicSentinel(isolate())));
+  int vector_index = FeedbackVector()->GetIndex(slot);
+  __ StoreP(
+      r5, FieldMemOperand(r4, FixedArray::OffsetOfElementAt(vector_index)), r0);
+
+  __ LoadSmiLiteral(r4, Smi::FromInt(1));          // Smi indicates slow check
+  __ LoadP(r5, MemOperand(sp, 0 * kPointerSize));  // Get enumerated object
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CompareObjectType(r5, r6, r6, LAST_JS_PROXY_TYPE);
+  __ bgt(&non_proxy);
+  __ LoadSmiLiteral(r4, Smi::FromInt(0));  // Zero indicates proxy
+  __ bind(&non_proxy);
+  __ Push(r4, r3);  // Smi and array
+  __ LoadP(r4, FieldMemOperand(r3, FixedArray::kLengthOffset));
+  __ LoadSmiLiteral(r3, Smi::FromInt(0));
+  __ Push(r4, r3);  // Fixed array length (as smi) and initial index.
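+  // From here on the loop operates on five stack slots:
+  //   [sp + 0 * kPointerSize] current index (smi)
+  //   [sp + 1 * kPointerSize] array length (smi)
+  //   [sp + 2 * kPointerSize] array of keys (enum cache or fixed array)
+  //   [sp + 3 * kPointerSize] expected map, or smi 0/1 in the slow case
+  //   [sp + 4 * kPointerSize] enumerable object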
+
+  // Generate code for doing the condition check.
+  PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
+  __ bind(&loop);
+  // Load the current count to r3, load the length to r4.
+  __ LoadP(r3, MemOperand(sp, 0 * kPointerSize));
+  __ LoadP(r4, MemOperand(sp, 1 * kPointerSize));
+  __ cmpl(r3, r4);  // Compare to the array length.
+  __ bge(loop_statement.break_label());
+
+  // Get the current entry of the array into register r6.
+  __ LoadP(r5, MemOperand(sp, 2 * kPointerSize));
+  __ addi(r5, r5, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ SmiToPtrArrayOffset(r6, r3);
+  __ LoadPX(r6, MemOperand(r6, r5));
+
+  // Get the expected map from the stack or a smi in the
+  // permanent slow case into register r5.
+  __ LoadP(r5, MemOperand(sp, 3 * kPointerSize));
+
+  // Check if the expected map still matches that of the enumerable.
+  // If not, we may have to filter the key.
+  Label update_each;
+  __ LoadP(r4, MemOperand(sp, 4 * kPointerSize));
+  __ LoadP(r7, FieldMemOperand(r4, HeapObject::kMapOffset));
+  __ cmp(r7, r5);
+  __ beq(&update_each);
+
+  // For proxies, no filtering is done.
+  // TODO(rossberg): What if only a prototype is a proxy? Not specified yet.
+  __ CmpSmiLiteral(r5, Smi::FromInt(0), r0);
+  __ beq(&update_each);
+
+  // Convert the entry to a string or (smi) 0 if it isn't a property
+  // any more. If the property has been removed while iterating, we
+  // just skip it.
+  __ Push(r4, r6);  // Enumerable and current entry.
+  __ InvokeBuiltin(Builtins::FILTER_KEY, CALL_FUNCTION);
+  __ mr(r6, r3);
+  __ cmpi(r6, Operand::Zero());
+  __ beq(loop_statement.continue_label());
+
+  // Update the 'each' property or variable from the possibly filtered
+  // entry in register r6.
+  __ bind(&update_each);
+  __ mr(result_register(), r6);
+  // Perform the assignment as if via '='.
+  {
+    EffectContext context(this);
+    EmitAssignment(stmt->each());
+  }
+
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Generate code for going to the next element by incrementing
+  // the index (smi) stored on top of the stack.
+  __ bind(loop_statement.continue_label());
+  __ pop(r3);
+  __ AddSmiLiteral(r3, r3, Smi::FromInt(1), r0);
+  __ push(r3);
+
+  EmitBackEdgeBookkeeping(stmt, &loop);
+  __ b(&loop);
+
+  // Remove the pointers stored on the stack.
+  __ bind(loop_statement.break_label());
+  __ Drop(5);
+
+  // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ bind(&exit);
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
+  Comment cmnt(masm_, "[ ForOfStatement");
+  SetStatementPosition(stmt);
+
+  Iteration loop_statement(this, stmt);
+  increment_loop_depth();
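+  // The parser has already desugared the loop into iterator-protocol steps;
+  // the sub-expressions visited below (assign_iterator, next_result,
+  // result_done, assign_each) carry that desugaring.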
+
+  // var iterator = iterable[Symbol.iterator]();
+  VisitForEffect(stmt->assign_iterator());
+
+  // Loop entry.
+  __ bind(loop_statement.continue_label());
+
+  // result = iterator.next()
+  VisitForEffect(stmt->next_result());
+
+  // if (result.done) break;
+  Label result_not_done;
+  VisitForControl(stmt->result_done(), loop_statement.break_label(),
+                  &result_not_done, &result_not_done);
+  __ bind(&result_not_done);
+
+  // each = result.value
+  VisitForEffect(stmt->assign_each());
+
+  // Generate code for the body of the loop.
+  Visit(stmt->body());
+
+  // Check stack before looping.
+  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
+  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
+  __ b(loop_statement.continue_label());
+
+  // Exit and decrement the loop depth.
+  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
+  __ bind(loop_statement.break_label());
+  decrement_loop_depth();
+}
+
+
+void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
+                                       bool pretenure) {
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literal cloning. If
+  // we're running with the --always-opt or the --prepare-always-opt
+  // flag, we need to use the runtime function so that the new function
+  // we are creating here gets a chance to have its code optimized and
+  // doesn't just get a copy of the existing unoptimized code.
+  if (!FLAG_always_opt && !FLAG_prepare_always_opt && !pretenure &&
+      scope()->is_function_scope() && info->num_literals() == 0) {
+    FastNewClosureStub stub(isolate(), info->strict_mode(), info->kind());
+    __ mov(r5, Operand(info));
+    __ CallStub(&stub);
+  } else {
+    __ mov(r3, Operand(info));
+    __ LoadRoot(
+        r4, pretenure ? Heap::kTrueValueRootIndex : Heap::kFalseValueRootIndex);
+    __ Push(cp, r3, r4);
+    __ CallRuntime(Runtime::kNewClosure, 3);
+  }
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::VisitVariableProxy(VariableProxy* expr) {
+  Comment cmnt(masm_, "[ VariableProxy");
+  EmitVariableLoad(expr);
+}
+
+
+void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
+  Comment cnmt(masm_, "[ SuperReference ");
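+  // Load the [[HomeObject]] of the current function (the object a method's
+  // 'super' references are resolved against) and throw if it is undefined.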
+
+  __ LoadP(LoadDescriptor::ReceiverRegister(),
+           MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+  Handle<Symbol> home_object_symbol(isolate()->heap()->home_object_symbol());
+  __ Move(LoadDescriptor::NameRegister(), home_object_symbol);
+
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Operand(SmiFromSlot(expr->HomeObjectFeedbackSlot())));
+    CallLoadIC(NOT_CONTEXTUAL);
+  } else {
+    CallLoadIC(NOT_CONTEXTUAL, expr->HomeObjectFeedbackId());
+  }
+
+  __ Cmpi(r3, Operand(isolate()->factory()->undefined_value()), r0);
+  Label done;
+  __ bne(&done);
+  __ CallRuntime(Runtime::kThrowNonMethodError, 0);
+  __ bind(&done);
+}
+
+
+void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
+                                                      TypeofState typeof_state,
+                                                      Label* slow) {
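+  // Walk the context chain, bailing out to 'slow' if any context on the way
+  // may hold an extension object introduced by a sloppy-mode 'eval'; if none
+  // is found, the variable can safely be loaded as a global.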
+  Register current = cp;
+  Register next = r4;
+  Register temp = r5;
+
+  Scope* s = scope();
+  while (s != NULL) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_sloppy_eval()) {
+        // Check that extension is NULL.
+        __ LoadP(temp, ContextOperand(current, Context::EXTENSION_INDEX));
+        __ cmpi(temp, Operand::Zero());
+        __ bne(slow);
+      }
+      // Load next context in chain.
+      __ LoadP(next, ContextOperand(current, Context::PREVIOUS_INDEX));
+      // Walk the rest of the chain without clobbering cp.
+      current = next;
+    }
+    // If no outer scope calls eval, we do not need to check more
+    // context extensions.
+    if (!s->outer_scope_calls_sloppy_eval() || s->is_eval_scope()) break;
+    s = s->outer_scope();
+  }
+
+  if (s->is_eval_scope()) {
+    Label loop, fast;
+    if (!current.is(next)) {
+      __ Move(next, current);
+    }
+    __ bind(&loop);
+    // Terminate at native context.
+    __ LoadP(temp, FieldMemOperand(next, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kNativeContextMapRootIndex);
+    __ cmp(temp, ip);
+    __ beq(&fast);
+    // Check that extension is NULL.
+    __ LoadP(temp, ContextOperand(next, Context::EXTENSION_INDEX));
+    __ cmpi(temp, Operand::Zero());
+    __ bne(slow);
+    // Load next context in chain.
+    __ LoadP(next, ContextOperand(next, Context::PREVIOUS_INDEX));
+    __ b(&loop);
+    __ bind(&fast);
+  }
+
+  __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+  __ mov(LoadDescriptor::NameRegister(), Operand(proxy->var()->name()));
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+  }
+
+  ContextualMode mode =
+      (typeof_state == INSIDE_TYPEOF) ? NOT_CONTEXTUAL : CONTEXTUAL;
+  CallLoadIC(mode);
+}
+
+
+MemOperand FullCodeGenerator::ContextSlotOperandCheckExtensions(Variable* var,
+                                                                Label* slow) {
+  DCHECK(var->IsContextSlot());
+  Register context = cp;
+  Register next = r6;
+  Register temp = r7;
+
+  for (Scope* s = scope(); s != var->scope(); s = s->outer_scope()) {
+    if (s->num_heap_slots() > 0) {
+      if (s->calls_sloppy_eval()) {
+        // Check that extension is NULL.
+        __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+        __ cmpi(temp, Operand::Zero());
+        __ bne(slow);
+      }
+      __ LoadP(next, ContextOperand(context, Context::PREVIOUS_INDEX));
+      // Walk the rest of the chain without clobbering cp.
+      context = next;
+    }
+  }
+  // Check that last extension is NULL.
+  __ LoadP(temp, ContextOperand(context, Context::EXTENSION_INDEX));
+  __ cmpi(temp, Operand::Zero());
+  __ bne(slow);
+
+  // This function is used only for loads, not stores, so it's safe to
+  // return a cp-based operand (the write barrier cannot be allowed to
+  // destroy the cp register).
+  return ContextOperand(context, var->index());
+}
+
+
+void FullCodeGenerator::EmitDynamicLookupFastCase(VariableProxy* proxy,
+                                                  TypeofState typeof_state,
+                                                  Label* slow, Label* done) {
+  // Generate fast-case code for variables that might be shadowed by
+  // eval-introduced variables.  Eval is used a lot without
+  // introducing variables.  In those cases, we do not want to
+  // perform a runtime call for all variables in the scope
+  // containing the eval.
+  Variable* var = proxy->var();
+  if (var->mode() == DYNAMIC_GLOBAL) {
+    EmitLoadGlobalCheckExtensions(proxy, typeof_state, slow);
+    __ b(done);
+  } else if (var->mode() == DYNAMIC_LOCAL) {
+    Variable* local = var->local_if_not_shadowed();
+    __ LoadP(r3, ContextSlotOperandCheckExtensions(local, slow));
+    if (local->mode() == LET || local->mode() == CONST ||
+        local->mode() == CONST_LEGACY) {
+      __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+      __ bne(done);
+      if (local->mode() == CONST_LEGACY) {
+        __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+      } else {  // LET || CONST
+        __ mov(r3, Operand(var->name()));
+        __ push(r3);
+        __ CallRuntime(Runtime::kThrowReferenceError, 1);
+      }
+    }
+    __ b(done);
+  }
+}
+
+
+void FullCodeGenerator::EmitVariableLoad(VariableProxy* proxy) {
+  // Record position before possible IC call.
+  SetSourcePosition(proxy->position());
+  Variable* var = proxy->var();
+
+  // Three cases: global variables, lookup variables, and all other types of
+  // variables.
+  switch (var->location()) {
+    case Variable::UNALLOCATED: {
+      Comment cmnt(masm_, "[ Global variable");
+      __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+      __ mov(LoadDescriptor::NameRegister(), Operand(var->name()));
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+      }
+      CallLoadIC(CONTEXTUAL);
+      context()->Plug(r3);
+      break;
+    }
+
+    case Variable::PARAMETER:
+    case Variable::LOCAL:
+    case Variable::CONTEXT: {
+      Comment cmnt(masm_, var->IsContextSlot() ? "[ Context variable"
+                                               : "[ Stack variable");
+      if (var->binding_needs_init()) {
+        // var->scope() may be NULL when the proxy is located in eval code and
+        // refers to a potential outside binding. Currently those bindings are
+        // always looked up dynamically, i.e. in that case
+        //     var->location() == LOOKUP
+        // always holds.
+        DCHECK(var->scope() != NULL);
+
+        // Check if the binding really needs an initialization check. The check
+        // can be skipped in the following situation: we have a LET or CONST
+        // binding in harmony mode, both the Variable and the VariableProxy have
+        // the same declaration scope (i.e. they are both in global code, in the
+        // same function or in the same eval code) and the VariableProxy is in
+        // the source physically located after the initializer of the variable.
+        //
+        // We cannot skip any initialization checks for CONST in non-harmony
+        // mode because const variables may be declared but never initialized:
+        //   if (false) { const x; }; var y = x;
+        //
+        // The condition on the declaration scopes is a conservative check for
+        // nested functions that access a binding and are called before the
+        // binding is initialized:
+        //   function() { f(); let x = 1; function f() { x = 2; } }
+        //
+        bool skip_init_check;
+        if (var->scope()->DeclarationScope() != scope()->DeclarationScope()) {
+          skip_init_check = false;
+        } else {
+          // Check that we always have a valid source position.
+          DCHECK(var->initializer_position() != RelocInfo::kNoPosition);
+          DCHECK(proxy->position() != RelocInfo::kNoPosition);
+          skip_init_check = var->mode() != CONST_LEGACY &&
+                            var->initializer_position() < proxy->position();
+        }
+
+        if (!skip_init_check) {
+          Label done;
+          // Let and const need a read barrier.
+          GetVar(r3, var);
+          __ CompareRoot(r3, Heap::kTheHoleValueRootIndex);
+          __ bne(&done);
+          if (var->mode() == LET || var->mode() == CONST) {
+            // Throw a reference error when using an uninitialized let/const
+            // binding in harmony mode.
+            __ mov(r3, Operand(var->name()));
+            __ push(r3);
+            __ CallRuntime(Runtime::kThrowReferenceError, 1);
+          } else {
+            // Uninitialized legacy const bindings are unholed.
+            DCHECK(var->mode() == CONST_LEGACY);
+            __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+          }
+          __ bind(&done);
+          context()->Plug(r3);
+          break;
+        }
+      }
+      context()->Plug(var);
+      break;
+    }
+
+    case Variable::LOOKUP: {
+      Comment cmnt(masm_, "[ Lookup variable");
+      Label done, slow;
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+      __ bind(&slow);
+      __ mov(r4, Operand(var->name()));
+      __ Push(cp, r4);  // Context and name.
+      __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+      __ bind(&done);
+      context()->Plug(r3);
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
+  Comment cmnt(masm_, "[ RegExpLiteral");
+  Label materialized;
+  // Registers will be used as follows:
+  // r8 = materialized value (RegExp literal)
+  // r7 = JS function, literals array
+  // r6 = literal index
+  // r5 = RegExp pattern
+  // r4 = RegExp flags
+  // r3 = RegExp literal clone
+  __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadP(r7, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
+  int literal_offset =
+      FixedArray::kHeaderSize + expr->literal_index() * kPointerSize;
+  __ LoadP(r8, FieldMemOperand(r7, literal_offset), r0);
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r8, ip);
+  __ bne(&materialized);
+
+  // Create regexp literal using runtime function.
+  // Result will be in r3.
+  __ LoadSmiLiteral(r6, Smi::FromInt(expr->literal_index()));
+  __ mov(r5, Operand(expr->pattern()));
+  __ mov(r4, Operand(expr->flags()));
+  __ Push(r7, r6, r5, r4);
+  __ CallRuntime(Runtime::kMaterializeRegExpLiteral, 4);
+  __ mr(r8, r3);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+  __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
+  __ b(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ LoadSmiLiteral(r3, Smi::FromInt(size));
+  __ Push(r8, r3);
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+  __ pop(r8);
+
+  __ bind(&allocated);
+  // After this, registers are used as follows:
+  // r3: Newly allocated regexp.
+  // r8: Materialized regexp.
+  // r5: temp.
+  __ CopyFields(r3, r8, r5.bit(), size / kPointerSize);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitAccessor(Expression* expression) {
+  if (expression == NULL) {
+    __ LoadRoot(r4, Heap::kNullValueRootIndex);
+    __ push(r4);
+  } else {
+    VisitForStackValue(expression);
+  }
+}
+
+
+void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
+  Comment cmnt(masm_, "[ ObjectLiteral");
+
+  expr->BuildConstantProperties(isolate());
+  Handle<FixedArray> constant_properties = expr->constant_properties();
+  __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
+  __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+  __ mov(r4, Operand(constant_properties));
+  int flags = expr->fast_elements() ? ObjectLiteral::kFastElements
+                                    : ObjectLiteral::kNoFlags;
+  flags |= expr->has_function() ? ObjectLiteral::kHasFunction
+                                : ObjectLiteral::kNoFlags;
+  __ LoadSmiLiteral(r3, Smi::FromInt(flags));
+  int properties_count = constant_properties->length() / 2;
+  if (expr->may_store_doubles() || expr->depth() > 1 ||
+      masm()->serializer_enabled() || flags != ObjectLiteral::kFastElements ||
+      properties_count > FastCloneShallowObjectStub::kMaximumClonedProperties) {
+    __ Push(r6, r5, r4, r3);
+    __ CallRuntime(Runtime::kCreateObjectLiteral, 4);
+  } else {
+    FastCloneShallowObjectStub stub(isolate(), properties_count);
+    __ CallStub(&stub);
+  }
+  PrepareForBailoutForId(expr->CreateLiteralId(), TOS_REG);
+
+  // If result_saved is true the result is on top of the stack.  If
+  // result_saved is false the result is in r3.
+  bool result_saved = false;
+
+  // Mark all computed expressions that are bound to a key that
+  // is shadowed by a later occurrence of the same key. For the
+  // marked expressions, no store code is emitted.
+  expr->CalculateEmitStore(zone());
+
+  AccessorTable accessor_table(zone());
+  for (int i = 0; i < expr->properties()->length(); i++) {
+    ObjectLiteral::Property* property = expr->properties()->at(i);
+    if (property->IsCompileTimeValue()) continue;
+
+    Literal* key = property->key();
+    Expression* value = property->value();
+    if (!result_saved) {
+      __ push(r3);  // Save result on stack
+      result_saved = true;
+    }
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+        UNREACHABLE();
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+        DCHECK(!CompileTimeValue::IsCompileTimeValue(property->value()));
+      // Fall through.
+      case ObjectLiteral::Property::COMPUTED:
+        // It is safe to use [[Put]] here because the boilerplate already
+        // contains computed properties with an uninitialized value.
+        if (key->value()->IsInternalizedString()) {
+          if (property->emit_store()) {
+            VisitForAccumulatorValue(value);
+            DCHECK(StoreDescriptor::ValueRegister().is(r3));
+            __ mov(StoreDescriptor::NameRegister(), Operand(key->value()));
+            __ LoadP(StoreDescriptor::ReceiverRegister(), MemOperand(sp));
+            CallStoreIC(key->LiteralFeedbackId());
+            PrepareForBailoutForId(key->id(), NO_REGISTERS);
+          } else {
+            VisitForEffect(value);
+          }
+          break;
+        }
+        // Duplicate receiver on stack.
+        __ LoadP(r3, MemOperand(sp));
+        __ push(r3);
+        VisitForStackValue(key);
+        VisitForStackValue(value);
+        if (property->emit_store()) {
+          __ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY));  // PropertyAttributes
+          __ push(r3);
+          __ CallRuntime(Runtime::kSetProperty, 4);
+        } else {
+          __ Drop(3);
+        }
+        break;
+      case ObjectLiteral::Property::PROTOTYPE:
+        // Duplicate receiver on stack.
+        __ LoadP(r3, MemOperand(sp));
+        __ push(r3);
+        VisitForStackValue(value);
+        if (property->emit_store()) {
+          __ CallRuntime(Runtime::kInternalSetPrototype, 2);
+        } else {
+          __ Drop(2);
+        }
+        break;
+      case ObjectLiteral::Property::GETTER:
+        accessor_table.lookup(key)->second->getter = value;
+        break;
+      case ObjectLiteral::Property::SETTER:
+        accessor_table.lookup(key)->second->setter = value;
+        break;
+    }
+  }
+
+  // Emit code to define accessors, using only a single call to the runtime for
+  // each pair of corresponding getters and setters.
+  for (AccessorTable::Iterator it = accessor_table.begin();
+       it != accessor_table.end(); ++it) {
+    __ LoadP(r3, MemOperand(sp));  // Duplicate receiver.
+    __ push(r3);
+    VisitForStackValue(it->first);
+    EmitAccessor(it->second->getter);
+    EmitAccessor(it->second->setter);
+    __ LoadSmiLiteral(r3, Smi::FromInt(NONE));
+    __ push(r3);
+    __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
+  }
+
+  if (expr->has_function()) {
+    DCHECK(result_saved);
+    __ LoadP(r3, MemOperand(sp));
+    __ push(r3);
+    __ CallRuntime(Runtime::kToFastProperties, 1);
+  }
+
+  if (result_saved) {
+    context()->PlugTOS();
+  } else {
+    context()->Plug(r3);
+  }
+}
+
+
+void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
+  Comment cmnt(masm_, "[ ArrayLiteral");
+
+  expr->BuildConstantElements(isolate());
+  int flags = expr->depth() == 1 ? ArrayLiteral::kShallowElements
+                                 : ArrayLiteral::kNoFlags;
+
+  ZoneList<Expression*>* subexprs = expr->values();
+  int length = subexprs->length();
+  Handle<FixedArray> constant_elements = expr->constant_elements();
+  DCHECK_EQ(2, constant_elements->length());
+  ElementsKind constant_elements_kind =
+      static_cast<ElementsKind>(Smi::cast(constant_elements->get(0))->value());
+  bool has_fast_elements = IsFastObjectElementsKind(constant_elements_kind);
+  Handle<FixedArrayBase> constant_elements_values(
+      FixedArrayBase::cast(constant_elements->get(1)));
+
+  AllocationSiteMode allocation_site_mode = TRACK_ALLOCATION_SITE;
+  if (has_fast_elements && !FLAG_allocation_site_pretenuring) {
+    // If the only customer of allocation sites is transitioning, then
+    // we can turn it off if we don't have anywhere else to transition to.
+    allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
+  }
+
+  __ LoadP(r6, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ LoadP(r6, FieldMemOperand(r6, JSFunction::kLiteralsOffset));
+  __ LoadSmiLiteral(r5, Smi::FromInt(expr->literal_index()));
+  __ mov(r4, Operand(constant_elements));
+  if (expr->depth() > 1 || length > JSObject::kInitialMaxFastElementArray) {
+    __ LoadSmiLiteral(r3, Smi::FromInt(flags));
+    __ Push(r6, r5, r4, r3);
+    __ CallRuntime(Runtime::kCreateArrayLiteral, 4);
+  } else {
+    FastCloneShallowArrayStub stub(isolate(), allocation_site_mode);
+    __ CallStub(&stub);
+  }
+
+  bool result_saved = false;  // Is the result saved to the stack?
+
+  // Emit code to evaluate all the non-constant subexpressions and to store
+  // them into the newly cloned array.
+  for (int i = 0; i < length; i++) {
+    Expression* subexpr = subexprs->at(i);
+    // If the subexpression is a literal or a simple materialized literal it
+    // is already set in the cloned array.
+    if (CompileTimeValue::IsCompileTimeValue(subexpr)) continue;
+
+    if (!result_saved) {
+      __ push(r3);
+      __ Push(Smi::FromInt(expr->literal_index()));
+      result_saved = true;
+    }
+    VisitForAccumulatorValue(subexpr);
+
+    if (IsFastObjectElementsKind(constant_elements_kind)) {
+      int offset = FixedArray::kHeaderSize + (i * kPointerSize);
+      __ LoadP(r8, MemOperand(sp, kPointerSize));  // Copy of array literal.
+      __ LoadP(r4, FieldMemOperand(r8, JSObject::kElementsOffset));
+      __ StoreP(result_register(), FieldMemOperand(r4, offset), r0);
+      // Update the write barrier for the array store.
+      __ RecordWriteField(r4, offset, result_register(), r5, kLRHasBeenSaved,
+                          kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                          INLINE_SMI_CHECK);
+    } else {
+      __ LoadSmiLiteral(r6, Smi::FromInt(i));
+      StoreArrayLiteralElementStub stub(isolate());
+      __ CallStub(&stub);
+    }
+
+    PrepareForBailoutForId(expr->GetIdForElement(i), NO_REGISTERS);
+  }
+
+  if (result_saved) {
+    __ pop();  // literal index
+    context()->PlugTOS();
+  } else {
+    context()->Plug(r3);
+  }
+}
+
+
+void FullCodeGenerator::VisitAssignment(Assignment* expr) {
+  DCHECK(expr->target()->IsValidReferenceExpression());
+
+  Comment cmnt(masm_, "[ Assignment");
+
+  Property* property = expr->target()->AsProperty();
+  LhsKind assign_type = GetAssignType(property);
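+  // For a compound assignment such as 'o.x += 1', the receiver (and the key,
+  // for keyed access) must survive both the load and the store, hence the
+  // extra stack copies below.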
+
+  // Evaluate LHS expression.
+  switch (assign_type) {
+    case VARIABLE:
+      // Nothing to do here.
+      break;
+    case NAMED_PROPERTY:
+      if (expr->is_compound()) {
+        // We need the receiver both on the stack and in the register.
+        VisitForStackValue(property->obj());
+        __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+      } else {
+        VisitForStackValue(property->obj());
+      }
+      break;
+    case NAMED_SUPER_PROPERTY:
+      VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+      EmitLoadHomeObject(property->obj()->AsSuperReference());
+      __ Push(result_register());
+      if (expr->is_compound()) {
+        const Register scratch = r4;
+        __ LoadP(scratch, MemOperand(sp, kPointerSize));
+        __ Push(scratch, result_register());
+      }
+      break;
+    case KEYED_SUPER_PROPERTY: {
+      const Register scratch = r4;
+      VisitForStackValue(property->obj()->AsSuperReference()->this_var());
+      EmitLoadHomeObject(property->obj()->AsSuperReference());
+      __ Move(scratch, result_register());
+      VisitForAccumulatorValue(property->key());
+      __ Push(scratch, result_register());
+      if (expr->is_compound()) {
+        const Register scratch1 = r5;
+        __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+        __ Push(scratch1, scratch, result_register());
+      }
+      break;
+    }
+    case KEYED_PROPERTY:
+      if (expr->is_compound()) {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        __ LoadP(LoadDescriptor::ReceiverRegister(),
+                 MemOperand(sp, 1 * kPointerSize));
+        __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+      } else {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+      }
+      break;
+  }
+
+  // For compound assignments we need another deoptimization point after the
+  // variable/property load.
+  if (expr->is_compound()) {
+    {
+      AccumulatorValueContext context(this);
+      switch (assign_type) {
+        case VARIABLE:
+          EmitVariableLoad(expr->target()->AsVariableProxy());
+          PrepareForBailout(expr->target(), TOS_REG);
+          break;
+        case NAMED_PROPERTY:
+          EmitNamedPropertyLoad(property);
+          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          break;
+        case NAMED_SUPER_PROPERTY:
+          EmitNamedSuperPropertyLoad(property);
+          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          break;
+        case KEYED_SUPER_PROPERTY:
+          EmitKeyedSuperPropertyLoad(property);
+          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          break;
+        case KEYED_PROPERTY:
+          EmitKeyedPropertyLoad(property);
+          PrepareForBailoutForId(property->LoadId(), TOS_REG);
+          break;
+      }
+    }
+
+    Token::Value op = expr->binary_op();
+    __ push(r3);  // Left operand goes on the stack.
+    VisitForAccumulatorValue(expr->value());
+
+    OverwriteMode mode = expr->value()->ResultOverwriteAllowed()
+                             ? OVERWRITE_RIGHT
+                             : NO_OVERWRITE;
+    SetSourcePosition(expr->position() + 1);
+    AccumulatorValueContext context(this);
+    if (ShouldInlineSmiCase(op)) {
+      EmitInlineSmiBinaryOp(expr->binary_operation(), op, mode, expr->target(),
+                            expr->value());
+    } else {
+      EmitBinaryOp(expr->binary_operation(), op, mode);
+    }
+
+    // Deoptimization point in case the binary operation may have side effects.
+    PrepareForBailout(expr->binary_operation(), TOS_REG);
+  } else {
+    VisitForAccumulatorValue(expr->value());
+  }
+
+  // Record source position before possible IC call.
+  SetSourcePosition(expr->position());
+
+  // Store the value.
+  switch (assign_type) {
+    case VARIABLE:
+      EmitVariableAssignment(expr->target()->AsVariableProxy()->var(),
+                             expr->op());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      context()->Plug(r3);
+      break;
+    case NAMED_PROPERTY:
+      EmitNamedPropertyAssignment(expr);
+      break;
+    case NAMED_SUPER_PROPERTY:
+      EmitNamedSuperPropertyStore(property);
+      context()->Plug(r3);
+      break;
+    case KEYED_SUPER_PROPERTY:
+      EmitKeyedSuperPropertyStore(property);
+      context()->Plug(r3);
+      break;
+    case KEYED_PROPERTY:
+      EmitKeyedPropertyAssignment(expr);
+      break;
+  }
+}
+
+
+void FullCodeGenerator::VisitYield(Yield* expr) {
+  Comment cmnt(masm_, "[ Yield");
+  // Evaluate yielded value first; the initial iterator definition depends on
+  // this.  It stays on the stack while we update the iterator.
+  VisitForStackValue(expr->expression());
+
+  switch (expr->yield_kind()) {
+    case Yield::kSuspend:
+      // Pop value from top-of-stack slot; box result into result register.
+      EmitCreateIteratorResult(false);
+      __ push(result_register());
+    // Fall through.
+    case Yield::kInitial: {
+      Label suspend, continuation, post_runtime, resume;
+
+      __ b(&suspend);
+
+      __ bind(&continuation);
+      __ b(&resume);
+
+      __ bind(&suspend);
+      VisitForAccumulatorValue(expr->generator_object());
+      DCHECK(continuation.pos() > 0 && Smi::IsValid(continuation.pos()));
+      __ LoadSmiLiteral(r4, Smi::FromInt(continuation.pos()));
+      __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+                r0);
+      __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
+      __ mr(r4, cp);
+      __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
+                          kLRHasBeenSaved, kDontSaveFPRegs);
+      __ addi(r4, fp, Operand(StandardFrameConstants::kExpressionsOffset));
+      __ cmp(sp, r4);
+      __ beq(&post_runtime);
+      __ push(r3);  // generator object
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ bind(&post_runtime);
+      __ pop(result_register());
+      EmitReturnSequence();
+
+      __ bind(&resume);
+      context()->Plug(result_register());
+      break;
+    }
+
+    case Yield::kFinal: {
+      VisitForAccumulatorValue(expr->generator_object());
+      __ LoadSmiLiteral(r4, Smi::FromInt(JSGeneratorObject::kGeneratorClosed));
+      __ StoreP(r4, FieldMemOperand(result_register(),
+                                    JSGeneratorObject::kContinuationOffset),
+                r0);
+      // Pop value from top-of-stack slot, box result into result register.
+      EmitCreateIteratorResult(true);
+      EmitUnwindBeforeReturn();
+      EmitReturnSequence();
+      break;
+    }
+
+    case Yield::kDelegating: {
+      VisitForStackValue(expr->generator_object());
+
+      // Initial stack layout is as follows:
+      // [sp + 1 * kPointerSize] iter
+      // [sp + 0 * kPointerSize] g
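+
+      // Overall this implements 'yield*': call iter.next(received) (or
+      // iter.throw(e) when resumed with an exception), re-yield each result
+      // object without re-boxing until result.done is true, and then produce
+      // result.value as the value of the yield* expression.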
+
+      Label l_catch, l_try, l_suspend, l_continuation, l_resume;
+      Label l_next, l_call;
+      Register load_receiver = LoadDescriptor::ReceiverRegister();
+      Register load_name = LoadDescriptor::NameRegister();
+
+      // Initial send value is undefined.
+      __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+      __ b(&l_next);
+
+      // catch (e) { receiver = iter; f = 'throw'; arg = e; goto l_call; }
+      __ bind(&l_catch);
+      handler_table()->set(expr->index(), Smi::FromInt(l_catch.pos()));
+      __ LoadRoot(load_name, Heap::kthrow_stringRootIndex);  // "throw"
+      __ LoadP(r6, MemOperand(sp, 1 * kPointerSize));        // iter
+      __ Push(load_name, r6, r3);  // "throw", iter, except
+      __ b(&l_call);
+
+      // try { received = %yield result }
+      // Shuffle the received result above a try handler and yield it without
+      // re-boxing.
+      __ bind(&l_try);
+      __ pop(r3);  // result
+      __ PushTryHandler(StackHandler::CATCH, expr->index());
+      const int handler_size = StackHandlerConstants::kSize;
+      __ push(r3);  // result
+      __ b(&l_suspend);
+      __ bind(&l_continuation);
+      __ b(&l_resume);
+      __ bind(&l_suspend);
+      const int generator_object_depth = kPointerSize + handler_size;
+      __ LoadP(r3, MemOperand(sp, generator_object_depth));
+      __ push(r3);  // g
+      DCHECK(l_continuation.pos() > 0 && Smi::IsValid(l_continuation.pos()));
+      __ LoadSmiLiteral(r4, Smi::FromInt(l_continuation.pos()));
+      __ StoreP(r4, FieldMemOperand(r3, JSGeneratorObject::kContinuationOffset),
+                r0);
+      __ StoreP(cp, FieldMemOperand(r3, JSGeneratorObject::kContextOffset), r0);
+      __ mr(r4, cp);
+      __ RecordWriteField(r3, JSGeneratorObject::kContextOffset, r4, r5,
+                          kLRHasBeenSaved, kDontSaveFPRegs);
+      __ CallRuntime(Runtime::kSuspendJSGeneratorObject, 1);
+      __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ pop(r3);  // result
+      EmitReturnSequence();
+      __ bind(&l_resume);  // received in r3
+      __ PopTryHandler();
+
+      // receiver = iter; f = 'next'; arg = received;
+      __ bind(&l_next);
+
+      __ LoadRoot(load_name, Heap::knext_stringRootIndex);  // "next"
+      __ LoadP(r6, MemOperand(sp, 1 * kPointerSize));       // iter
+      __ Push(load_name, r6, r3);  // "next", iter, received
+
+      // result = receiver[f](arg);
+      __ bind(&l_call);
+      __ LoadP(load_receiver, MemOperand(sp, kPointerSize));
+      __ LoadP(load_name, MemOperand(sp, 2 * kPointerSize));
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Operand(SmiFromSlot(expr->KeyedLoadFeedbackSlot())));
+      }
+      Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+      CallIC(ic, TypeFeedbackId::None());
+      __ mr(r4, r3);
+      __ StoreP(r4, MemOperand(sp, 2 * kPointerSize));
+      CallFunctionStub stub(isolate(), 1, CALL_AS_METHOD);
+      __ CallStub(&stub);
+
+      __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+      __ Drop(1);  // The function is still on the stack; drop it.
+
+      // if (!result.done) goto l_try;
+      __ Move(load_receiver, r3);
+
+      __ push(load_receiver);                               // save result
+      __ LoadRoot(load_name, Heap::kdone_stringRootIndex);  // "done"
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Operand(SmiFromSlot(expr->DoneFeedbackSlot())));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);  // r0=result.done
+      Handle<Code> bool_ic = ToBooleanStub::GetUninitialized(isolate());
+      CallIC(bool_ic);
+      __ cmpi(r3, Operand::Zero());
+      __ beq(&l_try);
+
+      // result.value
+      __ pop(load_receiver);                                 // result
+      __ LoadRoot(load_name, Heap::kvalue_stringRootIndex);  // "value"
+      if (FLAG_vector_ics) {
+        __ mov(VectorLoadICDescriptor::SlotRegister(),
+               Operand(SmiFromSlot(expr->ValueFeedbackSlot())));
+      }
+      CallLoadIC(NOT_CONTEXTUAL);     // r3=result.value
+      context()->DropAndPlug(2, r3);  // drop iter and g
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::EmitGeneratorResume(
+    Expression* generator, Expression* value,
+    JSGeneratorObject::ResumeMode resume_mode) {
+  // The value stays in r3, and is ultimately read by the resumed generator, as
+  // if CallRuntime(Runtime::kSuspendJSGeneratorObject) returned it. Or it
+  // is read to throw the value when the resumed generator is already closed.
+  // r4 will hold the generator object until the activation has been resumed.
+  VisitForStackValue(generator);
+  VisitForAccumulatorValue(value);
+  __ pop(r4);
+
+  // Check generator state.
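+  // The continuation field encodes the state: a positive smi is the offset
+  // at which the generator was suspended, zero means closed, and a negative
+  // value means it is currently executing.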
+  Label wrong_state, closed_state, done;
+  __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
+  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
+  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
+  __ CmpSmiLiteral(r6, Smi::FromInt(0), r0);
+  __ beq(&closed_state);
+  __ blt(&wrong_state);
+
+  // Load suspended function and context.
+  __ LoadP(cp, FieldMemOperand(r4, JSGeneratorObject::kContextOffset));
+  __ LoadP(r7, FieldMemOperand(r4, JSGeneratorObject::kFunctionOffset));
+
+  // Load receiver and store as the first argument.
+  __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kReceiverOffset));
+  __ push(r5);
+
+  // Push holes for the rest of the arguments to the generator function.
+  __ LoadP(r6, FieldMemOperand(r7, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadWordArith(
+      r6, FieldMemOperand(r6, SharedFunctionInfo::kFormalParameterCountOffset));
+  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+  Label argument_loop, push_frame;
+#if V8_TARGET_ARCH_PPC64
+  __ cmpi(r6, Operand::Zero());
+  __ beq(&push_frame);
+#else
+  __ SmiUntag(r6, SetRC);
+  __ beq(&push_frame, cr0);
+#endif
+  __ mtctr(r6);
+  __ bind(&argument_loop);
+  __ push(r5);
+  __ bdnz(&argument_loop);
+
+  // Enter a new JavaScript frame, and initialize its slots as they were when
+  // the generator was suspended.
+  Label resume_frame;
+  __ bind(&push_frame);
+  __ b(&resume_frame, SetLK);
+  __ b(&done);
+  __ bind(&resume_frame);
+  // lr = return address.
+  // fp = caller's frame pointer.
+  // cp = callee's context.
+  // r7 = callee's JS function.
+  __ PushFixedFrame(r7);
+  // Adjust FP to point to saved FP.
+  __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+  // Load the operand stack size.
+  __ LoadP(r6, FieldMemOperand(r4, JSGeneratorObject::kOperandStackOffset));
+  __ LoadP(r6, FieldMemOperand(r6, FixedArray::kLengthOffset));
+  __ SmiUntag(r6, SetRC);
+
+  // If we are sending a value and there is no operand stack, we can jump back
+  // in directly.
+  Label call_resume;
+  if (resume_mode == JSGeneratorObject::NEXT) {
+    Label slow_resume;
+    __ bne(&slow_resume, cr0);
+    __ LoadP(ip, FieldMemOperand(r7, JSFunction::kCodeEntryOffset));
+#if V8_OOL_CONSTANT_POOL
+    {
+      ConstantPoolUnavailableScope constant_pool_unavailable(masm_);
+      // Load the new code object's constant pool pointer.
+      __ LoadP(kConstantPoolRegister,
+               MemOperand(ip, Code::kConstantPoolOffset - Code::kHeaderSize));
+#endif
+      __ LoadP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset));
+      __ SmiUntag(r5);
+      __ add(ip, ip, r5);
+      __ LoadSmiLiteral(r5,
+                        Smi::FromInt(JSGeneratorObject::kGeneratorExecuting));
+      __ StoreP(r5, FieldMemOperand(r4, JSGeneratorObject::kContinuationOffset),
+                r0);
+      __ Jump(ip);
+      __ bind(&slow_resume);
+#if V8_OOL_CONSTANT_POOL
+    }
+#endif
+  } else {
+    __ beq(&call_resume, cr0);
+  }
+
+  // Otherwise, we push holes for the operand stack and call the runtime to fix
+  // up the stack and the handlers.
+  Label operand_loop;
+  __ mtctr(r6);
+  __ bind(&operand_loop);
+  __ push(r5);
+  __ bdnz(&operand_loop);
+
+  __ bind(&call_resume);
+  DCHECK(!result_register().is(r4));
+  __ Push(r4, result_register());
+  __ Push(Smi::FromInt(resume_mode));
+  __ CallRuntime(Runtime::kResumeJSGeneratorObject, 3);
+  // Not reached: the runtime call returns elsewhere.
+  __ stop("not-reached");
+
+  // Reach here when generator is closed.
+  __ bind(&closed_state);
+  if (resume_mode == JSGeneratorObject::NEXT) {
+    // Return completed iterator result when generator is closed.
+    __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+    __ push(r5);
+    // Pop value from top-of-stack slot; box result into result register.
+    EmitCreateIteratorResult(true);
+  } else {
+    // Throw the provided value.
+    __ push(r3);
+    __ CallRuntime(Runtime::kThrow, 1);
+  }
+  __ b(&done);
+
+  // Throw error if we attempt to operate on a running generator.
+  __ bind(&wrong_state);
+  __ push(r4);
+  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
+
+  __ bind(&done);
+  context()->Plug(result_register());
+}
+
+
+void FullCodeGenerator::EmitCreateIteratorResult(bool done) {
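+  // Allocate and initialize an iterator result object of the shape
+  // { value: <popped from the stack>, done: <done> }.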
+  Label gc_required;
+  Label allocated;
+
+  const int instance_size = 5 * kPointerSize;
+  DCHECK_EQ(isolate()->native_context()->iterator_result_map()->instance_size(),
+            instance_size);
+
+  __ Allocate(instance_size, r3, r5, r6, &gc_required, TAG_OBJECT);
+  __ b(&allocated);
+
+  __ bind(&gc_required);
+  __ Push(Smi::FromInt(instance_size));
+  __ CallRuntime(Runtime::kAllocateInNewSpace, 1);
+  __ LoadP(context_register(),
+           MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+  __ bind(&allocated);
+  __ LoadP(r4, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ LoadP(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset));
+  __ LoadP(r4, ContextOperand(r4, Context::ITERATOR_RESULT_MAP_INDEX));
+  __ pop(r5);
+  __ mov(r6, Operand(isolate()->factory()->ToBoolean(done)));
+  __ mov(r7, Operand(isolate()->factory()->empty_fixed_array()));
+  __ StoreP(r4, FieldMemOperand(r3, HeapObject::kMapOffset), r0);
+  __ StoreP(r7, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0);
+  __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0);
+  __ StoreP(r5,
+            FieldMemOperand(r3, JSGeneratorObject::kResultValuePropertyOffset),
+            r0);
+  __ StoreP(r6,
+            FieldMemOperand(r3, JSGeneratorObject::kResultDonePropertyOffset),
+            r0);
+
+  // Only the value field needs a write barrier, as the other values are in the
+  // root set.
+  __ RecordWriteField(r3, JSGeneratorObject::kResultValuePropertyOffset, r5, r6,
+                      kLRHasBeenSaved, kDontSaveFPRegs);
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!prop->IsSuperAccess());
+
+  __ mov(LoadDescriptor::NameRegister(), Operand(key->value()));
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+    CallLoadIC(NOT_CONTEXTUAL);
+  } else {
+    CallLoadIC(NOT_CONTEXTUAL, prop->PropertyFeedbackId());
+  }
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyLoad(Property* prop) {
+  // Stack: receiver, home_object.
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  DCHECK(prop->IsSuperAccess());
+
+  __ Push(key->value());
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyLoad(Property* prop) {
+  SetSourcePosition(prop->position());
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  if (FLAG_vector_ics) {
+    __ mov(VectorLoadICDescriptor::SlotRegister(),
+           Operand(SmiFromSlot(prop->PropertyFeedbackSlot())));
+    CallIC(ic);
+  } else {
+    CallIC(ic, prop->PropertyFeedbackId());
+  }
+}
+
+
+void FullCodeGenerator::EmitKeyedSuperPropertyLoad(Property* prop) {
+  // Stack: receiver, home_object, key.
+  SetSourcePosition(prop->position());
+
+  __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+}
+
+
+void FullCodeGenerator::EmitInlineSmiBinaryOp(BinaryOperation* expr,
+                                              Token::Value op,
+                                              OverwriteMode mode,
+                                              Expression* left_expr,
+                                              Expression* right_expr) {
+  Label done, smi_case, stub_call;
+
+  Register scratch1 = r5;
+  Register scratch2 = r6;
+
+  // Get the arguments.
+  Register left = r4;
+  Register right = r3;
+  __ pop(left);
+
+  // Perform combined smi check on both operands.
+  __ orx(scratch1, left, right);
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpPatchSite patch_site(masm_);
+  patch_site.EmitJumpIfSmi(scratch1, &smi_case);
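+  // The patch site records the smi-check jump above so that the BinaryOpIC
+  // can later rewrite the inlined smi code when the types generalize.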
+
+  __ bind(&stub_call);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+  CallIC(code, expr->BinaryOperationFeedbackId());
+  patch_site.EmitPatchInfo();
+  __ b(&done);
+
+  __ bind(&smi_case);
+  // Smi case. This code works the same way as the smi-smi case in the type
+  // recording binary operation stub.
+  switch (op) {
+    case Token::SAR:
+      __ GetLeastBitsFromSmi(scratch1, right, 5);
+      __ ShiftRightArith(right, left, scratch1);
+      __ ClearRightImm(right, right, Operand(kSmiTagSize + kSmiShiftSize));
+      break;
+    case Token::SHL: {
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+#if V8_TARGET_ARCH_PPC64
+      __ ShiftLeft_(right, left, scratch2);
+#else
+      __ SmiUntag(scratch1, left);
+      __ ShiftLeft_(scratch1, scratch1, scratch2);
+      // Check that the *signed* result fits in a smi
+      __ JumpIfNotSmiCandidate(scratch1, scratch2, &stub_call);
+      __ SmiTag(right, scratch1);
+#endif
+      break;
+    }
+    case Token::SHR: {
+      __ SmiUntag(scratch1, left);
+      __ GetLeastBitsFromSmi(scratch2, right, 5);
+      __ srw(scratch1, scratch1, scratch2);
+      // Unsigned shift is not allowed to produce a negative number.
+      __ JumpIfNotUnsignedSmiCandidate(scratch1, r0, &stub_call);
+      __ SmiTag(right, scratch1);
+      break;
+    }
+    case Token::ADD: {
+      __ AddAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+      __ bne(&stub_call, cr0);
+      __ mr(right, scratch1);
+      break;
+    }
+    case Token::SUB: {
+      __ SubAndCheckForOverflow(scratch1, left, right, scratch2, r0);
+      __ bne(&stub_call, cr0);
+      __ mr(right, scratch1);
+      break;
+    }
+    case Token::MUL: {
+      Label mul_zero;
+#if V8_TARGET_ARCH_PPC64
+      // Remove tag from both operands.
+      __ SmiUntag(ip, right);
+      __ SmiUntag(r0, left);
+      __ Mul(scratch1, r0, ip);
+      // Check for overflowing the smi range - no overflow if higher 33 bits of
+      // the result are identical.
+      __ TestIfInt32(scratch1, scratch2, ip);
+      __ bne(&stub_call);
+#else
+      __ SmiUntag(ip, right);
+      __ mullw(scratch1, left, ip);
+      __ mulhw(scratch2, left, ip);
+      // Check for overflowing the smi range - no overflow if higher 33 bits of
+      // the result are identical.
+      __ TestIfInt32(scratch2, scratch1, ip);
+      __ bne(&stub_call);
+#endif
+      // Go slow on zero result to handle -0.
+      __ cmpi(scratch1, Operand::Zero());
+      __ beq(&mul_zero);
+#if V8_TARGET_ARCH_PPC64
+      __ SmiTag(right, scratch1);
+#else
+      __ mr(right, scratch1);
+#endif
+      __ b(&done);
+      // We need -0 if we were multiplying a negative number by 0 to get 0.
+      // We know one of the operands was zero.
+      __ bind(&mul_zero);
+      __ add(scratch2, right, left);
+      __ cmpi(scratch2, Operand::Zero());
+      __ blt(&stub_call);
+      __ LoadSmiLiteral(right, Smi::FromInt(0));
+      break;
+    }
+    case Token::BIT_OR:
+      __ orx(right, left, right);
+      break;
+    case Token::BIT_AND:
+      __ and_(right, left, right);
+      break;
+    case Token::BIT_XOR:
+      __ xor_(right, left, right);
+      break;
+    default:
+      UNREACHABLE();
+  }
+
+  __ bind(&done);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
+  // Constructor is in r3.
+  DCHECK(lit != NULL);
+  __ push(r3);
+
+  // No access check is needed here since the constructor is created by the
+  // class literal.
+  Register scratch = r4;
+  __ LoadP(scratch,
+           FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset));
+  __ push(scratch);
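+  // Stack layout for the loop below: prototype on top, constructor
+  // underneath.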
+
+  for (int i = 0; i < lit->properties()->length(); i++) {
+    ObjectLiteral::Property* property = lit->properties()->at(i);
+    Literal* key = property->key()->AsLiteral();
+    Expression* value = property->value();
+    DCHECK(key != NULL);
+
+    if (property->is_static()) {
+      __ LoadP(scratch, MemOperand(sp, kPointerSize));  // constructor
+    } else {
+      __ LoadP(scratch, MemOperand(sp, 0));  // prototype
+    }
+    __ push(scratch);
+    VisitForStackValue(key);
+    VisitForStackValue(value);
+
+    switch (property->kind()) {
+      case ObjectLiteral::Property::CONSTANT:
+      case ObjectLiteral::Property::MATERIALIZED_LITERAL:
+      case ObjectLiteral::Property::COMPUTED:
+      case ObjectLiteral::Property::PROTOTYPE:
+        __ CallRuntime(Runtime::kDefineClassMethod, 3);
+        break;
+
+      case ObjectLiteral::Property::GETTER:
+        __ CallRuntime(Runtime::kDefineClassGetter, 3);
+        break;
+
+      case ObjectLiteral::Property::SETTER:
+        __ CallRuntime(Runtime::kDefineClassSetter, 3);
+        break;
+
+      default:
+        UNREACHABLE();
+    }
+  }
+
+  // prototype
+  __ CallRuntime(Runtime::kToFastProperties, 1);
+
+  // constructor
+  __ CallRuntime(Runtime::kToFastProperties, 1);
+}
+
+
+void FullCodeGenerator::EmitBinaryOp(BinaryOperation* expr, Token::Value op,
+                                     OverwriteMode mode) {
+  __ pop(r4);
+  Handle<Code> code = CodeFactory::BinaryOpIC(isolate(), op, mode).code();
+  JumpPatchSite patch_site(masm_);  // unbound, signals no inlined smi code.
+  CallIC(code, expr->BinaryOperationFeedbackId());
+  patch_site.EmitPatchInfo();
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitAssignment(Expression* expr) {
+  DCHECK(expr->IsValidReferenceExpression());
+
+  Property* prop = expr->AsProperty();
+  LhsKind assign_type = GetAssignType(prop);
+
+  switch (assign_type) {
+    case VARIABLE: {
+      Variable* var = expr->AsVariableProxy()->var();
+      EffectContext context(this);
+      EmitVariableAssignment(var, Token::ASSIGN);
+      break;
+    }
+    case NAMED_PROPERTY: {
+      __ push(r3);  // Preserve value.
+      VisitForAccumulatorValue(prop->obj());
+      __ Move(StoreDescriptor::ReceiverRegister(), r3);
+      __ pop(StoreDescriptor::ValueRegister());  // Restore value.
+      __ mov(StoreDescriptor::NameRegister(),
+             Operand(prop->key()->AsLiteral()->value()));
+      CallStoreIC();
+      break;
+    }
+    case NAMED_SUPER_PROPERTY: {
+      __ Push(r3);
+      VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+      EmitLoadHomeObject(prop->obj()->AsSuperReference());
+      // stack: value, this; r3: home_object
+      Register scratch = r5;
+      Register scratch2 = r6;
+      __ mr(scratch, result_register());                  // home_object
+      __ LoadP(r3, MemOperand(sp, kPointerSize));         // value
+      __ LoadP(scratch2, MemOperand(sp, 0));              // this
+      __ StoreP(scratch2, MemOperand(sp, kPointerSize));  // this
+      __ StoreP(scratch, MemOperand(sp, 0));              // home_object
+      // stack: this, home_object; r3: value
+      EmitNamedSuperPropertyStore(prop);
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      __ Push(r3);
+      VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+      EmitLoadHomeObject(prop->obj()->AsSuperReference());
+      __ Push(result_register());
+      VisitForAccumulatorValue(prop->key());
+      Register scratch = r5;
+      Register scratch2 = r6;
+      __ LoadP(scratch2, MemOperand(sp, 2 * kPointerSize));  // value
+      // stack: value, this, home_object; r3: key, r6: value
+      __ LoadP(scratch, MemOperand(sp, kPointerSize));  // this
+      __ StoreP(scratch, MemOperand(sp, 2 * kPointerSize));
+      __ LoadP(scratch, MemOperand(sp, 0));  // home_object
+      __ StoreP(scratch, MemOperand(sp, kPointerSize));
+      __ StoreP(r3, MemOperand(sp, 0));
+      __ Move(r3, scratch2);
+      // stack: this, home_object, key; r3: value.
+      EmitKeyedSuperPropertyStore(prop);
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ push(r3);  // Preserve value.
+      VisitForStackValue(prop->obj());
+      VisitForAccumulatorValue(prop->key());
+      __ Move(StoreDescriptor::NameRegister(), r3);
+      __ Pop(StoreDescriptor::ValueRegister(),
+             StoreDescriptor::ReceiverRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+      CallIC(ic);
+      break;
+    }
+  }
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitStoreToStackLocalOrContextSlot(
+    Variable* var, MemOperand location) {
+  __ StoreP(result_register(), location, r0);
+  if (var->IsContextSlot()) {
+    // RecordWrite may destroy all its register arguments.
+    __ mr(r6, result_register());
+    int offset = Context::SlotOffset(var->index());
+    __ RecordWriteContextSlot(r4, offset, r6, r5, kLRHasBeenSaved,
+                              kDontSaveFPRegs);
+  }
+}
+
+
+void FullCodeGenerator::EmitVariableAssignment(Variable* var, Token::Value op) {
+  if (var->IsUnallocated()) {
+    // Global var, const, or let.
+    __ mov(StoreDescriptor::NameRegister(), Operand(var->name()));
+    __ LoadP(StoreDescriptor::ReceiverRegister(), GlobalObjectOperand());
+    CallStoreIC();
+
+  } else if (op == Token::INIT_CONST_LEGACY) {
+    // Const initializers need a write barrier.
+    DCHECK(!var->IsParameter());  // No const parameters.
+    if (var->IsLookupSlot()) {
+      __ push(r3);
+      __ mov(r3, Operand(var->name()));
+      __ Push(cp, r3);  // Context and name.
+      __ CallRuntime(Runtime::kInitializeLegacyConstLookupSlot, 3);
+    } else {
+      DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+      Label skip;
+      MemOperand location = VarOperand(var, r4);
+      __ LoadP(r5, location);
+      __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+      __ bne(&skip);
+      EmitStoreToStackLocalOrContextSlot(var, location);
+      __ bind(&skip);
+    }
+
+  } else if (var->mode() == LET && op != Token::INIT_LET) {
+    // Non-initializing assignment to let variable needs a write barrier.
+    DCHECK(!var->IsLookupSlot());
+    DCHECK(var->IsStackAllocated() || var->IsContextSlot());
+    Label assign;
+    MemOperand location = VarOperand(var, r4);
+    __ LoadP(r6, location);
+    __ CompareRoot(r6, Heap::kTheHoleValueRootIndex);
+    __ bne(&assign);
+    __ mov(r6, Operand(var->name()));
+    __ push(r6);
+    __ CallRuntime(Runtime::kThrowReferenceError, 1);
+    // Perform the assignment.
+    __ bind(&assign);
+    EmitStoreToStackLocalOrContextSlot(var, location);
+
+  } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
+    if (var->IsLookupSlot()) {
+      // Assignment to var.
+      __ push(r3);  // Value.
+      __ mov(r4, Operand(var->name()));
+      __ mov(r3, Operand(Smi::FromInt(strict_mode())));
+      __ Push(cp, r4, r3);  // Context, name, strict mode.
+      __ CallRuntime(Runtime::kStoreLookupSlot, 4);
+    } else {
+      // Assignment to var or initializing assignment to let/const in harmony
+      // mode.
+      DCHECK((var->IsStackAllocated() || var->IsContextSlot()));
+      MemOperand location = VarOperand(var, r4);
+      if (generate_debug_code_ && op == Token::INIT_LET) {
+        // Check for an uninitialized let binding.
+        __ LoadP(r5, location);
+        __ CompareRoot(r5, Heap::kTheHoleValueRootIndex);
+        __ Check(eq, kLetBindingReInitialization);
+      }
+      EmitStoreToStackLocalOrContextSlot(var, location);
+    }
+  }
+  // Non-initializing assignments to consts are ignored.
+}
+
+
+void FullCodeGenerator::EmitNamedPropertyAssignment(Assignment* expr) {
+  // Assignment to a property, using a named store IC.
+  Property* prop = expr->target()->AsProperty();
+  DCHECK(prop != NULL);
+  DCHECK(prop->key()->IsLiteral());
+
+  // Record source code position before IC call.
+  SetSourcePosition(expr->position());
+  __ mov(StoreDescriptor::NameRegister(),
+         Operand(prop->key()->AsLiteral()->value()));
+  __ pop(StoreDescriptor::ReceiverRegister());
+  CallStoreIC(expr->AssignmentFeedbackId());
+
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitNamedSuperPropertyStore(Property* prop) {
+  // Assignment to named property of super.
+  // r3 : value
+  // stack : receiver ('this'), home_object
+  DCHECK(prop != NULL);
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(key != NULL);
+
+  __ Push(key->value());
+  __ Push(r3);
+  __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreToSuper_Strict
+                                          : Runtime::kStoreToSuper_Sloppy),
+                 4);
+}
+
+
+void FullCodeGenerator::EmitKeyedSuperPropertyStore(Property* prop) {
+  // Assignment to keyed property of super.
+  // r3 : value
+  // stack : receiver ('this'), home_object, key
+  DCHECK(prop != NULL);
+
+  __ Push(r3);
+  __ CallRuntime((strict_mode() == STRICT ? Runtime::kStoreKeyedToSuper_Strict
+                                          : Runtime::kStoreKeyedToSuper_Sloppy),
+                 4);
+}
+
+
+void FullCodeGenerator::EmitKeyedPropertyAssignment(Assignment* expr) {
+  // Assignment to a property, using a keyed store IC.
+
+  // Record source code position before IC call.
+  SetSourcePosition(expr->position());
+  __ Pop(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister());
+  DCHECK(StoreDescriptor::ValueRegister().is(r3));
+
+  Handle<Code> ic = CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+  CallIC(ic, expr->AssignmentFeedbackId());
+
+  PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::VisitProperty(Property* expr) {
+  Comment cmnt(masm_, "[ Property");
+  Expression* key = expr->key();
+
+  if (key->IsPropertyName()) {
+    if (!expr->IsSuperAccess()) {
+      VisitForAccumulatorValue(expr->obj());
+      __ Move(LoadDescriptor::ReceiverRegister(), r3);
+      EmitNamedPropertyLoad(expr);
+    } else {
+      VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+      EmitLoadHomeObject(expr->obj()->AsSuperReference());
+      __ Push(result_register());
+      EmitNamedSuperPropertyLoad(expr);
+    }
+    PrepareForBailoutForId(expr->LoadId(), TOS_REG);
+    context()->Plug(r3);
+  } else {
+    if (!expr->IsSuperAccess()) {
+      VisitForStackValue(expr->obj());
+      VisitForAccumulatorValue(expr->key());
+      __ Move(LoadDescriptor::NameRegister(), r3);
+      __ pop(LoadDescriptor::ReceiverRegister());
+      EmitKeyedPropertyLoad(expr);
+    } else {
+      VisitForStackValue(expr->obj()->AsSuperReference()->this_var());
+      EmitLoadHomeObject(expr->obj()->AsSuperReference());
+      __ Push(result_register());
+      VisitForStackValue(expr->key());
+      EmitKeyedSuperPropertyLoad(expr);
+    }
+    context()->Plug(r3);
+  }
+}
+
+
+void FullCodeGenerator::CallIC(Handle<Code> code, TypeFeedbackId ast_id) {
+  ic_total_count_++;
+  __ Call(code, RelocInfo::CODE_TARGET, ast_id);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+
+  CallICState::CallType call_type =
+      callee->IsVariableProxy() ? CallICState::FUNCTION : CallICState::METHOD;
+
+  // Get the target function.
+  if (call_type == CallICState::FUNCTION) {
+    {
+      StackValueContext context(this);
+      EmitVariableLoad(callee->AsVariableProxy());
+      PrepareForBailout(callee, NO_REGISTERS);
+    }
+    // Push undefined as receiver. This is patched in the method prologue if it
+    // is a sloppy mode method.
+    __ Push(isolate()->factory()->undefined_value());
+  } else {
+    // Load the function from the receiver.
+    DCHECK(callee->IsProperty());
+    DCHECK(!callee->AsProperty()->IsSuperAccess());
+    __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+    EmitNamedPropertyLoad(callee->AsProperty());
+    PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+    // Push the target function under the receiver.
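+    // Duplicate the receiver on top of the stack, then overwrite the old
+    // receiver slot with the function.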
+    __ LoadP(ip, MemOperand(sp, 0));
+    __ push(ip);
+    __ StoreP(r3, MemOperand(sp, kPointerSize));
+  }
+
+  EmitCall(expr, call_type);
+}
+
+
+void FullCodeGenerator::EmitSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  Literal* key = prop->key()->AsLiteral();
+  DCHECK(!key->value()->IsSmi());
+  // Load the function from the receiver.
+  const Register scratch = r4;
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ mr(scratch, r3);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ Push(scratch, r3, r3, scratch);
+  __ Push(key->value());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - this (receiver) <-- LoadFromSuper will pop here and below.
+  //  - home_object
+  //  - key
+  __ CallRuntime(Runtime::kLoadFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ StoreP(r3, MemOperand(sp, kPointerSize));
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
+// Code common for calls using the IC.
+void FullCodeGenerator::EmitKeyedCallWithLoadIC(Call* expr, Expression* key) {
+  // Load the key.
+  VisitForAccumulatorValue(key);
+
+  Expression* callee = expr->expression();
+
+  // Load the function from the receiver.
+  DCHECK(callee->IsProperty());
+  __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+  __ Move(LoadDescriptor::NameRegister(), r3);
+  EmitKeyedPropertyLoad(callee->AsProperty());
+  PrepareForBailoutForId(callee->AsProperty()->LoadId(), TOS_REG);
+
+  // Push the target function under the receiver.
+  __ LoadP(ip, MemOperand(sp, 0));
+  __ push(ip);
+  __ StoreP(r3, MemOperand(sp, kPointerSize));
+
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
+void FullCodeGenerator::EmitKeyedSuperCallWithLoadIC(Call* expr) {
+  Expression* callee = expr->expression();
+  DCHECK(callee->IsProperty());
+  Property* prop = callee->AsProperty();
+  DCHECK(prop->IsSuperAccess());
+
+  SetSourcePosition(prop->position());
+  // Load the function from the receiver.
+  const Register scratch = r4;
+  SuperReference* super_ref = prop->obj()->AsSuperReference();
+  EmitLoadHomeObject(super_ref);
+  __ Push(r3);
+  VisitForAccumulatorValue(super_ref->this_var());
+  __ Push(r3);
+  __ Push(r3);
+  __ LoadP(scratch, MemOperand(sp, kPointerSize * 2));
+  __ Push(scratch);
+  VisitForStackValue(prop->key());
+
+  // Stack here:
+  //  - home_object
+  //  - this (receiver)
+  //  - this (receiver) <-- LoadKeyedFromSuper will pop here and below.
+  //  - home_object
+  //  - key
+  __ CallRuntime(Runtime::kLoadKeyedFromSuper, 3);
+
+  // Replace home_object with target function.
+  __ StoreP(r3, MemOperand(sp, kPointerSize));
+
+  // Stack here:
+  // - target function
+  // - this (receiver)
+  EmitCall(expr, CallICState::METHOD);
+}
+
+
+void FullCodeGenerator::EmitCall(Call* expr, CallICState::CallType call_type) {
+  // Load the arguments.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  {
+    PreservePositionScope scope(masm()->positions_recorder());
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+  }
+
+  // Record source position of the IC call.
+  SetSourcePosition(expr->position());
+  Handle<Code> ic = CallIC::initialize_stub(isolate(), arg_count, call_type);
+  __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallFeedbackSlot()));
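+  // r4: the function to call, located under the receiver and the arguments.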
+  __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+  // Don't assign a type feedback id to the IC, since type feedback is provided
+  // by the vector above.
+  CallIC(ic);
+
+  RecordJSReturnSite(expr);
+  // Restore context register.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  context()->DropAndPlug(1, r3);
+}
+
+
+void FullCodeGenerator::EmitResolvePossiblyDirectEval(int arg_count) {
+  // r8: copy of the first argument or undefined if it doesn't exist.
+  if (arg_count > 0) {
+    __ LoadP(r8, MemOperand(sp, arg_count * kPointerSize), r0);
+  } else {
+    __ LoadRoot(r8, Heap::kUndefinedValueRootIndex);
+  }
+
+  // r7: the enclosing function (the function making the eval call).
+  __ LoadP(r7, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+
+  // r6: the receiver of the enclosing function.
+  int receiver_offset = 2 + info_->scope()->num_parameters();
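+  // The '2' skips the caller's saved frame pointer and return address slots.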
+  __ LoadP(r6, MemOperand(fp, receiver_offset * kPointerSize), r0);
+
+  // r5: strict mode.
+  __ LoadSmiLiteral(r5, Smi::FromInt(strict_mode()));
+
+  // r4: the start position of the scope the call resides in.
+  __ LoadSmiLiteral(r4, Smi::FromInt(scope()->start_position()));
+
+  // Do the runtime call.
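+  // A copy of the function being called was already pushed by the caller
+  // (VisitCall) and forms the first of the six arguments.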
+  __ Push(r8, r7, r6, r5, r4);
+  __ CallRuntime(Runtime::kResolvePossiblyDirectEval, 6);
+}
+
+
+void FullCodeGenerator::EmitLoadSuperConstructor(SuperReference* super_ref) {
+  DCHECK(super_ref != NULL);
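+  // The super constructor is the [[Prototype]] of the active function.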
+  __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  __ Push(r3);
+  __ CallRuntime(Runtime::kGetPrototype, 1);
+}
+
+
+void FullCodeGenerator::VisitCall(Call* expr) {
+#ifdef DEBUG
+  // We want to verify that RecordJSReturnSite gets called on all paths
+  // through this function.  Avoid early returns.
+  expr->return_is_recorded_ = false;
+#endif
+
+  Comment cmnt(masm_, "[ Call");
+  Expression* callee = expr->expression();
+  Call::CallType call_type = expr->GetCallType(isolate());
+
+  if (call_type == Call::POSSIBLY_EVAL_CALL) {
+    // In a call to eval, we first call RuntimeHidden_ResolvePossiblyDirectEval
+    // to resolve the function we need to call and the receiver of the
+    // call.  Then we call the resolved function using the given
+    // arguments.
+    ZoneList<Expression*>* args = expr->arguments();
+    int arg_count = args->length();
+
+    {
+      PreservePositionScope pos_scope(masm()->positions_recorder());
+      VisitForStackValue(callee);
+      __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+      __ push(r5);  // Reserved receiver slot.
+
+      // Push the arguments.
+      for (int i = 0; i < arg_count; i++) {
+        VisitForStackValue(args->at(i));
+      }
+
+      // Push a copy of the function (found below the arguments) and
+      // resolve eval.
+      __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+      __ push(r4);
+      EmitResolvePossiblyDirectEval(arg_count);
+
+      // The runtime call returns a pair of values in r3 (function) and
+      // r4 (receiver). Touch up the stack with the right values.
+      __ StoreP(r3, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+      __ StoreP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
+
+      PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
+    }
+
+    // Record source position for debugger.
+    SetSourcePosition(expr->position());
+    CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+    __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+    __ CallStub(&stub);
+    RecordJSReturnSite(expr);
+    // Restore context register.
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    context()->DropAndPlug(1, r3);
+  } else if (call_type == Call::GLOBAL_CALL) {
+    EmitCallWithLoadIC(expr);
+
+  } else if (call_type == Call::LOOKUP_SLOT_CALL) {
+    // Call to a lookup slot (dynamically introduced variable).
+    VariableProxy* proxy = callee->AsVariableProxy();
+    Label slow, done;
+
+    {
+      PreservePositionScope scope(masm()->positions_recorder());
+      // Generate code for loading from variables potentially shadowed
+      // by eval-introduced variables.
+      EmitDynamicLookupFastCase(proxy, NOT_INSIDE_TYPEOF, &slow, &done);
+    }
+
+    __ bind(&slow);
+    // Call the runtime to find the function to call (returned in r3)
+    // and the object holding it (returned in r4).
+    DCHECK(!context_register().is(r5));
+    __ mov(r5, Operand(proxy->name()));
+    __ Push(context_register(), r5);
+    __ CallRuntime(Runtime::kLoadLookupSlot, 2);
+    __ Push(r3, r4);  // Function, receiver.
+    PrepareForBailoutForId(expr->EvalOrLookupId(), NO_REGISTERS);
+
+    // If fast case code has been generated, emit code to push the
+    // function and receiver and have the slow path jump around this
+    // code.
+    if (done.is_linked()) {
+      Label call;
+      __ b(&call);
+      __ bind(&done);
+      // Push function.
+      __ push(r3);
+      // The receiver is implicitly the global receiver. Indicate this
+      // by passing undefined to the call function stub.
+      __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+      __ push(r4);
+      __ bind(&call);
+    }
+
+    // The receiver is either the global receiver or an object found
+    // by LoadContextSlot.
+    EmitCall(expr);
+  } else if (call_type == Call::PROPERTY_CALL) {
+    Property* property = callee->AsProperty();
+    bool is_named_call = property->key()->IsPropertyName();
+    if (property->IsSuperAccess()) {
+      if (is_named_call) {
+        EmitSuperCallWithLoadIC(expr);
+      } else {
+        EmitKeyedSuperCallWithLoadIC(expr);
+      }
+    } else {
+      {
+        PreservePositionScope scope(masm()->positions_recorder());
+        VisitForStackValue(property->obj());
+      }
+      if (is_named_call) {
+        EmitCallWithLoadIC(expr);
+      } else {
+        EmitKeyedCallWithLoadIC(expr, property->key());
+      }
+    }
+  } else if (call_type == Call::SUPER_CALL) {
+    SuperReference* super_ref = callee->AsSuperReference();
+    EmitLoadSuperConstructor(super_ref);
+    __ Push(result_register());
+    VisitForStackValue(super_ref->this_var());
+    EmitCall(expr, CallICState::METHOD);
+  } else {
+    DCHECK(call_type == Call::OTHER_CALL);
+    // Call to an arbitrary expression not handled specially above.
+    {
+      PreservePositionScope scope(masm()->positions_recorder());
+      VisitForStackValue(callee);
+    }
+    __ LoadRoot(r4, Heap::kUndefinedValueRootIndex);
+    __ push(r4);
+    // Emit function call.
+    EmitCall(expr);
+  }
+
+#ifdef DEBUG
+  // RecordJSReturnSite should have been called.
+  DCHECK(expr->return_is_recorded_);
+#endif
+}
+
+
+void FullCodeGenerator::VisitCallNew(CallNew* expr) {
+  Comment cmnt(masm_, "[ CallNew");
+  // According to ECMA-262, section 11.2.2, page 44, the function
+  // expression in new calls must be evaluated before the
+  // arguments.
+
+  // Push constructor on the stack.  If it's not a function it's used as
+  // receiver for CALL_NON_FUNCTION, otherwise the value on the stack is
+  // ignored.
+  if (expr->expression()->IsSuperReference()) {
+    EmitLoadSuperConstructor(expr->expression()->AsSuperReference());
+    __ Push(result_register());
+  } else {
+    VisitForStackValue(expr->expression());
+  }
+
+  // Push the arguments ("left-to-right") on the stack.
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+  for (int i = 0; i < arg_count; i++) {
+    VisitForStackValue(args->at(i));
+  }
+
+  // Call the construct call builtin that handles allocation and
+  // constructor invocation.
+  SetSourcePosition(expr->position());
+
+  // Load function and argument count into r4 and r3.
+  __ mov(r3, Operand(arg_count));
+  __ LoadP(r4, MemOperand(sp, arg_count * kPointerSize), r0);
+
+  // Record call targets in unoptimized code.
+  if (FLAG_pretenuring_call_new) {
+    EnsureSlotContainsAllocationSite(expr->AllocationSiteFeedbackSlot());
+    DCHECK(expr->AllocationSiteFeedbackSlot().ToInt() ==
+           expr->CallNewFeedbackSlot().ToInt() + 1);
+  }
+
+  __ Move(r5, FeedbackVector());
+  __ LoadSmiLiteral(r6, SmiFromSlot(expr->CallNewFeedbackSlot()));
+
+  CallConstructStub stub(isolate(), RECORD_CONSTRUCTOR_TARGET);
+  __ Call(stub.GetCode(), RelocInfo::CONSTRUCT_CALL);
+  PrepareForBailoutForId(expr->ReturnId(), TOS_REG);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitIsSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  __ TestIfSmi(r3, r0);
+  Split(eq, if_true, if_false, fall_through, cr0);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsNonNegativeSmi(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  __ TestIfPositiveSmi(r3, r0);
+  Split(eq, if_true, if_false, fall_through, cr0);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r3, if_false);
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(r3, ip);
+  __ beq(if_true);
+  __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined when tested with typeof.
+  __ lbz(r4, FieldMemOperand(r5, Map::kBitFieldOffset));
+  __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+  __ bne(if_false, cr0);
+  __ lbz(r4, FieldMemOperand(r5, Map::kInstanceTypeOffset));
+  __ cmpi(r4, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  __ blt(if_false);
+  __ cmpi(r4, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(le, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsSpecObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r3, if_false);
+  __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(ge, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsUndetectableObject(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r3, if_false);
+  __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ lbz(r4, FieldMemOperand(r4, Map::kBitFieldOffset));
+  __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(ne, if_true, if_false, fall_through, cr0);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsStringWrapperSafeForDefaultValueOf(
+    CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false, skip_lookup;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ AssertNotSmi(r3);
+
+  __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ lbz(ip, FieldMemOperand(r4, Map::kBitField2Offset));
+  __ andi(r0, ip, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ bne(&skip_lookup, cr0);
+
+  // Check for fast case object. Generate false result for slow case object.
+  __ LoadP(r5, FieldMemOperand(r3, JSObject::kPropertiesOffset));
+  __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHashTableMapRootIndex);
+  __ cmp(r5, ip);
+  __ beq(if_false);
+
+  // Look for valueOf name in the descriptor array, and indicate false if
+  // found. Since we omit an enumeration index check, if it is added via a
+  // transition that shares its descriptor array, this is a false positive.
+  Label entry, loop, done;
+
+  // Skip loop if no descriptors are valid.
+  __ NumberOfOwnDescriptors(r6, r4);
+  __ cmpi(r6, Operand::Zero());
+  __ beq(&done);
+
+  __ LoadInstanceDescriptors(r4, r7);
+  // r7: descriptor array.
+  // r6: valid entries in the descriptor array.
+  __ mov(ip, Operand(DescriptorArray::kDescriptorSize));
+  __ Mul(r6, r6, ip);
+  // Calculate location of the first key name.
+  __ addi(r7, r7, Operand(DescriptorArray::kFirstOffset - kHeapObjectTag));
+  // Calculate the end of the descriptor array.
+  __ mr(r5, r7);
+  __ ShiftLeftImm(ip, r6, Operand(kPointerSizeLog2));
+  __ add(r5, r5, ip);
+
+  // Loop through all the keys in the descriptor array. If one of these is the
+  // string "valueOf" the result is false.
+  // The use of ip to store the valueOf string assumes that it is not otherwise
+  // used in the loop below.
+  __ mov(ip, Operand(isolate()->factory()->value_of_string()));
+  __ b(&entry);
+  __ bind(&loop);
+  __ LoadP(r6, MemOperand(r7, 0));
+  __ cmp(r6, ip);
+  __ beq(if_false);
+  __ addi(r7, r7, Operand(DescriptorArray::kDescriptorSize * kPointerSize));
+  __ bind(&entry);
+  __ cmp(r7, r5);
+  __ bne(&loop);
+
+  __ bind(&done);
+
+  // Set the bit in the map to indicate that there is no local valueOf field.
+  __ lbz(r5, FieldMemOperand(r4, Map::kBitField2Offset));
+  __ ori(r5, r5, Operand(1 << Map::kStringWrapperSafeForDefaultValueOf));
+  __ stb(r5, FieldMemOperand(r4, Map::kBitField2Offset));
+
+  __ bind(&skip_lookup);
+
+  // If no valueOf property is found on the object, check that its
+  // prototype is the unmodified String prototype. If it is not, the
+  // result is false.
+  __ LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
+  __ JumpIfSmi(r5, if_false);
+  __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset));
+  __ LoadP(r6, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ LoadP(r6, FieldMemOperand(r6, GlobalObject::kNativeContextOffset));
+  __ LoadP(r6,
+           ContextOperand(r6, Context::STRING_FUNCTION_PROTOTYPE_MAP_INDEX));
+  __ cmp(r5, r6);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r3, if_false);
+  __ CompareObjectType(r3, r4, r5, JS_FUNCTION_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsMinusZero(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, if_false, DO_SMI_CHECK);
+#if V8_TARGET_ARCH_PPC64
+  __ LoadP(r4, FieldMemOperand(r3, HeapNumber::kValueOffset));
+  __ li(r5, Operand(1));
+  __ rotrdi(r5, r5, 1);  // r5 = 0x80000000_00000000
+  __ cmp(r4, r5);
+#else
+  __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
+  __ lwz(r4, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
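+  // -0 is represented by a sign-and-exponent word of 0x80000000 and a zero
+  // mantissa word.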
+  Label skip;
+  __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+  __ cmp(r5, r0);
+  __ bne(&skip);
+  __ cmpi(r4, Operand::Zero());
+  __ bind(&skip);
+#endif
+
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsArray(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r3, if_false);
+  __ CompareObjectType(r3, r4, r4, JS_ARRAY_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsRegExp(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r3, if_false);
+  __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsJSProxy(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ JumpIfSmi(r3, if_false);
+  Register map = r4;
+  Register type_reg = r5;
+  __ LoadP(map, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ subi(type_reg, type_reg, Operand(FIRST_JS_PROXY_TYPE));
+  __ cmpli(type_reg, Operand(LAST_JS_PROXY_TYPE - FIRST_JS_PROXY_TYPE));
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(le, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitIsConstructCall(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  // Get the frame pointer for the calling frame.
+  __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&check_frame_marker);
+  __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ LoadP(r4, MemOperand(r5, StandardFrameConstants::kMarkerOffset));
+  STATIC_ASSERT(StackFrame::CONSTRUCT < 0x4000);
+  __ CmpSmiLiteral(r4, Smi::FromInt(StackFrame::CONSTRUCT), r0);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitObjectEquals(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+
+  // Load the two objects into registers and perform the comparison.
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ pop(r4);
+  __ cmp(r3, r4);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitArguments(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+
+  // ArgumentsAccessStub expects the key in r4 and the formal
+  // parameter count in r3.
+  VisitForAccumulatorValue(args->at(0));
+  __ mr(r4, r3);
+  __ LoadSmiLiteral(r3, Smi::FromInt(info_->scope()->num_parameters()));
+  ArgumentsAccessStub stub(isolate(), ArgumentsAccessStub::READ_ELEMENT);
+  __ CallStub(&stub);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitArgumentsLength(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  Label exit;
+  // Get the number of formal parameters.
+  __ LoadSmiLiteral(r3, Smi::FromInt(info_->scope()->num_parameters()));
+
+  // Check if the calling frame is an arguments adaptor frame.
+  __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&exit);
+
+  // Arguments adaptor case: Read the arguments length from the
+  // adaptor frame.
+  __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset));
+
+  __ bind(&exit);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitClassOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+  Label done, null, function, non_function_constructor;
+
+  VisitForAccumulatorValue(args->at(0));
+
+  // If the object is a smi, we return null.
+  __ JumpIfSmi(r3, &null);
+
+  // Check that the object is a JS object but take special care of JS
+  // functions to make sure they have 'Function' as their class.
+  // Assume that there are only two callable types, and one of them is at
+  // either end of the type range for JS object types. Saves extra comparisons.
+  STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+  __ CompareObjectType(r3, r3, r4, FIRST_SPEC_OBJECT_TYPE);
+  // Map is now in r3.
+  __ blt(&null);
+  STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                FIRST_SPEC_OBJECT_TYPE + 1);
+  __ beq(&function);
+
+  __ cmpi(r4, Operand(LAST_SPEC_OBJECT_TYPE));
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_SPEC_OBJECT_TYPE - 1);
+  __ beq(&function);
+  // Assume that there is no larger type.
+  STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE == LAST_TYPE - 1);
+
+  // Check if the constructor in the map is a JS function.
+  __ LoadP(r3, FieldMemOperand(r3, Map::kConstructorOffset));
+  __ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE);
+  __ bne(&non_function_constructor);
+
+  // r3 now contains the constructor function. Grab the
+  // instance class name from there.
+  __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(r3,
+           FieldMemOperand(r3, SharedFunctionInfo::kInstanceClassNameOffset));
+  __ b(&done);
+
+  // Functions have class 'Function'.
+  __ bind(&function);
+  __ LoadRoot(r3, Heap::kFunction_stringRootIndex);
+  __ b(&done);
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ bind(&non_function_constructor);
+  __ LoadRoot(r3, Heap::kObject_stringRootIndex);
+  __ b(&done);
+
+  // Non-JS objects have class null.
+  __ bind(&null);
+  __ LoadRoot(r3, Heap::kNullValueRootIndex);
+
+  // All done.
+  __ bind(&done);
+
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitSubString(CallRuntime* expr) {
+  // Load the arguments on the stack and call the stub.
+  SubStringStub stub(isolate());
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  __ CallStub(&stub);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitRegExpExec(CallRuntime* expr) {
+  // Load the arguments on the stack and call the stub.
+  RegExpExecStub stub(isolate());
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 4);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForStackValue(args->at(2));
+  VisitForStackValue(args->at(3));
+  __ CallStub(&stub);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label done;
+  // If the object is a smi return the object.
+  __ JumpIfSmi(r3, &done);
+  // If the object is not a value type, return the object.
+  __ CompareObjectType(r3, r4, r4, JS_VALUE_TYPE);
+  __ bne(&done);
+  __ LoadP(r3, FieldMemOperand(r3, JSValue::kValueOffset));
+
+  __ bind(&done);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitDateField(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  DCHECK_NE(NULL, args->at(1)->AsLiteral());
+  Smi* index = Smi::cast(*(args->at(1)->AsLiteral()->value()));
+
+  VisitForAccumulatorValue(args->at(0));  // Load the object.
+
+  Label runtime, done, not_date_object;
+  Register object = r3;
+  Register result = r3;
+  Register scratch0 = r11;
+  Register scratch1 = r4;
+
+  __ JumpIfSmi(object, &not_date_object);
+  __ CompareObjectType(object, scratch1, scratch1, JS_DATE_TYPE);
+  __ bne(&not_date_object);
+
+  if (index->value() == 0) {
+    __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
+    __ b(&done);
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
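+      // Cached date fields are only valid while the object's cache stamp
+      // matches the isolate's date cache stamp.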
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ mov(scratch1, Operand(stamp));
+      __ LoadP(scratch1, MemOperand(scratch1));
+      __ LoadP(scratch0, FieldMemOperand(object, JSDate::kCacheStampOffset));
+      __ cmp(scratch1, scratch0);
+      __ bne(&runtime);
+      __ LoadP(result,
+               FieldMemOperand(object, JSDate::kValueOffset +
+                                           kPointerSize * index->value()),
+               scratch0);
+      __ b(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch1);
+    __ LoadSmiLiteral(r4, index);
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ b(&done);
+  }
+
+  __ bind(&not_date_object);
+  __ CallRuntime(Runtime::kThrowNotDateError, 0);
+  __ bind(&done);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitOneByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(3, args->length());
+
+  Register string = r3;
+  Register index = r4;
+  Register value = r5;
+
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
+  __ Pop(index, value);
+
+  if (FLAG_debug_code) {
+    __ TestIfSmi(value, r0);
+    __ Check(eq, kNonSmiValue, cr0);
+    __ TestIfSmi(index, r0);
+    __ Check(eq, kNonSmiIndex, cr0);
+    __ SmiUntag(index, index);
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    __ EmitSeqStringSetCharCheck(string, index, value, one_byte_seq_type);
+    __ SmiTag(index, index);
+  }
+
+  __ SmiUntag(value);
+  __ addi(ip, string, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ SmiToByteArrayOffset(r0, index);
+  __ stbx(value, MemOperand(ip, r0));
+  context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitTwoByteSeqStringSetChar(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(3, args->length());
+
+  Register string = r3;
+  Register index = r4;
+  Register value = r5;
+
+  VisitForStackValue(args->at(0));        // index
+  VisitForStackValue(args->at(1));        // value
+  VisitForAccumulatorValue(args->at(2));  // string
+  __ Pop(index, value);
+
+  if (FLAG_debug_code) {
+    __ TestIfSmi(value, r0);
+    __ Check(eq, kNonSmiValue, cr0);
+    __ TestIfSmi(index, r0);
+    __ Check(eq, kNonSmiIndex, cr0);
+    __ SmiUntag(index, index);
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ EmitSeqStringSetCharCheck(string, index, value, two_byte_seq_type);
+    __ SmiTag(index, index);
+  }
+
+  __ SmiUntag(value);
+  __ addi(ip, string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+  __ SmiToShortArrayOffset(r0, index);
+  __ sthx(value, MemOperand(ip, r0));
+  context()->Plug(string);
+}
+
+
+void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
+  // Load the arguments on the stack and call the runtime function.
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  MathPowStub stub(isolate(), MathPowStub::ON_STACK);
+  __ CallStub(&stub);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitSetValueOf(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  VisitForStackValue(args->at(0));        // Load the object.
+  VisitForAccumulatorValue(args->at(1));  // Load the value.
+  __ pop(r4);                             // r3 = value. r4 = object.
+
+  Label done;
+  // If the object is a smi, return the value.
+  __ JumpIfSmi(r4, &done);
+
+  // If the object is not a value type, return the value.
+  __ CompareObjectType(r4, r5, r5, JS_VALUE_TYPE);
+  __ bne(&done);
+
+  // Store the value.
+  __ StoreP(r3, FieldMemOperand(r4, JSValue::kValueOffset), r0);
+  // Update the write barrier.  Save the value as it will be
+  // overwritten by the write barrier code and is needed afterward.
+  __ mr(r5, r3);
+  __ RecordWriteField(r4, JSValue::kValueOffset, r5, r6, kLRHasBeenSaved,
+                      kDontSaveFPRegs);
+
+  __ bind(&done);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitNumberToString(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(args->length(), 1);
+  // Load the argument into r3 and call the stub.
+  VisitForAccumulatorValue(args->at(0));
+
+  NumberToStringStub stub(isolate());
+  __ CallStub(&stub);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitStringCharFromCode(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));
+
+  Label done;
+  StringCharFromCodeGenerator generator(r3, r4);
+  generator.GenerateFast(masm_);
+  __ b(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(r4);
+}
+
+
+void FullCodeGenerator::EmitStringCharCodeAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Register object = r4;
+  Register index = r3;
+  Register result = r6;
+
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharCodeAtGenerator generator(object, index, result, &need_conversion,
+                                      &need_conversion, &index_out_of_range,
+                                      STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ b(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // NaN.
+  __ LoadRoot(result, Heap::kNanValueRootIndex);
+  __ b(&done);
+
+  __ bind(&need_conversion);
+  // Load the undefined value into the result register, which will
+  // trigger conversion.
+  __ LoadRoot(result, Heap::kUndefinedValueRootIndex);
+  __ b(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringCharAt(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  Register object = r4;
+  Register index = r3;
+  Register scratch = r6;
+  Register result = r3;
+
+  __ pop(object);
+
+  Label need_conversion;
+  Label index_out_of_range;
+  Label done;
+  StringCharAtGenerator generator(object, index, scratch, result,
+                                  &need_conversion, &need_conversion,
+                                  &index_out_of_range, STRING_INDEX_IS_NUMBER);
+  generator.GenerateFast(masm_);
+  __ b(&done);
+
+  __ bind(&index_out_of_range);
+  // When the index is out of range, the spec requires us to return
+  // the empty string.
+  __ LoadRoot(result, Heap::kempty_stringRootIndex);
+  __ b(&done);
+
+  __ bind(&need_conversion);
+  // Move smi zero into the result register, which will trigger
+  // conversion.
+  __ LoadSmiLiteral(result, Smi::FromInt(0));
+  __ b(&done);
+
+  NopRuntimeCallHelper call_helper;
+  generator.GenerateSlow(masm_, call_helper);
+
+  __ bind(&done);
+  context()->Plug(result);
+}
+
+
+void FullCodeGenerator::EmitStringAdd(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(2, args->length());
+  VisitForStackValue(args->at(0));
+  VisitForAccumulatorValue(args->at(1));
+
+  __ pop(r4);
+  StringAddStub stub(isolate(), STRING_ADD_CHECK_BOTH, NOT_TENURED);
+  __ CallStub(&stub);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitStringCompare(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(2, args->length());
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+
+  StringCompareStub stub(isolate());
+  __ CallStub(&stub);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitCallFunction(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() >= 2);
+
+  int arg_count = args->length() - 2;  // 2 ~ receiver and function.
+  for (int i = 0; i < arg_count + 1; i++) {
+    VisitForStackValue(args->at(i));
+  }
+  VisitForAccumulatorValue(args->last());  // Function.
+
+  Label runtime, done;
+  // Check for non-function argument (including proxy).
+  __ JumpIfSmi(r3, &runtime);
+  __ CompareObjectType(r3, r4, r4, JS_FUNCTION_TYPE);
+  __ bne(&runtime);
+
+  // InvokeFunction requires the function in r4. Move it in there.
+  __ mr(r4, result_register());
+  ParameterCount count(arg_count);
+  __ InvokeFunction(r4, count, CALL_FUNCTION, NullCallWrapper());
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ b(&done);
+
+  __ bind(&runtime);
+  __ push(r3);
+  __ CallRuntime(Runtime::kCall, args->length());
+  __ bind(&done);
+
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitRegExpConstructResult(CallRuntime* expr) {
+  RegExpConstructResultStub stub(isolate());
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 3);
+  VisitForStackValue(args->at(0));
+  VisitForStackValue(args->at(1));
+  VisitForAccumulatorValue(args->at(2));
+  __ Pop(r5, r4);
+  __ CallStub(&stub);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitGetFromCache(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK_EQ(2, args->length());
+  DCHECK_NE(NULL, args->at(0)->AsLiteral());
+  int cache_id = Smi::cast(*(args->at(0)->AsLiteral()->value()))->value();
+
+  Handle<FixedArray> jsfunction_result_caches(
+      isolate()->native_context()->jsfunction_result_caches());
+  if (jsfunction_result_caches->length() <= cache_id) {
+    __ Abort(kAttemptToUseUndefinedCache);
+    __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+    context()->Plug(r3);
+    return;
+  }
+
+  VisitForAccumulatorValue(args->at(1));
+
+  Register key = r3;
+  Register cache = r4;
+  __ LoadP(cache, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX));
+  __ LoadP(cache, FieldMemOperand(cache, GlobalObject::kNativeContextOffset));
+  __ LoadP(cache,
+           ContextOperand(cache, Context::JSFUNCTION_RESULT_CACHES_INDEX));
+  __ LoadP(cache,
+           FieldMemOperand(cache, FixedArray::OffsetOfElementAt(cache_id)), r0);
+
+  Label done, not_found;
+  __ LoadP(r5, FieldMemOperand(cache, JSFunctionResultCache::kFingerOffset));
+  // r5 now holds finger offset as a smi.
+  __ addi(r6, cache, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  // r6 now points to the start of fixed array elements.
+  __ SmiToPtrArrayOffset(r5, r5);
+  __ LoadPUX(r5, MemOperand(r6, r5));
+  // r6 now points to the key of the pair.
+  __ cmp(key, r5);
+  __ bne(&not_found);
+
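+  // Cache hit: the value is stored in the slot right after the key.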
+  __ LoadP(r3, MemOperand(r6, kPointerSize));
+  __ b(&done);
+
+  __ bind(&not_found);
+  // Call runtime to perform the lookup.
+  __ Push(cache, key);
+  __ CallRuntime(Runtime::kGetFromCache, 2);
+
+  __ bind(&done);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitHasCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  VisitForAccumulatorValue(args->at(0));
+
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
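+  // The hash field contains a cached array index iff the bits in
+  // kContainsCachedArrayIndexMask are all clear.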
+  // PPC - assume ip is free
+  __ mov(ip, Operand(String::kContainsCachedArrayIndexMask));
+  __ and_(r0, r3, ip);
+  __ cmpi(r0, Operand::Zero());
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  Split(eq, if_true, if_false, fall_through);
+
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitGetCachedArrayIndex(CallRuntime* expr) {
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 1);
+  VisitForAccumulatorValue(args->at(0));
+
+  __ AssertString(r3);
+
+  __ lwz(r3, FieldMemOperand(r3, String::kHashFieldOffset));
+  __ IndexFromHash(r3, r3);
+
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::EmitFastOneByteArrayJoin(CallRuntime* expr) {
+  Label bailout, done, one_char_separator, long_separator, non_trivial_array,
+      not_size_one_array, loop, empty_separator_loop, one_char_separator_loop,
+      one_char_separator_loop_entry, long_separator_loop;
+  ZoneList<Expression*>* args = expr->arguments();
+  DCHECK(args->length() == 2);
+  VisitForStackValue(args->at(1));
+  VisitForAccumulatorValue(args->at(0));
+
+  // All aliases of the same register have disjoint lifetimes.
+  Register array = r3;
+  Register elements = no_reg;  // Will be r3.
+  Register result = no_reg;    // Will be r3.
+  Register separator = r4;
+  Register array_length = r5;
+  Register result_pos = no_reg;  // Will be r5.
+  Register string_length = r6;
+  Register string = r7;
+  Register element = r8;
+  Register elements_end = r9;
+  Register scratch1 = r10;
+  Register scratch2 = r11;
+
+  // Separator operand is on the stack.
+  __ pop(separator);
+
+  // Check that the array is a JSArray.
+  __ JumpIfSmi(array, &bailout);
+  __ CompareObjectType(array, scratch1, scratch2, JS_ARRAY_TYPE);
+  __ bne(&bailout);
+
+  // Check that the array has fast elements.
+  __ CheckFastElements(scratch1, scratch2, &bailout);
+
+  // If the array has length zero, return the empty string.
+  __ LoadP(array_length, FieldMemOperand(array, JSArray::kLengthOffset));
+  __ SmiUntag(array_length);
+  __ cmpi(array_length, Operand::Zero());
+  __ bne(&non_trivial_array);
+  __ LoadRoot(r3, Heap::kempty_stringRootIndex);
+  __ b(&done);
+
+  __ bind(&non_trivial_array);
+
+  // Get the FixedArray containing array's elements.
+  elements = array;
+  __ LoadP(elements, FieldMemOperand(array, JSArray::kElementsOffset));
+  array = no_reg;  // End of array's live range.
+
+  // Check that all array elements are sequential one-byte strings, and
+  // accumulate the sum of their lengths, as a smi-encoded value.
+  __ li(string_length, Operand::Zero());
+  __ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  __ ShiftLeftImm(elements_end, array_length, Operand(kPointerSizeLog2));
+  __ add(elements_end, element, elements_end);
+  // Loop condition: while (element < elements_end).
+  // Live values in registers:
+  //   elements: Fixed array of strings.
+  //   array_length: Length of the fixed array of strings (not smi)
+  //   separator: Separator string
+  //   string_length: Accumulated sum of string lengths (smi).
+  //   element: Current array element.
+  //   elements_end: Array end.
+  if (generate_debug_code_) {
+    __ cmpi(array_length, Operand::Zero());
+    __ Assert(gt, kNoEmptyArraysHereInEmitFastOneByteArrayJoin);
+  }
+  __ bind(&loop);
+  __ LoadP(string, MemOperand(element));
+  __ addi(element, element, Operand(kPointerSize));
+  __ JumpIfSmi(string, &bailout);
+  __ LoadP(scratch1, FieldMemOperand(string, HeapObject::kMapOffset));
+  __ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
+  __ LoadP(scratch1, FieldMemOperand(string, SeqOneByteString::kLengthOffset));
+
+  __ AddAndCheckForOverflow(string_length, string_length, scratch1, scratch2,
+                            r0);
+  __ BranchOnOverflow(&bailout);
+
+  __ cmp(element, elements_end);
+  __ blt(&loop);
+
+  // If array_length is 1, return elements[0], a string.
+  __ cmpi(array_length, Operand(1));
+  __ bne(&not_size_one_array);
+  __ LoadP(r3, FieldMemOperand(elements, FixedArray::kHeaderSize));
+  __ b(&done);
+
+  __ bind(&not_size_one_array);
+
+  // Live values in registers:
+  //   separator: Separator string
+  //   array_length: Length of the array.
+  //   string_length: Sum of string lengths (smi).
+  //   elements: FixedArray of strings.
+
+  // Check that the separator is a flat one-byte string.
+  __ JumpIfSmi(separator, &bailout);
+  __ LoadP(scratch1, FieldMemOperand(separator, HeapObject::kMapOffset));
+  __ lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  __ JumpIfInstanceTypeIsNotSequentialOneByte(scratch1, scratch2, &bailout);
+
+  // Add (separator length times array_length) - separator length to the
+  // string_length to get the length of the result string.
+  __ LoadP(scratch1,
+           FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+  __ sub(string_length, string_length, scratch1);
+#if V8_TARGET_ARCH_PPC64
+  __ SmiUntag(scratch1, scratch1);
+  __ Mul(scratch2, array_length, scratch1);
+  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+  // zero.
+  __ ShiftRightImm(ip, scratch2, Operand(31), SetRC);
+  __ bne(&bailout, cr0);
+  __ SmiTag(scratch2, scratch2);
+#else
+  // array_length is not smi but the other values are, so the result is a smi
+  __ mullw(scratch2, array_length, scratch1);
+  __ mulhw(ip, array_length, scratch1);
+  // Check for smi overflow. No overflow if higher 33 bits of 64-bit result are
+  // zero.
+  __ cmpi(ip, Operand::Zero());
+  __ bne(&bailout);
+  __ cmpwi(scratch2, Operand::Zero());
+  __ blt(&bailout);
+#endif
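+
+  // In both variants the requirement is the same: the 64-bit product must
+  // fit in the low 31 bits, since bit 31 would make the resulting smi
+  // negative and any higher bits cannot be represented in a smi at all.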
+
+  __ AddAndCheckForOverflow(string_length, string_length, scratch2, scratch1,
+                            r0);
+  __ BranchOnOverflow(&bailout);
+  __ SmiUntag(string_length);
+
+  // Get first element in the array to free up the elements register to be used
+  // for the result.
+  __ addi(element, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  result = elements;  // End of live range for elements.
+  elements = no_reg;
+  // Live values in registers:
+  //   element: First array element
+  //   separator: Separator string
+  //   string_length: Length of result string (not smi)
+  //   array_length: Length of the array.
+  __ AllocateOneByteString(result, string_length, scratch1, scratch2,
+                           elements_end, &bailout);
+  // Prepare for looping. Set up elements_end to end of the array. Set
+  // result_pos to the position of the result where to write the first
+  // character.
+  __ ShiftLeftImm(elements_end, array_length, Operand(kPointerSizeLog2));
+  __ add(elements_end, element, elements_end);
+  result_pos = array_length;  // End of live range for array_length.
+  array_length = no_reg;
+  __ addi(result_pos, result,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+
+  // Check the length of the separator.
+  __ LoadP(scratch1,
+           FieldMemOperand(separator, SeqOneByteString::kLengthOffset));
+  __ CmpSmiLiteral(scratch1, Smi::FromInt(1), r0);
+  __ beq(&one_char_separator);
+  __ bgt(&long_separator);
+
+  // Empty separator case
+  __ bind(&empty_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+
+  // Copy next array element to the result.
+  __ LoadP(string, MemOperand(element));
+  __ addi(element, element, Operand(kPointerSize));
+  __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ addi(string, string,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  __ cmp(element, elements_end);
+  __ blt(&empty_separator_loop);  // End while (element < elements_end).
+  DCHECK(result.is(r3));
+  __ b(&done);
+
+  // One-character separator case
+  __ bind(&one_char_separator);
+  // Replace separator with its one-byte character value.
+  __ lbz(separator, FieldMemOperand(separator, SeqOneByteString::kHeaderSize));
+  // Jump into the loop after the code that copies the separator, so the first
+  // element is not preceded by a separator.
+  __ b(&one_char_separator_loop_entry);
+
+  __ bind(&one_char_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Single separator one-byte char (in lower byte).
+
+  // Copy the separator character to the result.
+  __ stb(separator, MemOperand(result_pos));
+  __ addi(result_pos, result_pos, Operand(1));
+
+  // Copy next array element to the result.
+  __ bind(&one_char_separator_loop_entry);
+  __ LoadP(string, MemOperand(element));
+  __ addi(element, element, Operand(kPointerSize));
+  __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ addi(string, string,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  __ cmpl(element, elements_end);
+  __ blt(&one_char_separator_loop);  // End while (element < elements_end).
+  DCHECK(result.is(r3));
+  __ b(&done);
+
+  // Long separator case (separator is more than one character). Entry is at the
+  // label long_separator below.
+  __ bind(&long_separator_loop);
+  // Live values in registers:
+  //   result_pos: the position to which we are currently copying characters.
+  //   element: Current array element.
+  //   elements_end: Array end.
+  //   separator: Separator string.
+
+  // Copy the separator to the result.
+  __ LoadP(string_length, FieldMemOperand(separator, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ addi(string, separator,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+
+  __ bind(&long_separator);
+  __ LoadP(string, MemOperand(element));
+  __ addi(element, element, Operand(kPointerSize));
+  __ LoadP(string_length, FieldMemOperand(string, String::kLengthOffset));
+  __ SmiUntag(string_length);
+  __ addi(string, string,
+          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag));
+  __ CopyBytes(string, result_pos, string_length, scratch1);
+  __ cmpl(element, elements_end);
+  __ blt(&long_separator_loop);  // End while (element < elements_end).
+  DCHECK(result.is(r3));
+  __ b(&done);
+
+  __ bind(&bailout);
+  __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ bind(&done);
+  context()->Plug(r3);
+}
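+
+// The fast path above is, in JS terms, roughly `array.join(separator)` for
+// a fast-elements array of flat one-byte strings; any other input reaches
+// the bailout, which returns undefined so the calling JS builtin can fall
+// back to its generic path.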
+
+
+void FullCodeGenerator::EmitDebugIsActive(CallRuntime* expr) {
+  DCHECK(expr->arguments()->length() == 0);
+  ExternalReference debug_is_active =
+      ExternalReference::debug_is_active_address(isolate());
+  __ mov(ip, Operand(debug_is_active));
+  __ lbz(r3, MemOperand(ip));
+  __ SmiTag(r3);
+  context()->Plug(r3);
+}
+
+
+void FullCodeGenerator::VisitCallRuntime(CallRuntime* expr) {
+  if (expr->function() != NULL &&
+      expr->function()->intrinsic_type == Runtime::INLINE) {
+    Comment cmnt(masm_, "[ InlineRuntimeCall");
+    EmitInlineRuntimeCall(expr);
+    return;
+  }
+
+  Comment cmnt(masm_, "[ CallRuntime");
+  ZoneList<Expression*>* args = expr->arguments();
+  int arg_count = args->length();
+
+  if (expr->is_jsruntime()) {
+    // Push the builtins object as the receiver.
+    Register receiver = LoadDescriptor::ReceiverRegister();
+    __ LoadP(receiver, GlobalObjectOperand());
+    __ LoadP(receiver,
+             FieldMemOperand(receiver, GlobalObject::kBuiltinsOffset));
+    __ push(receiver);
+
+    // Load the function from the receiver.
+    __ mov(LoadDescriptor::NameRegister(), Operand(expr->name()));
+    if (FLAG_vector_ics) {
+      __ mov(VectorLoadICDescriptor::SlotRegister(),
+             Operand(SmiFromSlot(expr->CallRuntimeFeedbackSlot())));
+      CallLoadIC(NOT_CONTEXTUAL);
+    } else {
+      CallLoadIC(NOT_CONTEXTUAL, expr->CallRuntimeFeedbackId());
+    }
+
+    // Push the target function under the receiver.
+    __ LoadP(ip, MemOperand(sp, 0));
+    __ push(ip);
+    __ StoreP(r3, MemOperand(sp, kPointerSize));
+
+    // Push the arguments ("left-to-right").
+    int arg_count = args->length();
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+
+    // Record source position of the IC call.
+    SetSourcePosition(expr->position());
+    CallFunctionStub stub(isolate(), arg_count, NO_CALL_FUNCTION_FLAGS);
+    __ LoadP(r4, MemOperand(sp, (arg_count + 1) * kPointerSize), r0);
+    __ CallStub(&stub);
+
+    // Restore context register.
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+
+    context()->DropAndPlug(1, r3);
+  } else {
+    // Push the arguments ("left-to-right").
+    for (int i = 0; i < arg_count; i++) {
+      VisitForStackValue(args->at(i));
+    }
+
+    // Call the C runtime function.
+    __ CallRuntime(expr->function(), arg_count);
+    context()->Plug(r3);
+  }
+}
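+
+// For the JS-runtime path above, the stack just before CallFunctionStub is
+// (top to bottom): the arguments left-to-right, the builtins receiver at
+// sp + argc * kPointerSize, and the target function beneath it at
+// sp + (argc + 1) * kPointerSize, which is where r4 is reloaded from.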
+
+
+void FullCodeGenerator::VisitUnaryOperation(UnaryOperation* expr) {
+  switch (expr->op()) {
+    case Token::DELETE: {
+      Comment cmnt(masm_, "[ UnaryOperation (DELETE)");
+      Property* property = expr->expression()->AsProperty();
+      VariableProxy* proxy = expr->expression()->AsVariableProxy();
+
+      if (property != NULL) {
+        VisitForStackValue(property->obj());
+        VisitForStackValue(property->key());
+        __ LoadSmiLiteral(r4, Smi::FromInt(strict_mode()));
+        __ push(r4);
+        __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+        context()->Plug(r3);
+      } else if (proxy != NULL) {
+        Variable* var = proxy->var();
+        // Delete of an unqualified identifier is disallowed in strict mode
+        // but "delete this" is allowed.
+        DCHECK(strict_mode() == SLOPPY || var->is_this());
+        if (var->IsUnallocated()) {
+          __ LoadP(r5, GlobalObjectOperand());
+          __ mov(r4, Operand(var->name()));
+          __ LoadSmiLiteral(r3, Smi::FromInt(SLOPPY));
+          __ Push(r5, r4, r3);
+          __ InvokeBuiltin(Builtins::DELETE, CALL_FUNCTION);
+          context()->Plug(r3);
+        } else if (var->IsStackAllocated() || var->IsContextSlot()) {
+          // Result of deleting non-global, non-dynamic variables is false.
+          // The subexpression does not have side effects.
+          context()->Plug(var->is_this());
+        } else {
+          // Non-global variable.  Call the runtime to try to delete from the
+          // context where the variable was introduced.
+          DCHECK(!context_register().is(r5));
+          __ mov(r5, Operand(var->name()));
+          __ Push(context_register(), r5);
+          __ CallRuntime(Runtime::kDeleteLookupSlot, 2);
+          context()->Plug(r3);
+        }
+      } else {
+        // Result of deleting non-property, non-variable reference is true.
+        // The subexpression may have side effects.
+        VisitForEffect(expr->expression());
+        context()->Plug(true);
+      }
+      break;
+    }
+
+    case Token::VOID: {
+      Comment cmnt(masm_, "[ UnaryOperation (VOID)");
+      VisitForEffect(expr->expression());
+      context()->Plug(Heap::kUndefinedValueRootIndex);
+      break;
+    }
+
+    case Token::NOT: {
+      Comment cmnt(masm_, "[ UnaryOperation (NOT)");
+      if (context()->IsEffect()) {
+        // Unary NOT has no side effects so it's only necessary to visit the
+        // subexpression.  Match the optimizing compiler by not branching.
+        VisitForEffect(expr->expression());
+      } else if (context()->IsTest()) {
+        const TestContext* test = TestContext::cast(context());
+        // The labels are swapped for the recursive call.
+        VisitForControl(expr->expression(), test->false_label(),
+                        test->true_label(), test->fall_through());
+        context()->Plug(test->true_label(), test->false_label());
+      } else {
+        // We handle value contexts explicitly rather than simply visiting
+        // for control and plugging the control flow into the context,
+        // because we need to prepare a pair of extra administrative AST ids
+        // for the optimizing compiler.
+        DCHECK(context()->IsAccumulatorValue() || context()->IsStackValue());
+        Label materialize_true, materialize_false, done;
+        VisitForControl(expr->expression(), &materialize_false,
+                        &materialize_true, &materialize_true);
+        __ bind(&materialize_true);
+        PrepareForBailoutForId(expr->MaterializeTrueId(), NO_REGISTERS);
+        __ LoadRoot(r3, Heap::kTrueValueRootIndex);
+        if (context()->IsStackValue()) __ push(r3);
+        __ b(&done);
+        __ bind(&materialize_false);
+        PrepareForBailoutForId(expr->MaterializeFalseId(), NO_REGISTERS);
+        __ LoadRoot(r3, Heap::kFalseValueRootIndex);
+        if (context()->IsStackValue()) __ push(r3);
+        __ bind(&done);
+      }
+      break;
+    }
+
+    case Token::TYPEOF: {
+      Comment cmnt(masm_, "[ UnaryOperation (TYPEOF)");
+      {
+        StackValueContext context(this);
+        VisitForTypeofValue(expr->expression());
+      }
+      __ CallRuntime(Runtime::kTypeof, 1);
+      context()->Plug(r3);
+      break;
+    }
+
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void FullCodeGenerator::VisitCountOperation(CountOperation* expr) {
+  DCHECK(expr->expression()->IsValidReferenceExpression());
+
+  Comment cmnt(masm_, "[ CountOperation");
+  SetSourcePosition(expr->position());
+
+  Property* prop = expr->expression()->AsProperty();
+  LhsKind assign_type = GetAssignType(prop);
+
+  // Evaluate expression and get value.
+  if (assign_type == VARIABLE) {
+    DCHECK(expr->expression()->AsVariableProxy()->var() != NULL);
+    AccumulatorValueContext context(this);
+    EmitVariableLoad(expr->expression()->AsVariableProxy());
+  } else {
+    // Reserve space for result of postfix operation.
+    if (expr->is_postfix() && !context()->IsEffect()) {
+      __ LoadSmiLiteral(ip, Smi::FromInt(0));
+      __ push(ip);
+    }
+    switch (assign_type) {
+      case NAMED_PROPERTY: {
+        // Put the object both on the stack and in the register.
+        VisitForStackValue(prop->obj());
+        __ LoadP(LoadDescriptor::ReceiverRegister(), MemOperand(sp, 0));
+        EmitNamedPropertyLoad(prop);
+        break;
+      }
+
+      case NAMED_SUPER_PROPERTY: {
+        VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+        EmitLoadHomeObject(prop->obj()->AsSuperReference());
+        __ Push(result_register());
+        const Register scratch = r4;
+        __ LoadP(scratch, MemOperand(sp, kPointerSize));
+        __ Push(scratch, result_register());
+        EmitNamedSuperPropertyLoad(prop);
+        break;
+      }
+
+      case KEYED_SUPER_PROPERTY: {
+        VisitForStackValue(prop->obj()->AsSuperReference()->this_var());
+        EmitLoadHomeObject(prop->obj()->AsSuperReference());
+        const Register scratch = r4;
+        const Register scratch1 = r5;
+        __ Move(scratch, result_register());
+        VisitForAccumulatorValue(prop->key());
+        __ Push(scratch, result_register());
+        __ LoadP(scratch1, MemOperand(sp, 2 * kPointerSize));
+        __ Push(scratch1, scratch, result_register());
+        EmitKeyedSuperPropertyLoad(prop);
+        break;
+      }
+
+      case KEYED_PROPERTY: {
+        VisitForStackValue(prop->obj());
+        VisitForStackValue(prop->key());
+        __ LoadP(LoadDescriptor::ReceiverRegister(),
+                 MemOperand(sp, 1 * kPointerSize));
+        __ LoadP(LoadDescriptor::NameRegister(), MemOperand(sp, 0));
+        EmitKeyedPropertyLoad(prop);
+        break;
+      }
+
+      case VARIABLE:
+        UNREACHABLE();
+    }
+  }
+
+  // We need a second deoptimization point after loading the value
+  // in case evaluating the property load may have a side effect.
+  if (assign_type == VARIABLE) {
+    PrepareForBailout(expr->expression(), TOS_REG);
+  } else {
+    PrepareForBailoutForId(prop->LoadId(), TOS_REG);
+  }
+
+  // Inline smi case if we are in a loop.
+  Label stub_call, done;
+  JumpPatchSite patch_site(masm_);
+
+  int count_value = expr->op() == Token::INC ? 1 : -1;
+  if (ShouldInlineSmiCase(expr->op())) {
+    Label slow;
+    patch_site.EmitJumpIfNotSmi(r3, &slow);
+
+    // Save result for postfix expressions.
+    if (expr->is_postfix()) {
+      if (!context()->IsEffect()) {
+        // Save the result on the stack. If we have a named or keyed property
+        // we store the result under the receiver that is currently on top
+        // of the stack.
+        switch (assign_type) {
+          case VARIABLE:
+            __ push(r3);
+            break;
+          case NAMED_PROPERTY:
+            __ StoreP(r3, MemOperand(sp, kPointerSize));
+            break;
+          case NAMED_SUPER_PROPERTY:
+            __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+            break;
+          case KEYED_PROPERTY:
+            __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+            break;
+          case KEYED_SUPER_PROPERTY:
+            __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
+            break;
+        }
+      }
+    }
+
+    Register scratch1 = r4;
+    Register scratch2 = r5;
+    __ LoadSmiLiteral(scratch1, Smi::FromInt(count_value));
+    __ AddAndCheckForOverflow(r3, r3, scratch1, scratch2, r0);
+    __ BranchOnNoOverflow(&done);
+    // Call stub. Undo operation first.
+    __ sub(r3, r3, scratch1);
+    __ b(&stub_call);
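+    // Undoing the add lets the BinaryOpIC below observe the original smi
+    // operand (e.g. Smi::kMaxValue for a ++ at the smi limit) rather than
+    // a wrapped-around value.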
+    __ bind(&slow);
+  }
+  ToNumberStub convert_stub(isolate());
+  __ CallStub(&convert_stub);
+
+  // Save result for postfix expressions.
+  if (expr->is_postfix()) {
+    if (!context()->IsEffect()) {
+      // Save the result on the stack. If we have a named or keyed property
+      // we store the result under the receiver that is currently on top
+      // of the stack.
+      switch (assign_type) {
+        case VARIABLE:
+          __ push(r3);
+          break;
+        case NAMED_PROPERTY:
+          __ StoreP(r3, MemOperand(sp, kPointerSize));
+          break;
+        case NAMED_SUPER_PROPERTY:
+          __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+          break;
+        case KEYED_PROPERTY:
+          __ StoreP(r3, MemOperand(sp, 2 * kPointerSize));
+          break;
+        case KEYED_SUPER_PROPERTY:
+          __ StoreP(r3, MemOperand(sp, 3 * kPointerSize));
+          break;
+      }
+    }
+  }
+
+  __ bind(&stub_call);
+  __ mr(r4, r3);
+  __ LoadSmiLiteral(r3, Smi::FromInt(count_value));
+
+  // Record position before stub call.
+  SetSourcePosition(expr->position());
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), Token::ADD, NO_OVERWRITE).code();
+  CallIC(code, expr->CountBinOpFeedbackId());
+  patch_site.EmitPatchInfo();
+  __ bind(&done);
+
+  // Store the value returned in r3.
+  switch (assign_type) {
+    case VARIABLE:
+      if (expr->is_postfix()) {
+        {
+          EffectContext context(this);
+          EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                                 Token::ASSIGN);
+          PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+          context.Plug(r3);
+        }
+        // For all contexts except EffectContext we have the result on
+        // top of the stack.
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        EmitVariableAssignment(expr->expression()->AsVariableProxy()->var(),
+                               Token::ASSIGN);
+        PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+        context()->Plug(r3);
+      }
+      break;
+    case NAMED_PROPERTY: {
+      __ mov(StoreDescriptor::NameRegister(),
+             Operand(prop->key()->AsLiteral()->value()));
+      __ pop(StoreDescriptor::ReceiverRegister());
+      CallStoreIC(expr->CountStoreFeedbackId());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(r3);
+      }
+      break;
+    }
+    case NAMED_SUPER_PROPERTY: {
+      EmitNamedSuperPropertyStore(prop);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(r3);
+      }
+      break;
+    }
+    case KEYED_SUPER_PROPERTY: {
+      EmitKeyedSuperPropertyStore(prop);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(r3);
+      }
+      break;
+    }
+    case KEYED_PROPERTY: {
+      __ Pop(StoreDescriptor::ReceiverRegister(),
+             StoreDescriptor::NameRegister());
+      Handle<Code> ic =
+          CodeFactory::KeyedStoreIC(isolate(), strict_mode()).code();
+      CallIC(ic, expr->CountStoreFeedbackId());
+      PrepareForBailoutForId(expr->AssignmentId(), TOS_REG);
+      if (expr->is_postfix()) {
+        if (!context()->IsEffect()) {
+          context()->PlugTOS();
+        }
+      } else {
+        context()->Plug(r3);
+      }
+      break;
+    }
+  }
+}
+
+
+void FullCodeGenerator::VisitForTypeofValue(Expression* expr) {
+  DCHECK(!context()->IsEffect());
+  DCHECK(!context()->IsTest());
+  VariableProxy* proxy = expr->AsVariableProxy();
+  if (proxy != NULL && proxy->var()->IsUnallocated()) {
+    Comment cmnt(masm_, "[ Global variable");
+    __ LoadP(LoadDescriptor::ReceiverRegister(), GlobalObjectOperand());
+    __ mov(LoadDescriptor::NameRegister(), Operand(proxy->name()));
+    if (FLAG_vector_ics) {
+      __ mov(VectorLoadICDescriptor::SlotRegister(),
+             Operand(SmiFromSlot(proxy->VariableFeedbackSlot())));
+    }
+    // Use a regular load, not a contextual load, to avoid a reference
+    // error.
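+    // (`typeof someUndeclared` must evaluate to "undefined", whereas a
+    // contextual load of the same name would throw a ReferenceError.)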
+    CallLoadIC(NOT_CONTEXTUAL);
+    PrepareForBailout(expr, TOS_REG);
+    context()->Plug(r3);
+  } else if (proxy != NULL && proxy->var()->IsLookupSlot()) {
+    Comment cmnt(masm_, "[ Lookup slot");
+    Label done, slow;
+
+    // Generate code for loading from variables potentially shadowed
+    // by eval-introduced variables.
+    EmitDynamicLookupFastCase(proxy, INSIDE_TYPEOF, &slow, &done);
+
+    __ bind(&slow);
+    __ mov(r3, Operand(proxy->name()));
+    __ Push(cp, r3);
+    __ CallRuntime(Runtime::kLoadLookupSlotNoReferenceError, 2);
+    PrepareForBailout(expr, TOS_REG);
+    __ bind(&done);
+
+    context()->Plug(r3);
+  } else {
+    // This expression cannot throw a reference error at the top level.
+    VisitInDuplicateContext(expr);
+  }
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareTypeof(Expression* expr,
+                                                 Expression* sub_expr,
+                                                 Handle<String> check) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  {
+    AccumulatorValueContext context(this);
+    VisitForTypeofValue(sub_expr);
+  }
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+
+  Factory* factory = isolate()->factory();
+  if (String::Equals(check, factory->number_string())) {
+    __ JumpIfSmi(r3, if_true);
+    __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(r3, ip);
+    Split(eq, if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->string_string())) {
+    __ JumpIfSmi(r3, if_false);
+    // Check for undetectable objects => false.
+    __ CompareObjectType(r3, r3, r4, FIRST_NONSTRING_TYPE);
+    __ bge(if_false);
+    __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+    STATIC_ASSERT((1 << Map::kIsUndetectable) < 0x8000);
+    __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+    Split(eq, if_true, if_false, fall_through, cr0);
+  } else if (String::Equals(check, factory->symbol_string())) {
+    __ JumpIfSmi(r3, if_false);
+    __ CompareObjectType(r3, r3, r4, SYMBOL_TYPE);
+    Split(eq, if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->boolean_string())) {
+    __ CompareRoot(r3, Heap::kTrueValueRootIndex);
+    __ beq(if_true);
+    __ CompareRoot(r3, Heap::kFalseValueRootIndex);
+    Split(eq, if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->undefined_string())) {
+    __ CompareRoot(r3, Heap::kUndefinedValueRootIndex);
+    __ beq(if_true);
+    __ JumpIfSmi(r3, if_false);
+    // Check for undetectable objects => true.
+    __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+    __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+    __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+    Split(ne, if_true, if_false, fall_through, cr0);
+
+  } else if (String::Equals(check, factory->function_string())) {
+    __ JumpIfSmi(r3, if_false);
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    __ CompareObjectType(r3, r3, r4, JS_FUNCTION_TYPE);
+    __ beq(if_true);
+    __ cmpi(r4, Operand(JS_FUNCTION_PROXY_TYPE));
+    Split(eq, if_true, if_false, fall_through);
+  } else if (String::Equals(check, factory->object_string())) {
+    __ JumpIfSmi(r3, if_false);
+    __ CompareRoot(r3, Heap::kNullValueRootIndex);
+    __ beq(if_true);
+    // Check for JS objects => true.
+    __ CompareObjectType(r3, r3, r4, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ blt(if_false);
+    __ CompareInstanceType(r3, r4, LAST_NONCALLABLE_SPEC_OBJECT_TYPE);
+    __ bgt(if_false);
+    // Check for undetectable objects => false.
+    __ lbz(r4, FieldMemOperand(r3, Map::kBitFieldOffset));
+    __ andi(r0, r4, Operand(1 << Map::kIsUndetectable));
+    Split(eq, if_true, if_false, fall_through, cr0);
+  } else {
+    if (if_false != fall_through) __ b(if_false);
+  }
+  context()->Plug(if_true, if_false);
+}
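+
+// The chain above open-codes `typeof x === "<literal>"` for each literal
+// the comparison can mention; e.g. `typeof x === "undefined"` is true both
+// for the undefined value and for undetectable objects (document.all-style
+// hosts), which is why the undetectable map bit is tested in more than one
+// branch.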
+
+
+void FullCodeGenerator::VisitCompareOperation(CompareOperation* expr) {
+  Comment cmnt(masm_, "[ CompareOperation");
+  SetSourcePosition(expr->position());
+
+  // First we try a fast inlined version of the compare when one of
+  // the operands is a literal.
+  if (TryLiteralCompare(expr)) return;
+
+  // Always perform the comparison for its control flow.  Pack the result
+  // into the expression's context after the comparison is performed.
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  Token::Value op = expr->op();
+  VisitForStackValue(expr->left());
+  switch (op) {
+    case Token::IN:
+      VisitForStackValue(expr->right());
+      __ InvokeBuiltin(Builtins::IN, CALL_FUNCTION);
+      PrepareForBailoutBeforeSplit(expr, false, NULL, NULL);
+      __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+      __ cmp(r3, ip);
+      Split(eq, if_true, if_false, fall_through);
+      break;
+
+    case Token::INSTANCEOF: {
+      VisitForStackValue(expr->right());
+      InstanceofStub stub(isolate(), InstanceofStub::kNoFlags);
+      __ CallStub(&stub);
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      // The stub returns 0 for true.
+      __ cmpi(r3, Operand::Zero());
+      Split(eq, if_true, if_false, fall_through);
+      break;
+    }
+
+    default: {
+      VisitForAccumulatorValue(expr->right());
+      Condition cond = CompareIC::ComputeCondition(op);
+      __ pop(r4);
+
+      bool inline_smi_code = ShouldInlineSmiCase(op);
+      JumpPatchSite patch_site(masm_);
+      if (inline_smi_code) {
+        Label slow_case;
+        __ orx(r5, r3, r4);
+        patch_site.EmitJumpIfNotSmi(r5, &slow_case);
+        __ cmp(r4, r3);
+        Split(cond, if_true, if_false, NULL);
+        __ bind(&slow_case);
+      }
+
+      // Record position and call the compare IC.
+      SetSourcePosition(expr->position());
+      Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+      CallIC(ic, expr->CompareOperationFeedbackId());
+      patch_site.EmitPatchInfo();
+      PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+      __ cmpi(r3, Operand::Zero());
+      Split(cond, if_true, if_false, fall_through);
+    }
+  }
+
+  // Convert the result of the comparison into one expected for this
+  // expression's context.
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::EmitLiteralCompareNil(CompareOperation* expr,
+                                              Expression* sub_expr,
+                                              NilValue nil) {
+  Label materialize_true, materialize_false;
+  Label* if_true = NULL;
+  Label* if_false = NULL;
+  Label* fall_through = NULL;
+  context()->PrepareTest(&materialize_true, &materialize_false, &if_true,
+                         &if_false, &fall_through);
+
+  VisitForAccumulatorValue(sub_expr);
+  PrepareForBailoutBeforeSplit(expr, true, if_true, if_false);
+  if (expr->op() == Token::EQ_STRICT) {
+    Heap::RootListIndex nil_value = nil == kNullValue
+                                        ? Heap::kNullValueRootIndex
+                                        : Heap::kUndefinedValueRootIndex;
+    __ LoadRoot(r4, nil_value);
+    __ cmp(r3, r4);
+    Split(eq, if_true, if_false, fall_through);
+  } else {
+    Handle<Code> ic = CompareNilICStub::GetUninitialized(isolate(), nil);
+    CallIC(ic, expr->CompareOperationFeedbackId());
+    __ cmpi(r3, Operand::Zero());
+    Split(ne, if_true, if_false, fall_through);
+  }
+  context()->Plug(if_true, if_false);
+}
+
+
+void FullCodeGenerator::VisitThisFunction(ThisFunction* expr) {
+  __ LoadP(r3, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  context()->Plug(r3);
+}
+
+
+Register FullCodeGenerator::result_register() { return r3; }
+
+
+Register FullCodeGenerator::context_register() { return cp; }
+
+
+void FullCodeGenerator::StoreToFrameField(int frame_offset, Register value) {
+  DCHECK_EQ(static_cast<int>(POINTER_SIZE_ALIGN(frame_offset)), frame_offset);
+  __ StoreP(value, MemOperand(fp, frame_offset), r0);
+}
+
+
+void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
+  __ LoadP(dst, ContextOperand(cp, context_index), r0);
+}
+
+
+void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
+  Scope* declaration_scope = scope()->DeclarationScope();
+  if (declaration_scope->is_script_scope() ||
+      declaration_scope->is_module_scope()) {
+    // Contexts nested in the native context have a canonical empty function
+    // as their closure, not the anonymous closure containing the global
+    // code.  Pass a smi sentinel and let the runtime look up the empty
+    // function.
+    __ LoadSmiLiteral(ip, Smi::FromInt(0));
+  } else if (declaration_scope->is_eval_scope()) {
+    // Contexts created by a call to eval have the same closure as the
+    // context calling eval, not the anonymous closure containing the eval
+    // code.  Fetch it from the context.
+    __ LoadP(ip, ContextOperand(cp, Context::CLOSURE_INDEX));
+  } else {
+    DCHECK(declaration_scope->is_function_scope());
+    __ LoadP(ip, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+  }
+  __ push(ip);
+}
+
+
+// ----------------------------------------------------------------------------
+// Non-local control flow support.
+
+void FullCodeGenerator::EnterFinallyBlock() {
+  DCHECK(!result_register().is(r4));
+  // Store result register while executing finally block.
+  __ push(result_register());
+  // Cook return address in link register to stack (smi encoded Code* delta)
+  __ mflr(r4);
+  __ mov(ip, Operand(masm_->CodeObject()));
+  __ sub(r4, r4, ip);
+  __ SmiTag(r4);
+
+  // Store the cooked return address while executing the finally block.
+  __ push(r4);
+
+  // Store pending message while executing finally block.
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ mov(ip, Operand(pending_message_obj));
+  __ LoadP(r4, MemOperand(ip));
+  __ push(r4);
+
+  ExternalReference has_pending_message =
+      ExternalReference::address_of_has_pending_message(isolate());
+  __ mov(ip, Operand(has_pending_message));
+  __ lbz(r4, MemOperand(ip));
+  __ SmiTag(r4);
+  __ push(r4);
+
+  ExternalReference pending_message_script =
+      ExternalReference::address_of_pending_message_script(isolate());
+  __ mov(ip, Operand(pending_message_script));
+  __ LoadP(r4, MemOperand(ip));
+  __ push(r4);
+}
+
+
+void FullCodeGenerator::ExitFinallyBlock() {
+  DCHECK(!result_register().is(r4));
+  // Restore pending message from stack.
+  __ pop(r4);
+  ExternalReference pending_message_script =
+      ExternalReference::address_of_pending_message_script(isolate());
+  __ mov(ip, Operand(pending_message_script));
+  __ StoreP(r4, MemOperand(ip));
+
+  __ pop(r4);
+  __ SmiUntag(r4);
+  ExternalReference has_pending_message =
+      ExternalReference::address_of_has_pending_message(isolate());
+  __ mov(ip, Operand(has_pending_message));
+  __ stb(r4, MemOperand(ip));
+
+  __ pop(r4);
+  ExternalReference pending_message_obj =
+      ExternalReference::address_of_pending_message_obj(isolate());
+  __ mov(ip, Operand(pending_message_obj));
+  __ StoreP(r4, MemOperand(ip));
+
+  // Restore the cooked return address from the stack.
+  __ pop(r4);
+
+  // Restore the result register, then uncook the return address and return.
+  __ pop(result_register());
+  __ SmiUntag(r4);
+  __ mov(ip, Operand(masm_->CodeObject()));
+  __ add(ip, ip, r4);
+  __ mtctr(ip);
+  __ bctr();
+}
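+
+// The cook/uncook pair keeps the saved return address GC-safe: a raw lr
+// value would be an untagged pointer into code space, so it is stored as a
+// smi-tagged offset from the code object instead and rebuilt on exit.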
+
+
+#undef __
+
+#define __ ACCESS_MASM(masm())
+
+FullCodeGenerator::NestedStatement* FullCodeGenerator::TryFinally::Exit(
+    int* stack_depth, int* context_length) {
+  // The macros used here must preserve the result register.
+
+  // Because the handler block contains the context of the finally
+  // code, we can restore it directly from there for the finally code
+  // rather than iteratively unwinding contexts via their previous
+  // links.
+  __ Drop(*stack_depth);  // Down to the handler block.
+  if (*context_length > 0) {
+    // Restore the context to its dedicated register and the stack.
+    __ LoadP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+    __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  }
+  __ PopTryHandler();
+  __ b(finally_entry_, SetLK);
+
+  *stack_depth = 0;
+  *context_length = 0;
+  return previous_;
+}
+
+#undef __
+
+
+void BackEdgeTable::PatchAt(Code* unoptimized_code, Address pc,
+                            BackEdgeState target_state,
+                            Code* replacement_code) {
+  Address mov_address = Assembler::target_address_from_return_address(pc);
+  Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
+  CodePatcher patcher(cmp_address, 1);
+
+  switch (target_state) {
+    case INTERRUPT: {
+      //  <decrement profiling counter>
+      //         cmpi    r6, 0
+      //         bge     <ok>            ;; not changed
+      //         mov     r12, <interrupt stub address>
+      //         mtlr    r12
+      //         blrl
+      //  <reset profiling counter>
+      //  ok-label
+      patcher.masm()->cmpi(r6, Operand::Zero());
+      break;
+    }
+    case ON_STACK_REPLACEMENT:
+    case OSR_AFTER_STACK_CHECK:
+      //  <decrement profiling counter>
+      //         crset
+      //         bge     <ok>            ;; not changed
+      //         mov     r12, <on-stack replacement address>
+      //         mtlr    r12
+      //         blrl
+      //  <reset profiling counter>
+      //  ok-label ----- pc_after points here
+
+      // Set the LT bit such that bge is a NOP
+      patcher.masm()->crset(Assembler::encode_crbit(cr7, CR_LT));
+      break;
+  }
+
+  // Replace the stack check address in the mov sequence with the
+  // entry address of the replacement code.
+  Assembler::set_target_address_at(mov_address, unoptimized_code,
+                                   replacement_code->entry());
+
+  unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
+      unoptimized_code, mov_address, replacement_code);
+}
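+
+// The patch toggles a single instruction: in the INTERRUPT state the cmpi
+// really tests the profiling counter, so the bge usually skips the call;
+// in the OSR states it is replaced by crset, which forces the LT bit so
+// the bge falls through and the re-targeted stub is always taken.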
+
+
+BackEdgeTable::BackEdgeState BackEdgeTable::GetBackEdgeState(
+    Isolate* isolate, Code* unoptimized_code, Address pc) {
+  Address mov_address = Assembler::target_address_from_return_address(pc);
+  Address cmp_address = mov_address - 2 * Assembler::kInstrSize;
+  Address interrupt_address =
+      Assembler::target_address_at(mov_address, unoptimized_code);
+
+  if (Assembler::IsCmpImmediate(Assembler::instr_at(cmp_address))) {
+    DCHECK(interrupt_address == isolate->builtins()->InterruptCheck()->entry());
+    return INTERRUPT;
+  }
+
+  DCHECK(Assembler::IsCrSet(Assembler::instr_at(cmp_address)));
+
+  if (interrupt_address == isolate->builtins()->OnStackReplacement()->entry()) {
+    return ON_STACK_REPLACEMENT;
+  }
+
+  DCHECK(interrupt_address ==
+         isolate->builtins()->OsrAfterStackCheck()->entry());
+  return OSR_AFTER_STACK_CHECK;
+}
+}
+}  // namespace v8::internal
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/interface-descriptors-ppc.cc b/deps/v8/src/ppc/interface-descriptors-ppc.cc
new file mode 100644 (file)
index 0000000..693f341
--- /dev/null
@@ -0,0 +1,306 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/interface-descriptors.h"
+
+namespace v8 {
+namespace internal {
+
+const Register CallInterfaceDescriptor::ContextRegister() { return cp; }
+
+
+const Register LoadDescriptor::ReceiverRegister() { return r4; }
+const Register LoadDescriptor::NameRegister() { return r5; }
+
+
+const Register VectorLoadICTrampolineDescriptor::SlotRegister() { return r3; }
+
+
+const Register VectorLoadICDescriptor::VectorRegister() { return r6; }
+
+
+const Register StoreDescriptor::ReceiverRegister() { return r4; }
+const Register StoreDescriptor::NameRegister() { return r5; }
+const Register StoreDescriptor::ValueRegister() { return r3; }
+
+
+const Register StoreTransitionDescriptor::MapRegister() { return r6; }
+
+
+const Register ElementTransitionAndStoreDescriptor::MapRegister() { return r6; }
+
+
+const Register InstanceofDescriptor::left() { return r3; }
+const Register InstanceofDescriptor::right() { return r4; }
+
+
+const Register ArgumentsAccessReadDescriptor::index() { return r4; }
+const Register ArgumentsAccessReadDescriptor::parameter_count() { return r3; }
+
+
+const Register ApiGetterDescriptor::function_address() { return r5; }
+
+
+const Register MathPowTaggedDescriptor::exponent() { return r5; }
+
+
+const Register MathPowIntegerDescriptor::exponent() {
+  return MathPowTaggedDescriptor::exponent();
+}
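+
+// Register conventions in this file follow the PPC ABI rather than the ARM
+// port's numbering: r3 is the first argument/return register, so roles that
+// other ports assign to r0..r2 generally land in r3..r5, while r0 mostly
+// serves as an extra scratch register (it has special addressing semantics
+// on PPC).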
+
+
+void FastNewClosureDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r5};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastNewContextDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r4};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToNumberDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void NumberToStringDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void FastCloneShallowArrayDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r6, r5, r4};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(), Representation::Smi(),
+      Representation::Tagged()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void FastCloneShallowObjectDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r6, r5, r4, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CreateAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r5, r6};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StoreArrayLiteralElementDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r6, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r4};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void CallFunctionWithFeedbackDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r4, r6};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Smi()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallConstructDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // r3 : number of arguments
+  // r4 : the function to call
+  // r5 : feedback vector
+  // r6 : (only if r5 is not the megamorphic symbol) slot in feedback
+  //      vector (Smi)
+  // TODO(turbofan): So far we don't gather type feedback and hence skip the
+  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
+  Register registers[] = {cp, r3, r4, r5};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void RegExpConstructResultDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r5, r4, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void TransitionElementsKindDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r3, r4};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  // r3 -- number of arguments
+  // r4 -- function
+  // r5 -- allocation site with elements kind
+  Register registers[] = {cp, r4, r5};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ArrayConstructorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  // Stack param count needs the constructor pointer and a single argument.
+  Register registers[] = {cp, r4, r5, r3};
+  Representation representations[] = {
+      Representation::Tagged(), Representation::Tagged(),
+      Representation::Tagged(), Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void InternalArrayConstructorConstantArgCountDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // register state
+  // cp -- context
+  // r3 -- number of arguments
+  // r4 -- constructor function
+  Register registers[] = {cp, r4};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void InternalArrayConstructorDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  // Stack param count needs the constructor pointer and a single argument.
+  Register registers[] = {cp, r4, r3};
+  Representation representations[] = {Representation::Tagged(),
+                                      Representation::Tagged(),
+                                      Representation::Integer32()};
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CompareNilDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void ToBooleanDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r4, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void BinaryOpWithAllocationSiteDescriptor::Initialize(
+    CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r5, r4, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void StringAddDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {cp, r4, r3};
+  data->Initialize(arraysize(registers), registers, NULL);
+}
+
+
+void KeyedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      r5,  // key
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // key
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void NamedDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      r5,  // name
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // name
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void CallHandlerDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      r3,  // receiver
+  };
+  Representation representations[] = {
+      Representation::Tagged(),  // context
+      Representation::Tagged(),  // receiver
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ArgumentAdaptorDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      r4,  // JSFunction
+      r3,  // actual number of arguments
+      r5,  // expected number of arguments
+  };
+  Representation representations[] = {
+      Representation::Tagged(),     // context
+      Representation::Tagged(),     // JSFunction
+      Representation::Integer32(),  // actual number of arguments
+      Representation::Integer32(),  // expected number of arguments
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+
+
+void ApiFunctionDescriptor::Initialize(CallInterfaceDescriptorData* data) {
+  Register registers[] = {
+      cp,  // context
+      r3,  // callee
+      r7,  // call_data
+      r5,  // holder
+      r4,  // api_function_address
+  };
+  Representation representations[] = {
+      Representation::Tagged(),    // context
+      Representation::Tagged(),    // callee
+      Representation::Tagged(),    // call_data
+      Representation::Tagged(),    // holder
+      Representation::External(),  // api_function_address
+  };
+  data->Initialize(arraysize(registers), registers, representations);
+}
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.cc b/deps/v8/src/ppc/lithium-codegen-ppc.cc
new file mode 100644 (file)
index 0000000..7b6052c
--- /dev/null
@@ -0,0 +1,6136 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/base/bits.h"
+#include "src/code-factory.h"
+#include "src/code-stubs.h"
+#include "src/hydrogen-osr.h"
+#include "src/ic/ic.h"
+#include "src/ic/stub-cache.h"
+#include "src/ppc/lithium-codegen-ppc.h"
+#include "src/ppc/lithium-gap-resolver-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+
+class SafepointGenerator FINAL : public CallWrapper {
+ public:
+  SafepointGenerator(LCodeGen* codegen, LPointerMap* pointers,
+                     Safepoint::DeoptMode mode)
+      : codegen_(codegen), pointers_(pointers), deopt_mode_(mode) {}
+  virtual ~SafepointGenerator() {}
+
+  void BeforeCall(int call_size) const OVERRIDE {}
+
+  void AfterCall() const OVERRIDE {
+    codegen_->RecordSafepoint(pointers_, deopt_mode_);
+  }
+
+ private:
+  LCodeGen* codegen_;
+  LPointerMap* pointers_;
+  Safepoint::DeoptMode deopt_mode_;
+};
+
+
+#define __ masm()->
+
+bool LCodeGen::GenerateCode() {
+  LPhase phase("Z_Code generation", chunk());
+  DCHECK(is_unused());
+  status_ = GENERATING;
+
+  // Open a frame scope to indicate that there is a frame on the stack.  The
+  // NONE indicates that the scope shouldn't actually generate code to set up
+  // the frame (that is done in GeneratePrologue).
+  FrameScope frame_scope(masm_, StackFrame::NONE);
+
+  return GeneratePrologue() && GenerateBody() && GenerateDeferredCode() &&
+         GenerateJumpTable() && GenerateSafepointTable();
+}
+
+
+void LCodeGen::FinishCode(Handle<Code> code) {
+  DCHECK(is_done());
+  code->set_stack_slots(GetStackSlotCount());
+  code->set_safepoint_table_offset(safepoints_.GetCodeOffset());
+  if (code->is_optimized_code()) RegisterWeakObjectsInOptimizedCode(code);
+  PopulateDeoptimizationData(code);
+}
+
+
+void LCodeGen::SaveCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Save clobbered callee double registers");
+  int count = 0;
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  while (!save_iterator.Done()) {
+    __ stfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+            MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+void LCodeGen::RestoreCallerDoubles() {
+  DCHECK(info()->saves_caller_doubles());
+  DCHECK(NeedsEagerFrame());
+  Comment(";;; Restore clobbered callee double registers");
+  BitVector* doubles = chunk()->allocated_double_registers();
+  BitVector::Iterator save_iterator(doubles);
+  int count = 0;
+  while (!save_iterator.Done()) {
+    __ lfd(DoubleRegister::FromAllocationIndex(save_iterator.Current()),
+           MemOperand(sp, count * kDoubleSize));
+    save_iterator.Advance();
+    count++;
+  }
+}
+
+
+bool LCodeGen::GeneratePrologue() {
+  DCHECK(is_generating());
+
+  if (info()->IsOptimizing()) {
+    ProfileEntryHookStub::MaybeCallEntryHook(masm_);
+
+#ifdef DEBUG
+    if (strlen(FLAG_stop_at) > 0 &&
+        info_->function()->name()->IsUtf8EqualTo(CStrVector(FLAG_stop_at))) {
+      __ stop("stop_at");
+    }
+#endif
+
+    // r4: Callee's JS function.
+    // cp: Callee's context.
+    // pp: Callee's constant pool pointer (if FLAG_enable_ool_constant_pool)
+    // fp: Caller's frame pointer.
+    // lr: Caller's pc.
+    // ip: Our own function entry (required by the prologue)
+
+    // Sloppy mode functions and builtins need to replace the receiver with the
+    // global proxy when called as functions (without an explicit receiver
+    // object).
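+    // For example, a sloppy-mode `function f() { return this; }` called as
+    // plain `f()` must observe the global proxy rather than undefined.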
+    if (info_->this_has_uses() && info_->strict_mode() == SLOPPY &&
+        !info_->is_native()) {
+      Label ok;
+      int receiver_offset = info_->scope()->num_parameters() * kPointerSize;
+      __ LoadP(r5, MemOperand(sp, receiver_offset));
+      __ CompareRoot(r5, Heap::kUndefinedValueRootIndex);
+      __ bne(&ok);
+
+      __ LoadP(r5, GlobalObjectOperand());
+      __ LoadP(r5, FieldMemOperand(r5, GlobalObject::kGlobalProxyOffset));
+
+      __ StoreP(r5, MemOperand(sp, receiver_offset));
+
+      __ bind(&ok);
+    }
+  }
+
+  int prologue_offset = masm_->pc_offset();
+
+  if (prologue_offset) {
+    // Prologue logic requires its starting address in ip and the
+    // corresponding offset from the function entry.
+    prologue_offset += Instruction::kInstrSize;
+    __ addi(ip, ip, Operand(prologue_offset));
+  }
+  info()->set_prologue_offset(prologue_offset);
+  if (NeedsEagerFrame()) {
+    if (info()->IsStub()) {
+      __ StubPrologue(prologue_offset);
+    } else {
+      __ Prologue(info()->IsCodePreAgingActive(), prologue_offset);
+    }
+    frame_is_built_ = true;
+    info_->AddNoFrameRange(0, masm_->pc_offset());
+  }
+
+  // Reserve space for the stack slots needed by the code.
+  int slots = GetStackSlotCount();
+  if (slots > 0) {
+    __ subi(sp, sp, Operand(slots * kPointerSize));
+    if (FLAG_debug_code) {
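+      // Zap-fill the freshly reserved slots so that reads of uninitialized
+      // spill slots show up as the recognizable kSlotsZapValue pattern.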
+      __ Push(r3, r4);
+      __ li(r0, Operand(slots));
+      __ mtctr(r0);
+      __ addi(r3, sp, Operand((slots + 2) * kPointerSize));
+      __ mov(r4, Operand(kSlotsZapValue));
+      Label loop;
+      __ bind(&loop);
+      __ StorePU(r4, MemOperand(r3, -kPointerSize));
+      __ bdnz(&loop);
+      __ Pop(r3, r4);
+    }
+  }
+
+  if (info()->saves_caller_doubles()) {
+    SaveCallerDoubles();
+  }
+
+  // Possibly allocate a local context.
+  int heap_slots = info()->num_heap_slots() - Context::MIN_CONTEXT_SLOTS;
+  if (heap_slots > 0) {
+    Comment(";;; Allocate local context");
+    bool need_write_barrier = true;
+    // Argument to NewContext is the function, which is in r4.
+    if (heap_slots <= FastNewContextStub::kMaximumSlots) {
+      FastNewContextStub stub(isolate(), heap_slots);
+      __ CallStub(&stub);
+      // Result of FastNewContextStub is always in new space.
+      need_write_barrier = false;
+    } else {
+      __ push(r4);
+      __ CallRuntime(Runtime::kNewFunctionContext, 1);
+    }
+    RecordSafepoint(Safepoint::kNoLazyDeopt);
+    // Context is returned in both r3 and cp.  It replaces the context
+    // passed to us.  It is saved on the stack and kept live in cp.
+    __ mr(cp, r3);
+    __ StoreP(r3, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    // Copy any necessary parameters into the context.
+    int num_parameters = scope()->num_parameters();
+    for (int i = 0; i < num_parameters; i++) {
+      Variable* var = scope()->parameter(i);
+      if (var->IsContextSlot()) {
+        int parameter_offset = StandardFrameConstants::kCallerSPOffset +
+                               (num_parameters - 1 - i) * kPointerSize;
+        // Load parameter from stack.
+        __ LoadP(r3, MemOperand(fp, parameter_offset));
+        // Store it in the context.
+        MemOperand target = ContextOperand(cp, var->index());
+        __ StoreP(r3, target, r0);
+        // Update the write barrier. This clobbers r6 and r3.
+        if (need_write_barrier) {
+          __ RecordWriteContextSlot(cp, target.offset(), r3, r6,
+                                    GetLinkRegisterState(), kSaveFPRegs);
+        } else if (FLAG_debug_code) {
+          Label done;
+          __ JumpIfInNewSpace(cp, r3, &done);
+          __ Abort(kExpectedNewSpaceObject);
+          __ bind(&done);
+        }
+      }
+    }
+    Comment(";;; End allocate local context");
+  }
+
+  // Trace the call.
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // We have not executed any compiled code yet, so cp still holds the
+    // incoming context.
+    __ CallRuntime(Runtime::kTraceEnter, 0);
+  }
+  return !is_aborted();
+}
+
+
+void LCodeGen::GenerateOsrPrologue() {
+  // Generate the OSR entry prologue at the first unknown OSR value, or if there
+  // are none, at the OSR entrypoint instruction.
+  if (osr_pc_offset_ >= 0) return;
+
+  osr_pc_offset_ = masm()->pc_offset();
+
+  // Adjust the frame size, subsuming the unoptimized frame into the
+  // optimized frame.
+  int slots = GetStackSlotCount() - graph()->osr()->UnoptimizedFrameSlots();
+  DCHECK(slots >= 0);
+  __ subi(sp, sp, Operand(slots * kPointerSize));
+}
+
+
+void LCodeGen::GenerateBodyInstructionPre(LInstruction* instr) {
+  if (instr->IsCall()) {
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+  }
+  if (!instr->IsLazyBailout() && !instr->IsGap()) {
+    safepoints_.BumpLastLazySafepointIndex();
+  }
+}
+
+
+bool LCodeGen::GenerateDeferredCode() {
+  DCHECK(is_generating());
+  if (deferred_.length() > 0) {
+    for (int i = 0; !is_aborted() && i < deferred_.length(); i++) {
+      LDeferredCode* code = deferred_[i];
+
+      HValue* value =
+          instructions_->at(code->instruction_index())->hydrogen_value();
+      RecordAndWritePosition(
+          chunk()->graph()->SourcePositionToScriptPosition(value->position()));
+
+      Comment(
+          ";;; <@%d,#%d> "
+          "-------------------- Deferred %s --------------------",
+          code->instruction_index(), code->instr()->hydrogen_value()->id(),
+          code->instr()->Mnemonic());
+      __ bind(code->entry());
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Build frame");
+        DCHECK(!frame_is_built_);
+        DCHECK(info()->IsStub());
+        frame_is_built_ = true;
+        __ LoadSmiLiteral(scratch0(), Smi::FromInt(StackFrame::STUB));
+        __ PushFixedFrame(scratch0());
+        __ addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+        Comment(";;; Deferred code");
+      }
+      code->Generate();
+      if (NeedsDeferredFrame()) {
+        Comment(";;; Destroy frame");
+        DCHECK(frame_is_built_);
+        __ PopFixedFrame(ip);
+        frame_is_built_ = false;
+      }
+      __ b(code->exit());
+    }
+  }
+
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateJumpTable() {
+  // Check that the jump table is accessible from everywhere in the function
+  // code, i.e. that offsets to the table can be encoded in the 24-bit signed
+  // immediate of a branch instruction.
+  // To simplify, we consider the code size from the first instruction to the
+  // end of the jump table. We also don't consider the pc load delta.
+  // Each entry in the jump table generates one instruction and inlines one
+  // 32-bit data word after it.
+  if (!is_int24((masm()->pc_offset() / Assembler::kInstrSize) +
+                jump_table_.length() * 7)) {
+    Abort(kGeneratedCodeIsTooLarge);
+  }
+
+  if (jump_table_.length() > 0) {
+    Label needs_frame, call_deopt_entry;
+
+    Comment(";;; -------------------- Jump table --------------------");
+    Address base = jump_table_[0].address;
+
+    Register entry_offset = scratch0();
+
+    int length = jump_table_.length();
+    for (int i = 0; i < length; i++) {
+      Deoptimizer::JumpTableEntry* table_entry = &jump_table_[i];
+      __ bind(&table_entry->label);
+
+      DCHECK_EQ(jump_table_[0].bailout_type, table_entry->bailout_type);
+      Address entry = table_entry->address;
+      DeoptComment(table_entry->reason);
+
+      // Second-level deopt table entries are contiguous and small, so instead
+      // of loading the full, absolute address of each one, load an immediate
+      // offset which will be added to the base address later.
+      __ mov(entry_offset, Operand(entry - base));
+
+      if (table_entry->needs_frame) {
+        DCHECK(!info()->saves_caller_doubles());
+        if (needs_frame.is_bound()) {
+          __ b(&needs_frame);
+        } else {
+          __ bind(&needs_frame);
+          Comment(";;; call deopt with frame");
+          // This variant of deopt can only be used with stubs. Since we don't
+          // have a function pointer to install in the stack frame that we're
+          // building, install a special marker there instead.
+          DCHECK(info()->IsStub());
+          __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::STUB));
+          __ PushFixedFrame(ip);
+          __ addi(fp, sp,
+                  Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+          __ bind(&call_deopt_entry);
+          // Add the base address to the offset previously loaded in
+          // entry_offset.
+          __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
+          __ add(ip, entry_offset, ip);
+          __ Call(ip);
+        }
+      } else {
+        // The last entry can fall through into `call_deopt_entry`, avoiding a
+        // branch.
+        bool need_branch = ((i + 1) != length) || call_deopt_entry.is_bound();
+
+        if (need_branch) __ b(&call_deopt_entry);
+      }
+    }
+
+    if (!call_deopt_entry.is_bound()) {
+      Comment(";;; call deopt");
+      __ bind(&call_deopt_entry);
+
+      if (info()->saves_caller_doubles()) {
+        DCHECK(info()->IsStub());
+        RestoreCallerDoubles();
+      }
+
+      // Add the base address to the offset previously loaded in entry_offset.
+      __ mov(ip, Operand(ExternalReference::ForDeoptEntry(base)));
+      __ add(ip, entry_offset, ip);
+      __ Call(ip);
+    }
+  }
+
+  // The deoptimization jump table is the last part of the instruction
+  // sequence. Mark the generated code as done unless we bailed out.
+  if (!is_aborted()) status_ = DONE;
+  return !is_aborted();
+}
+
+
+bool LCodeGen::GenerateSafepointTable() {
+  DCHECK(is_done());
+  safepoints_.Emit(masm(), GetStackSlotCount());
+  return !is_aborted();
+}
+
+
+Register LCodeGen::ToRegister(int index) const {
+  return Register::FromAllocationIndex(index);
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(int index) const {
+  return DoubleRegister::FromAllocationIndex(index);
+}
+
+
+Register LCodeGen::ToRegister(LOperand* op) const {
+  DCHECK(op->IsRegister());
+  return ToRegister(op->index());
+}
+
+
+Register LCodeGen::EmitLoadRegister(LOperand* op, Register scratch) {
+  if (op->IsRegister()) {
+    return ToRegister(op->index());
+  } else if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk_->LookupConstant(const_op);
+    Handle<Object> literal = constant->handle(isolate());
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsInteger32()) {
+      DCHECK(literal->IsNumber());
+      __ LoadIntLiteral(scratch, static_cast<int32_t>(literal->Number()));
+    } else if (r.IsDouble()) {
+      Abort(kEmitLoadRegisterUnsupportedDoubleImmediate);
+    } else {
+      DCHECK(r.IsSmiOrTagged());
+      __ Move(scratch, literal);
+    }
+    return scratch;
+  } else if (op->IsStackSlot()) {
+    __ LoadP(scratch, ToMemOperand(op));
+    return scratch;
+  }
+  UNREACHABLE();
+  return scratch;
+}
+
+
+void LCodeGen::EmitLoadIntegerConstant(LConstantOperand* const_op,
+                                       Register dst) {
+  DCHECK(IsInteger32(const_op));
+  HConstant* constant = chunk_->LookupConstant(const_op);
+  int32_t value = constant->Integer32Value();
+  if (IsSmi(const_op)) {
+    __ LoadSmiLiteral(dst, Smi::FromInt(value));
+  } else {
+    __ LoadIntLiteral(dst, value);
+  }
+}
+
+
+DoubleRegister LCodeGen::ToDoubleRegister(LOperand* op) const {
+  DCHECK(op->IsDoubleRegister());
+  return ToDoubleRegister(op->index());
+}
+
+
+Handle<Object> LCodeGen::ToHandle(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(chunk_->LookupLiteralRepresentation(op).IsSmiOrTagged());
+  return constant->handle(isolate());
+}
+
+
+bool LCodeGen::IsInteger32(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmiOrInteger32();
+}
+
+
+bool LCodeGen::IsSmi(LConstantOperand* op) const {
+  return chunk_->LookupLiteralRepresentation(op).IsSmi();
+}
+
+
+int32_t LCodeGen::ToInteger32(LConstantOperand* op) const {
+  return ToRepresentation(op, Representation::Integer32());
+}
+
+
+intptr_t LCodeGen::ToRepresentation(LConstantOperand* op,
+                                    const Representation& r) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  int32_t value = constant->Integer32Value();
+  if (r.IsInteger32()) return value;
+  DCHECK(r.IsSmiOrTagged());
+  return reinterpret_cast<intptr_t>(Smi::FromInt(value));
+}
+
+
+Smi* LCodeGen::ToSmi(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  return Smi::FromInt(constant->Integer32Value());
+}
+
+
+double LCodeGen::ToDouble(LConstantOperand* op) const {
+  HConstant* constant = chunk_->LookupConstant(op);
+  DCHECK(constant->HasDoubleValue());
+  return constant->DoubleValue();
+}
+
+
+Operand LCodeGen::ToOperand(LOperand* op) {
+  if (op->IsConstantOperand()) {
+    LConstantOperand* const_op = LConstantOperand::cast(op);
+    HConstant* constant = chunk()->LookupConstant(const_op);
+    Representation r = chunk_->LookupLiteralRepresentation(const_op);
+    if (r.IsSmi()) {
+      DCHECK(constant->HasSmiValue());
+      return Operand(Smi::FromInt(constant->Integer32Value()));
+    } else if (r.IsInteger32()) {
+      DCHECK(constant->HasInteger32Value());
+      return Operand(constant->Integer32Value());
+    } else if (r.IsDouble()) {
+      Abort(kToOperandUnsupportedDoubleImmediate);
+    }
+    DCHECK(r.IsTagged());
+    return Operand(constant->handle(isolate()));
+  } else if (op->IsRegister()) {
+    return Operand(ToRegister(op));
+  } else if (op->IsDoubleRegister()) {
+    Abort(kToOperandIsDoubleRegisterUnimplemented);
+    return Operand::Zero();
+  }
+  // Stack slots not implemented, use ToMemOperand instead.
+  UNREACHABLE();
+  return Operand::Zero();
+}
+
+
+static int ArgumentsOffsetWithoutFrame(int index) {
+  DCHECK(index < 0);
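+  // index -1 maps to offset 0, index -2 to kPointerSize, and so on.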
+  return -(index + 1) * kPointerSize;
+}
+
+
+MemOperand LCodeGen::ToMemOperand(LOperand* op) const {
+  DCHECK(!op->IsRegister());
+  DCHECK(!op->IsDoubleRegister());
+  DCHECK(op->IsStackSlot() || op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()));
+  } else {
+    // Retrieve the parameter relative to the stack pointer, since there is
+    // no eager stack frame.
+    return MemOperand(sp, ArgumentsOffsetWithoutFrame(op->index()));
+  }
+}
+
+
+MemOperand LCodeGen::ToHighMemOperand(LOperand* op) const {
+  DCHECK(op->IsDoubleStackSlot());
+  if (NeedsEagerFrame()) {
+    return MemOperand(fp, StackSlotOffset(op->index()) + kPointerSize);
+  } else {
+    // Retrieve the parameter relative to the stack pointer, since there is
+    // no eager stack frame.
+    return MemOperand(sp,
+                      ArgumentsOffsetWithoutFrame(op->index()) + kPointerSize);
+  }
+}
+
+
+void LCodeGen::WriteTranslation(LEnvironment* environment,
+                                Translation* translation) {
+  if (environment == NULL) return;
+
+  // The translation includes one command per value in the environment.
+  int translation_size = environment->translation_size();
+  // The output frame height does not include the parameters.
+  int height = translation_size - environment->parameter_count();
+
+  WriteTranslation(environment->outer(), translation);
+  bool has_closure_id =
+      !info()->closure().is_null() &&
+      !info()->closure().is_identical_to(environment->closure());
+  int closure_id = has_closure_id
+                       ? DefineDeoptimizationLiteral(environment->closure())
+                       : Translation::kSelfLiteralId;
+
+  switch (environment->frame_type()) {
+    case JS_FUNCTION:
+      translation->BeginJSFrame(environment->ast_id(), closure_id, height);
+      break;
+    case JS_CONSTRUCT:
+      translation->BeginConstructStubFrame(closure_id, translation_size);
+      break;
+    case JS_GETTER:
+      DCHECK(translation_size == 1);
+      DCHECK(height == 0);
+      translation->BeginGetterStubFrame(closure_id);
+      break;
+    case JS_SETTER:
+      DCHECK(translation_size == 2);
+      DCHECK(height == 0);
+      translation->BeginSetterStubFrame(closure_id);
+      break;
+    case STUB:
+      translation->BeginCompiledStubFrame();
+      break;
+    case ARGUMENTS_ADAPTOR:
+      translation->BeginArgumentsAdaptorFrame(closure_id, translation_size);
+      break;
+  }
+
+  int object_index = 0;
+  int dematerialized_index = 0;
+  for (int i = 0; i < translation_size; ++i) {
+    LOperand* value = environment->values()->at(i);
+    AddToTranslation(
+        environment, translation, value, environment->HasTaggedValueAt(i),
+        environment->HasUint32ValueAt(i), &object_index, &dematerialized_index);
+  }
+}
+
+
+void LCodeGen::AddToTranslation(LEnvironment* environment,
+                                Translation* translation, LOperand* op,
+                                bool is_tagged, bool is_uint32,
+                                int* object_index_pointer,
+                                int* dematerialized_index_pointer) {
+  if (op == LEnvironment::materialization_marker()) {
+    int object_index = (*object_index_pointer)++;
+    if (environment->ObjectIsDuplicateAt(object_index)) {
+      int dupe_of = environment->ObjectDuplicateOfAt(object_index);
+      translation->DuplicateObject(dupe_of);
+      return;
+    }
+    int object_length = environment->ObjectLengthAt(object_index);
+    if (environment->ObjectIsArgumentsAt(object_index)) {
+      translation->BeginArgumentsObject(object_length);
+    } else {
+      translation->BeginCapturedObject(object_length);
+    }
+    int dematerialized_index = *dematerialized_index_pointer;
+    int env_offset = environment->translation_size() + dematerialized_index;
+    *dematerialized_index_pointer += object_length;
+    for (int i = 0; i < object_length; ++i) {
+      LOperand* value = environment->values()->at(env_offset + i);
+      AddToTranslation(environment, translation, value,
+                       environment->HasTaggedValueAt(env_offset + i),
+                       environment->HasUint32ValueAt(env_offset + i),
+                       object_index_pointer, dematerialized_index_pointer);
+    }
+    return;
+  }
+
+  if (op->IsStackSlot()) {
+    if (is_tagged) {
+      translation->StoreStackSlot(op->index());
+    } else if (is_uint32) {
+      translation->StoreUint32StackSlot(op->index());
+    } else {
+      translation->StoreInt32StackSlot(op->index());
+    }
+  } else if (op->IsDoubleStackSlot()) {
+    translation->StoreDoubleStackSlot(op->index());
+  } else if (op->IsRegister()) {
+    Register reg = ToRegister(op);
+    if (is_tagged) {
+      translation->StoreRegister(reg);
+    } else if (is_uint32) {
+      translation->StoreUint32Register(reg);
+    } else {
+      translation->StoreInt32Register(reg);
+    }
+  } else if (op->IsDoubleRegister()) {
+    DoubleRegister reg = ToDoubleRegister(op);
+    translation->StoreDoubleRegister(reg);
+  } else if (op->IsConstantOperand()) {
+    HConstant* constant = chunk()->LookupConstant(LConstantOperand::cast(op));
+    int src_index = DefineDeoptimizationLiteral(constant->handle(isolate()));
+    translation->StoreLiteral(src_index);
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallCode(Handle<Code> code, RelocInfo::Mode mode,
+                        LInstruction* instr) {
+  CallCodeGeneric(code, mode, instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+                               LInstruction* instr,
+                               SafepointMode safepoint_mode) {
+  DCHECK(instr != NULL);
+  __ Call(code, mode);
+  RecordSafepointWithLazyDeopt(instr, safepoint_mode);
+
+  // Signal that we don't inline smi code before these stubs in the
+  // optimizing code generator.
+  if (code->kind() == Code::BINARY_OP_IC || code->kind() == Code::COMPARE_IC) {
+    __ nop();
+  }
+}
+
+
+void LCodeGen::CallRuntime(const Runtime::Function* function, int num_arguments,
+                           LInstruction* instr, SaveFPRegsMode save_doubles) {
+  DCHECK(instr != NULL);
+
+  __ CallRuntime(function, num_arguments, save_doubles);
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::LoadContextFromDeferred(LOperand* context) {
+  if (context->IsRegister()) {
+    __ Move(cp, ToRegister(context));
+  } else if (context->IsStackSlot()) {
+    __ LoadP(cp, ToMemOperand(context));
+  } else if (context->IsConstantOperand()) {
+    HConstant* constant =
+        chunk_->LookupConstant(LConstantOperand::cast(context));
+    __ Move(cp, Handle<Object>::cast(constant->handle(isolate())));
+  } else {
+    UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+                                       LInstruction* instr, LOperand* context) {
+  LoadContextFromDeferred(context);
+  __ CallRuntimeSaveDoubles(id);
+  RecordSafepointWithRegisters(instr->pointer_map(), argc,
+                               Safepoint::kNoLazyDeopt);
+}
+
+
+void LCodeGen::RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                                    Safepoint::DeoptMode mode) {
+  environment->set_has_been_used();
+  if (!environment->HasBeenRegistered()) {
+    // Physical stack frame layout:
+    // -x ............. -4  0 ..................................... y
+    // [incoming arguments] [spill slots] [pushed outgoing arguments]
+
+    // Layout of the environment:
+    // 0 ..................................................... size-1
+    // [parameters] [locals] [expression stack including arguments]
+
+    // Layout of the translation:
+    // 0 ........................................................ size - 1 + 4
+    // [expression stack including arguments] [locals] [4 words] [parameters]
+    // |>------------  translation_size ------------<|
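+    //
+    // Example: an environment chain inlined one level deep yields
+    // frame_count == 2, while jsframe_count counts only the JS_FUNCTION
+    // frames among them.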
+
+    int frame_count = 0;
+    int jsframe_count = 0;
+    for (LEnvironment* e = environment; e != NULL; e = e->outer()) {
+      ++frame_count;
+      if (e->frame_type() == JS_FUNCTION) {
+        ++jsframe_count;
+      }
+    }
+    Translation translation(&translations_, frame_count, jsframe_count, zone());
+    WriteTranslation(environment, &translation);
+    int deoptimization_index = deoptimizations_.length();
+    int pc_offset = masm()->pc_offset();
+    environment->Register(deoptimization_index, translation.index(),
+                          (mode == Safepoint::kLazyDeopt) ? pc_offset : -1);
+    deoptimizations_.Add(environment, zone());
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition cond, LInstruction* instr,
+                            const char* detail,
+                            Deoptimizer::BailoutType bailout_type,
+                            CRegister cr) {
+  LEnvironment* environment = instr->environment();
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+  DCHECK(environment->HasBeenRegistered());
+  int id = environment->deoptimization_index();
+  DCHECK(info()->IsOptimizing() || info()->IsStub());
+  Address entry =
+      Deoptimizer::GetDeoptimizationEntry(isolate(), id, bailout_type);
+  if (entry == NULL) {
+    Abort(kBailoutWasNotPrepared);
+    return;
+  }
+
+  if (FLAG_deopt_every_n_times != 0 && !info()->IsStub()) {
+    CRegister alt_cr = cr6;
+    Register scratch = scratch0();
+    ExternalReference count = ExternalReference::stress_deopt_count(isolate());
+    Label no_deopt;
+    DCHECK(!alt_cr.is(cr));
+    __ Push(r4, scratch);
+    __ mov(scratch, Operand(count));
+    __ lwz(r4, MemOperand(scratch));
+    __ subi(r4, r4, Operand(1));
+    __ cmpi(r4, Operand::Zero(), alt_cr);
+    __ bne(&no_deopt, alt_cr);
+    __ li(r4, Operand(FLAG_deopt_every_n_times));
+    __ stw(r4, MemOperand(scratch));
+    __ Pop(r4, scratch);
+
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+    __ bind(&no_deopt);
+    __ stw(r4, MemOperand(scratch));
+    __ Pop(r4, scratch);
+  }
+
+  if (info()->ShouldTrapOnDeopt()) {
+    __ stop("trap_on_deopt", cond, kDefaultStopCode, cr);
+  }
+
+  Deoptimizer::Reason reason(instr->hydrogen_value()->position().raw(),
+                             instr->Mnemonic(), detail);
+  DCHECK(info()->IsStub() || frame_is_built_);
+  // Go through the jump table if we need to handle a condition, build the
+  // frame, or restore caller doubles.
+  if (cond == al && frame_is_built_ && !info()->saves_caller_doubles()) {
+    DeoptComment(reason);
+    __ Call(entry, RelocInfo::RUNTIME_ENTRY);
+  } else {
+    Deoptimizer::JumpTableEntry table_entry(entry, reason, bailout_type,
+                                            !frame_is_built_);
+    // We often have several deopts to the same entry; reuse the last
+    // jump entry if this is the case.
+    if (jump_table_.is_empty() ||
+        !table_entry.IsEquivalentTo(jump_table_.last())) {
+      jump_table_.Add(table_entry, zone());
+    }
+    __ b(cond, &jump_table_.last().label, cr);
+  }
+}
+
+
+void LCodeGen::DeoptimizeIf(Condition condition, LInstruction* instr,
+                            const char* detail, CRegister cr) {
+  Deoptimizer::BailoutType bailout_type =
+      info()->IsStub() ? Deoptimizer::LAZY : Deoptimizer::EAGER;
+  DeoptimizeIf(condition, instr, detail, bailout_type, cr);
+}
+
+
+void LCodeGen::PopulateDeoptimizationData(Handle<Code> code) {
+  int length = deoptimizations_.length();
+  if (length == 0) return;
+  Handle<DeoptimizationInputData> data =
+      DeoptimizationInputData::New(isolate(), length, TENURED);
+
+  Handle<ByteArray> translations =
+      translations_.CreateByteArray(isolate()->factory());
+  data->SetTranslationByteArray(*translations);
+  data->SetInlinedFunctionCount(Smi::FromInt(inlined_function_count_));
+  data->SetOptimizationId(Smi::FromInt(info_->optimization_id()));
+  if (info_->IsOptimizing()) {
+    // Reference to shared function info does not change between phases.
+    AllowDeferredHandleDereference allow_handle_dereference;
+    data->SetSharedFunctionInfo(*info_->shared_info());
+  } else {
+    data->SetSharedFunctionInfo(Smi::FromInt(0));
+  }
+
+  Handle<FixedArray> literals =
+      factory()->NewFixedArray(deoptimization_literals_.length(), TENURED);
+  {
+    AllowDeferredHandleDereference copy_handles;
+    for (int i = 0; i < deoptimization_literals_.length(); i++) {
+      literals->set(i, *deoptimization_literals_[i]);
+    }
+    data->SetLiteralArray(*literals);
+  }
+
+  data->SetOsrAstId(Smi::FromInt(info_->osr_ast_id().ToInt()));
+  data->SetOsrPcOffset(Smi::FromInt(osr_pc_offset_));
+
+  // Populate the deoptimization entries.
+  for (int i = 0; i < length; i++) {
+    LEnvironment* env = deoptimizations_[i];
+    data->SetAstId(i, env->ast_id());
+    data->SetTranslationIndex(i, Smi::FromInt(env->translation_index()));
+    data->SetArgumentsStackHeight(i,
+                                  Smi::FromInt(env->arguments_stack_height()));
+    data->SetPc(i, Smi::FromInt(env->pc_offset()));
+  }
+  code->set_deoptimization_data(*data);
+}
+
+
+int LCodeGen::DefineDeoptimizationLiteral(Handle<Object> literal) {
+  int result = deoptimization_literals_.length();
+  for (int i = 0; i < deoptimization_literals_.length(); ++i) {
+    if (deoptimization_literals_[i].is_identical_to(literal)) return i;
+  }
+  deoptimization_literals_.Add(literal, zone());
+  return result;
+}
+
+
+void LCodeGen::PopulateDeoptimizationLiteralsWithInlinedFunctions() {
+  DCHECK(deoptimization_literals_.length() == 0);
+
+  const ZoneList<Handle<JSFunction> >* inlined_closures =
+      chunk()->inlined_closures();
+
+  for (int i = 0, length = inlined_closures->length(); i < length; i++) {
+    DefineDeoptimizationLiteral(inlined_closures->at(i));
+  }
+
+  inlined_function_count_ = deoptimization_literals_.length();
+}
+
+
+void LCodeGen::RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                            SafepointMode safepoint_mode) {
+  if (safepoint_mode == RECORD_SIMPLE_SAFEPOINT) {
+    RecordSafepoint(instr->pointer_map(), Safepoint::kLazyDeopt);
+  } else {
+    DCHECK(safepoint_mode == RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kLazyDeopt);
+  }
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+                               int arguments, Safepoint::DeoptMode deopt_mode) {
+  DCHECK(expected_safepoint_kind_ == kind);
+
+  const ZoneList<LOperand*>* operands = pointers->GetNormalizedOperands();
+  Safepoint safepoint =
+      safepoints_.DefineSafepoint(masm(), kind, arguments, deopt_mode);
+  for (int i = 0; i < operands->length(); i++) {
+    LOperand* pointer = operands->at(i);
+    if (pointer->IsStackSlot()) {
+      safepoint.DefinePointerSlot(pointer->index(), zone());
+    } else if (pointer->IsRegister() && (kind & Safepoint::kWithRegisters)) {
+      safepoint.DefinePointerRegister(ToRegister(pointer), zone());
+    }
+  }
+#if V8_OOL_CONSTANT_POOL
+  if (kind & Safepoint::kWithRegisters) {
+    // kConstantPoolRegister always contains a pointer to the constant pool.
+    safepoint.DefinePointerRegister(kConstantPoolRegister, zone());
+  }
+#endif
+}
+
+
+void LCodeGen::RecordSafepoint(LPointerMap* pointers,
+                               Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kSimple, 0, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepoint(Safepoint::DeoptMode deopt_mode) {
+  LPointerMap empty_pointers(zone());
+  RecordSafepoint(&empty_pointers, deopt_mode);
+}
+
+
+void LCodeGen::RecordSafepointWithRegisters(LPointerMap* pointers,
+                                            int arguments,
+                                            Safepoint::DeoptMode deopt_mode) {
+  RecordSafepoint(pointers, Safepoint::kWithRegisters, arguments, deopt_mode);
+}
+
+
+void LCodeGen::RecordAndWritePosition(int position) {
+  if (position == RelocInfo::kNoPosition) return;
+  masm()->positions_recorder()->RecordPosition(position);
+  masm()->positions_recorder()->WriteRecordedPositions();
+}
+
+
+static const char* LabelType(LLabel* label) {
+  if (label->is_loop_header()) return " (loop header)";
+  if (label->is_osr_entry()) return " (OSR entry)";
+  return "";
+}
+
+
+void LCodeGen::DoLabel(LLabel* label) {
+  Comment(";;; <@%d,#%d> -------------------- B%d%s --------------------",
+          current_instruction_, label->hydrogen_value()->id(),
+          label->block_id(), LabelType(label));
+  __ bind(label->label());
+  current_block_ = label->block_id();
+  DoGap(label);
+}
+
+
+void LCodeGen::DoParallelMove(LParallelMove* move) { resolver_.Resolve(move); }
+
+
+void LCodeGen::DoGap(LGap* gap) {
+  for (int i = LGap::FIRST_INNER_POSITION; i <= LGap::LAST_INNER_POSITION;
+       i++) {
+    LGap::InnerPosition inner_pos = static_cast<LGap::InnerPosition>(i);
+    LParallelMove* move = gap->GetParallelMove(inner_pos);
+    if (move != NULL) DoParallelMove(move);
+  }
+}
+
+
+void LCodeGen::DoInstructionGap(LInstructionGap* instr) { DoGap(instr); }
+
+
+void LCodeGen::DoParameter(LParameter* instr) {
+  // Nothing to do.
+}
+
+
+void LCodeGen::DoCallStub(LCallStub* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->result()).is(r3));
+  switch (instr->hydrogen()->major_key()) {
+    case CodeStub::RegExpExec: {
+      RegExpExecStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::SubString: {
+      SubStringStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    case CodeStub::StringCompare: {
+      StringCompareStub stub(isolate());
+      CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+      break;
+    }
+    default:
+      UNREACHABLE();
+  }
+}
+
+
+void LCodeGen::DoUnknownOSRValue(LUnknownOSRValue* instr) {
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoModByPowerOf2I(LModByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  DCHECK(dividend.is(ToRegister(instr->result())));
+
+  // Theoretically, a variation of the branch-free code for integer division by
+  // a power of 2 (calculating the remainder via an additional multiplication
+  // (which gets simplified to an 'and') and subtraction) should be faster, and
+  // this is exactly what GCC and clang emit. Nevertheless, benchmarks seem to
+  // indicate that positive dividends are heavily favored, so the branching
+  // version performs better.
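+  //
+  // Worked example for divisor = 8 (shift = 3): -5 % 8 negates to 5,
+  // keeps the low 3 bits (5), and negates back to -5. kMinInt negates to
+  // itself; its low 3 bits are 0, so the remainder is 0 (triggering the
+  // minus-zero deopt when -0 matters).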
+  HMod* hmod = instr->hydrogen();
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  Label dividend_is_not_negative, done;
+  if (hmod->CheckFlag(HValue::kLeftCanBeNegative)) {
+    __ cmpwi(dividend, Operand::Zero());
+    __ bge(&dividend_is_not_negative);
+    if (shift) {
+      // Note that this is correct even for kMinInt operands.
+      __ neg(dividend, dividend);
+      __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+      __ neg(dividend, dividend, LeaveOE, SetRC);
+      if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+        DeoptimizeIf(eq, instr, "minus zero", cr0);
+      }
+    } else if (!hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ li(dividend, Operand::Zero());
+    } else {
+      DeoptimizeIf(al, instr, "minus zero");
+    }
+    __ b(&done);
+  }
+
+  __ bind(&dividend_is_not_negative);
+  if (shift) {
+    __ ExtractBitRange(dividend, dividend, shift - 1, 0);
+  } else {
+    __ li(dividend, Operand::Zero());
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoModByConstI(LModByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, "division by zero");
+    return;
+  }
+
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  __ mov(ip, Operand(Abs(divisor)));
+  __ mullw(result, result, ip);
+  __ sub(result, dividend, result, LeaveOE, SetRC);
+
+  // Check for negative zero.
+  HMod* hmod = instr->hydrogen();
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label remainder_not_zero;
+    __ bne(&remainder_not_zero, cr0);
+    __ cmpwi(dividend, Operand::Zero());
+    DeoptimizeIf(lt, instr, "minus zero");
+    __ bind(&remainder_not_zero);
+  }
+}
+
+
+void LCodeGen::DoModI(LModI* instr) {
+  HMod* hmod = instr->hydrogen();
+  Register left_reg = ToRegister(instr->left());
+  Register right_reg = ToRegister(instr->right());
+  Register result_reg = ToRegister(instr->result());
+  Register scratch = scratch0();
+  Label done;
+
+  if (hmod->CheckFlag(HValue::kCanOverflow)) {
+    __ li(r0, Operand::Zero());  // clear xer
+    __ mtxer(r0);
+  }
+
+  __ divw(scratch, left_reg, right_reg, SetOE, SetRC);
+
+  // Check for x % 0.
+  if (hmod->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ cmpwi(right_reg, Operand::Zero());
+    DeoptimizeIf(eq, instr, "division by zero");
+  }
+
+  // Check for kMinInt % -1; divw returns an undefined result in that case,
+  // which is not what we want. We have to deopt if we care about -0, because
+  // we can't return that.
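+  // (Example: in JS, kMinInt % -1 === -0, which has no representation in an
+  // integer register.)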
+  if (hmod->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      DeoptimizeIf(overflow, instr, "minus zero", cr0);
+    } else {
+      __ bnooverflow(&no_overflow_possible, cr0);
+      __ li(result_reg, Operand::Zero());
+      __ b(&done);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  __ mullw(scratch, right_reg, scratch);
+  __ sub(result_reg, left_reg, scratch, LeaveOE, SetRC);
+
+  // If we care about -0, test if the dividend is <0 and the result is 0.
+  if (hmod->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    __ bne(&done, cr0);
+    __ cmpwi(left_reg, Operand::Zero());
+    DeoptimizeIf(lt, instr, "minus zero");
+  }
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDivByPowerOf2I(LDivByPowerOf2I* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(divisor == kMinInt || base::bits::IsPowerOfTwo32(Abs(divisor)));
+  DCHECK(!result.is(dividend));
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ cmpwi(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, "minus zero");
+  }
+  // Check for (kMinInt / -1).
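+  // (lis below materializes kMinInt == 0x80000000 in r0 for the word compare.)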
+  if (hdiv->CheckFlag(HValue::kCanOverflow) && divisor == -1) {
+    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+    __ cmpw(dividend, r0);
+    DeoptimizeIf(eq, instr, "overflow");
+  }
+
+  int32_t shift = WhichPowerOf2Abs(divisor);
+
+  // Deoptimize if remainder will not be 0.
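+  // A division by a power of two is exact iff the low 'shift' bits of the
+  // dividend are all zero, which is what TestBitRange checks.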
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) && shift) {
+    __ TestBitRange(dividend, shift - 1, 0, r0);
+    DeoptimizeIf(ne, instr, "lost precision", cr0);
+  }
+
+  if (divisor == -1) {  // Nice shortcut, not needed for correctness.
+    __ neg(result, dividend);
+    return;
+  }
+  if (shift == 0) {
+    __ mr(result, dividend);
+  } else {
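+    // Round toward zero: add a bias of (2^shift - 1) to negative dividends
+    // before the arithmetic shift. The bias is built from the sign bit.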
+    if (shift == 1) {
+      __ srwi(result, dividend, Operand(31));
+    } else {
+      __ srawi(result, dividend, 31);
+      __ srwi(result, result, Operand(32 - shift));
+    }
+    __ add(result, dividend, result);
+    __ srawi(result, result, shift);
+  }
+  if (divisor < 0) __ neg(result, result);
+}
+
+
+void LCodeGen::DoDivByConstI(LDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, "division by zero");
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ cmpwi(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, "minus zero");
+  }
+
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ neg(result, result);
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    Register scratch = scratch0();
+    __ mov(ip, Operand(divisor));
+    __ mullw(scratch, result, ip);
+    __ cmpw(scratch, dividend);
+    DeoptimizeIf(ne, instr, "lost precision");
+  }
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoFlooringDivI.
+void LCodeGen::DoDivI(LDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  const Register dividend = ToRegister(instr->dividend());
+  const Register divisor = ToRegister(instr->divisor());
+  Register result = ToRegister(instr->result());
+
+  DCHECK(!dividend.is(result));
+  DCHECK(!divisor.is(result));
+
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    __ li(r0, Operand::Zero());  // clear xer
+    __ mtxer(r0);
+  }
+
+  __ divw(result, dividend, divisor, SetOE, SetRC);
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ cmpwi(divisor, Operand::Zero());
+    DeoptimizeIf(eq, instr, "division by zero");
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ cmpwi(dividend, Operand::Zero());
+    __ bne(&dividend_not_zero);
+    __ cmpwi(divisor, Operand::Zero());
+    DeoptimizeIf(lt, instr, "minus zero");
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+      DeoptimizeIf(overflow, instr, "overflow", cr0);
+    } else {
+      // When truncating, we want kMinInt / -1 = kMinInt.
+      __ bnooverflow(&no_overflow_possible, cr0);
+      __ mr(result, dividend);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  if (!hdiv->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    // Deoptimize if remainder is not 0.
+    Register scratch = scratch0();
+    __ mullw(scratch, divisor, result);
+    __ cmpw(dividend, scratch);
+    DeoptimizeIf(ne, instr, "lost precision");
+  }
+}
+
+
+void LCodeGen::DoFlooringDivByPowerOf2I(LFlooringDivByPowerOf2I* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  Register dividend = ToRegister(instr->dividend());
+  Register result = ToRegister(instr->result());
+  int32_t divisor = instr->divisor();
+
+  // If the divisor is positive, things are easy: There can be no deopts and we
+  // can simply do an arithmetic right shift.
+  int32_t shift = WhichPowerOf2Abs(divisor);
+  if (divisor > 0) {
+    if (shift || !result.is(dividend)) {
+      __ srawi(result, dividend, shift);
+    }
+    return;
+  }
+
+  // If the divisor is negative, we have to negate and handle edge cases.
+  OEBit oe = LeaveOE;
+#if V8_TARGET_ARCH_PPC64
+  if (divisor == -1 && hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+    __ cmpw(dividend, r0);
+    DeoptimizeIf(eq, instr, "overflow");
+  }
+#else
+  if (hdiv->CheckFlag(HValue::kLeftCanBeMinInt)) {
+    __ li(r0, Operand::Zero());  // clear xer
+    __ mtxer(r0);
+    oe = SetOE;
+  }
+#endif
+
+  __ neg(result, dividend, oe, SetRC);
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    DeoptimizeIf(eq, instr, "minus zero", cr0);
+  }
+
+// If the negation could not overflow, simply shifting is OK.
+#if !V8_TARGET_ARCH_PPC64
+  if (!instr->hydrogen()->CheckFlag(HValue::kLeftCanBeMinInt)) {
+#endif
+    if (shift) {
+      __ ShiftRightArithImm(result, result, shift);
+    }
+    return;
+#if !V8_TARGET_ARCH_PPC64
+  }
+
+  // Dividing by -1 is basically negation, unless we overflow.
+  if (divisor == -1) {
+    DeoptimizeIf(overflow, instr, "overflow", cr0);
+    return;
+  }
+
+  Label overflow, done;
+  __ boverflow(&overflow, cr0);
+  __ srawi(result, result, shift);
+  __ b(&done);
+  __ bind(&overflow);
+  __ mov(result, Operand(kMinInt / divisor));
+  __ bind(&done);
+#endif
+}
+
+
+void LCodeGen::DoFlooringDivByConstI(LFlooringDivByConstI* instr) {
+  Register dividend = ToRegister(instr->dividend());
+  int32_t divisor = instr->divisor();
+  Register result = ToRegister(instr->result());
+  DCHECK(!dividend.is(result));
+
+  if (divisor == 0) {
+    DeoptimizeIf(al, instr, "division by zero");
+    return;
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  HMathFloorOfDiv* hdiv = instr->hydrogen();
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) {
+    __ cmpwi(dividend, Operand::Zero());
+    DeoptimizeIf(eq, instr, "minus zero");
+  }
+
+  // Easy case: We need no dynamic check for the dividend and the flooring
+  // division is the same as the truncating division.
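+  // (Example: floor(x / 4) == trunc(x / 4) for all x >= 0.)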
+  if ((divisor > 0 && !hdiv->CheckFlag(HValue::kLeftCanBeNegative)) ||
+      (divisor < 0 && !hdiv->CheckFlag(HValue::kLeftCanBePositive))) {
+    __ TruncatingDiv(result, dividend, Abs(divisor));
+    if (divisor < 0) __ neg(result, result);
+    return;
+  }
+
+  // In the general case we may need to adjust before and after the truncating
+  // division to get a flooring division.
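+  // (Example with divisor = 4: floor(-7 / 4) == -2, computed as
+  // trunc((-7 + 1) / 4) - 1 == -1 - 1 == -2.)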
+  Register temp = ToRegister(instr->temp());
+  DCHECK(!temp.is(dividend) && !temp.is(result));
+  Label needs_adjustment, done;
+  __ cmpwi(dividend, Operand::Zero());
+  __ b(divisor > 0 ? lt : gt, &needs_adjustment);
+  __ TruncatingDiv(result, dividend, Abs(divisor));
+  if (divisor < 0) __ neg(result, result);
+  __ b(&done);
+  __ bind(&needs_adjustment);
+  __ addi(temp, dividend, Operand(divisor > 0 ? 1 : -1));
+  __ TruncatingDiv(result, temp, Abs(divisor));
+  if (divisor < 0) __ neg(result, result);
+  __ subi(result, result, Operand(1));
+  __ bind(&done);
+}
+
+
+// TODO(svenpanne) Refactor this to avoid code duplication with DoDivI.
+void LCodeGen::DoFlooringDivI(LFlooringDivI* instr) {
+  HBinaryOperation* hdiv = instr->hydrogen();
+  const Register dividend = ToRegister(instr->dividend());
+  const Register divisor = ToRegister(instr->divisor());
+  Register result = ToRegister(instr->result());
+
+  DCHECK(!dividend.is(result));
+  DCHECK(!divisor.is(result));
+
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    __ li(r0, Operand::Zero());  // clear xer
+    __ mtxer(r0);
+  }
+
+  __ divw(result, dividend, divisor, SetOE, SetRC);
+
+  // Check for x / 0.
+  if (hdiv->CheckFlag(HValue::kCanBeDivByZero)) {
+    __ cmpwi(divisor, Operand::Zero());
+    DeoptimizeIf(eq, instr, "division by zero");
+  }
+
+  // Check for (0 / -x) that will produce negative zero.
+  if (hdiv->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    Label dividend_not_zero;
+    __ cmpwi(dividend, Operand::Zero());
+    __ bne(&dividend_not_zero);
+    __ cmpwi(divisor, Operand::Zero());
+    DeoptimizeIf(lt, instr, "minus zero");
+    __ bind(&dividend_not_zero);
+  }
+
+  // Check for (kMinInt / -1).
+  if (hdiv->CheckFlag(HValue::kCanOverflow)) {
+    Label no_overflow_possible;
+    if (!hdiv->CheckFlag(HValue::kAllUsesTruncatingToInt32)) {
+      DeoptimizeIf(overflow, instr, "overflow", cr0);
+    } else {
+      // When truncating, we want kMinInt / -1 = kMinInt.
+      __ bnooverflow(&no_overflow_possible, cr0);
+      __ mr(result, dividend);
+    }
+    __ bind(&no_overflow_possible);
+  }
+
+  Label done;
+  Register scratch = scratch0();
+// If both operands have the same sign then we are done.
+#if V8_TARGET_ARCH_PPC64
+  __ xor_(scratch, dividend, divisor);
+  __ cmpwi(scratch, Operand::Zero());
+  __ bge(&done);
+#else
+  __ xor_(scratch, dividend, divisor, SetRC);
+  __ bge(&done, cr0);
+#endif
+
+  // If there is no remainder then we are done.
+  __ mullw(scratch, divisor, result);
+  __ cmpw(dividend, scratch);
+  __ beq(&done);
+
+  // We performed a truncating division. Correct the result.
+  __ subi(result, result, Operand(1));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMultiplyAddD(LMultiplyAddD* instr) {
+  DoubleRegister addend = ToDoubleRegister(instr->addend());
+  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+
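+  // fmadd computes multiplier * multiplicand + addend with a single rounding.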
+  __ fmadd(result, multiplier, multiplicand, addend);
+}
+
+
+void LCodeGen::DoMultiplySubD(LMultiplySubD* instr) {
+  DoubleRegister minuend = ToDoubleRegister(instr->minuend());
+  DoubleRegister multiplier = ToDoubleRegister(instr->multiplier());
+  DoubleRegister multiplicand = ToDoubleRegister(instr->multiplicand());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+
+  __ fmsub(result, multiplier, multiplicand, minuend);
+}
+
+
+void LCodeGen::DoMulI(LMulI* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+  // Note that result may alias left.
+  Register left = ToRegister(instr->left());
+  LOperand* right_op = instr->right();
+
+  bool bailout_on_minus_zero =
+      instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero);
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+
+  if (right_op->IsConstantOperand()) {
+    int32_t constant = ToInteger32(LConstantOperand::cast(right_op));
+
+    if (bailout_on_minus_zero && (constant < 0)) {
+      // The case of a zero constant is handled separately below.
+      // If the constant is negative and left is zero, the result is -0.
+      __ cmpi(left, Operand::Zero());
+      DeoptimizeIf(eq, instr, "minus zero");
+    }
+
+    switch (constant) {
+      case -1:
+        if (can_overflow) {
+#if V8_TARGET_ARCH_PPC64
+          if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+            __ li(r0, Operand::Zero());  // clear xer
+            __ mtxer(r0);
+            __ neg(result, left, SetOE, SetRC);
+            DeoptimizeIf(overflow, instr, "overflow", cr0);
+#if V8_TARGET_ARCH_PPC64
+          } else {
+            __ neg(result, left);
+            __ TestIfInt32(result, scratch, r0);
+            DeoptimizeIf(ne, instr, "overflow");
+          }
+#endif
+        } else {
+          __ neg(result, left);
+        }
+        break;
+      case 0:
+        if (bailout_on_minus_zero) {
+// If left is strictly negative and the constant is zero, the
+// result is -0. Deoptimize if required, otherwise return 0.
+#if V8_TARGET_ARCH_PPC64
+          if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+            __ cmpi(left, Operand::Zero());
+#if V8_TARGET_ARCH_PPC64
+          } else {
+            __ cmpwi(left, Operand::Zero());
+          }
+#endif
+          DeoptimizeIf(lt, instr, "minus zero");
+        }
+        __ li(result, Operand::Zero());
+        break;
+      case 1:
+        __ Move(result, left);
+        break;
+      default:
+        // Multiplying by powers of two and powers of two plus or minus
+        // one can be done faster with shifted operands.
+        // For other constants we emit standard code.
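+        // (Example: x * 5 == (x << 2) + x and x * 7 == (x << 3) - x.)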
+        int32_t mask = constant >> 31;
+        uint32_t constant_abs = (constant + mask) ^ mask;
+
+        if (base::bits::IsPowerOfTwo32(constant_abs)) {
+          int32_t shift = WhichPowerOf2(constant_abs);
+          __ ShiftLeftImm(result, left, Operand(shift));
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ neg(result, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs - 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs - 1);
+          __ ShiftLeftImm(scratch, left, Operand(shift));
+          __ add(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ neg(result, result);
+        } else if (base::bits::IsPowerOfTwo32(constant_abs + 1)) {
+          int32_t shift = WhichPowerOf2(constant_abs + 1);
+          __ ShiftLeftImm(scratch, left, Operand(shift));
+          __ sub(result, scratch, left);
+          // Correct the sign of the result if the constant is negative.
+          if (constant < 0) __ neg(result, result);
+        } else {
+          // Generate standard code.
+          __ mov(ip, Operand(constant));
+          __ Mul(result, left, ip);
+        }
+    }
+
+  } else {
+    DCHECK(right_op->IsRegister());
+    Register right = ToRegister(right_op);
+
+    if (can_overflow) {
+#if V8_TARGET_ARCH_PPC64
+      // result = left * right.
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ SmiUntag(scratch, right);
+        __ Mul(result, result, scratch);
+      } else {
+        __ Mul(result, left, right);
+      }
+      __ TestIfInt32(result, scratch, r0);
+      DeoptimizeIf(ne, instr, "overflow");
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiTag(result);
+      }
+#else
+      // scratch:result = left * right.
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ mulhw(scratch, result, right);
+        __ mullw(result, result, right);
+      } else {
+        __ mulhw(scratch, left, right);
+        __ mullw(result, left, right);
+      }
+      __ TestIfInt32(scratch, result, r0);
+      DeoptimizeIf(ne, instr, "overflow");
+#endif
+    } else {
+      if (instr->hydrogen()->representation().IsSmi()) {
+        __ SmiUntag(result, left);
+        __ Mul(result, result, right);
+      } else {
+        __ Mul(result, left, right);
+      }
+    }
+
+    if (bailout_on_minus_zero) {
+      Label done;
+#if V8_TARGET_ARCH_PPC64
+      if (instr->hydrogen()->representation().IsSmi()) {
+#endif
+        __ xor_(r0, left, right, SetRC);
+        __ bge(&done, cr0);
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        __ xor_(r0, left, right);
+        __ cmpwi(r0, Operand::Zero());
+        __ bge(&done);
+      }
+#endif
+      // Bail out if the result is minus zero.
+      __ cmpi(result, Operand::Zero());
+      DeoptimizeIf(eq, instr, "minus zero");
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoBitI(LBitI* instr) {
+  LOperand* left_op = instr->left();
+  LOperand* right_op = instr->right();
+  DCHECK(left_op->IsRegister());
+  Register left = ToRegister(left_op);
+  Register result = ToRegister(instr->result());
+  Operand right(no_reg);
+
+  if (right_op->IsStackSlot()) {
+    right = Operand(EmitLoadRegister(right_op, ip));
+  } else {
+    DCHECK(right_op->IsRegister() || right_op->IsConstantOperand());
+    right = ToOperand(right_op);
+
+    if (right_op->IsConstantOperand() && is_uint16(right.immediate())) {
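+      // The immediate forms (andi, ori, xori) encode a 16-bit unsigned
+      // immediate, hence the is_uint16 guard above.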
+      switch (instr->op()) {
+        case Token::BIT_AND:
+          __ andi(result, left, right);
+          break;
+        case Token::BIT_OR:
+          __ ori(result, left, right);
+          break;
+        case Token::BIT_XOR:
+          __ xori(result, left, right);
+          break;
+        default:
+          UNREACHABLE();
+          break;
+      }
+      return;
+    }
+  }
+
+  switch (instr->op()) {
+    case Token::BIT_AND:
+      __ And(result, left, right);
+      break;
+    case Token::BIT_OR:
+      __ Or(result, left, right);
+      break;
+    case Token::BIT_XOR:
+      if (right_op->IsConstantOperand() && right.immediate() == int32_t(~0)) {
+        __ notx(result, left);
+      } else {
+        __ Xor(result, left, right);
+      }
+      break;
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoShiftI(LShiftI* instr) {
+  // Both 'left' and 'right' are "used at start" (see LCodeGen::DoShift), so
+  // result may alias either of them.
+  LOperand* right_op = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  if (right_op->IsRegister()) {
+    // Mask the right_op operand.
+    __ andi(scratch, ToRegister(right_op), Operand(0x1F));
+    switch (instr->op()) {
+      case Token::ROR:
+        // rotate_right(a, b) == rotate_left(a, 32 - b)
+        __ subfic(scratch, scratch, Operand(32));
+        __ rotlw(result, left, scratch);
+        break;
+      case Token::SAR:
+        __ sraw(result, left, scratch);
+        break;
+      case Token::SHR:
+        if (instr->can_deopt()) {
+          __ srw(result, left, scratch, SetRC);
+#if V8_TARGET_ARCH_PPC64
+          __ extsw(result, result, SetRC);
+#endif
+          DeoptimizeIf(lt, instr, "negative value", cr0);
+        } else {
+          __ srw(result, left, scratch);
+        }
+        break;
+      case Token::SHL:
+        __ slw(result, left, scratch);
+#if V8_TARGET_ARCH_PPC64
+        __ extsw(result, result);
+#endif
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  } else {
+    // Mask the right_op operand.
+    int value = ToInteger32(LConstantOperand::cast(right_op));
+    uint8_t shift_count = static_cast<uint8_t>(value & 0x1F);
+    switch (instr->op()) {
+      case Token::ROR:
+        if (shift_count != 0) {
+          __ rotrwi(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SAR:
+        if (shift_count != 0) {
+          __ srawi(result, left, shift_count);
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHR:
+        if (shift_count != 0) {
+          __ srwi(result, left, Operand(shift_count));
+        } else {
+          if (instr->can_deopt()) {
+            __ cmpwi(left, Operand::Zero());
+            DeoptimizeIf(lt, instr, "negative value");
+          }
+          __ Move(result, left);
+        }
+        break;
+      case Token::SHL:
+        if (shift_count != 0) {
+#if V8_TARGET_ARCH_PPC64
+          if (instr->hydrogen_value()->representation().IsSmi()) {
+            __ sldi(result, left, Operand(shift_count));
+#else
+          if (instr->hydrogen_value()->representation().IsSmi() &&
+              instr->can_deopt()) {
+            if (shift_count != 1) {
+              __ slwi(result, left, Operand(shift_count - 1));
+              __ SmiTagCheckOverflow(result, result, scratch);
+            } else {
+              __ SmiTagCheckOverflow(result, left, scratch);
+            }
+            DeoptimizeIf(lt, instr, "overflow", cr0);
+#endif
+          } else {
+            __ slwi(result, left, Operand(shift_count));
+#if V8_TARGET_ARCH_PPC64
+            __ extsw(result, result);
+#endif
+          }
+        } else {
+          __ Move(result, left);
+        }
+        break;
+      default:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoSubI(LSubI* instr) {
+  LOperand* right = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+  if (!can_overflow) {
+    if (right->IsConstantOperand()) {
+      __ Add(result, left, -(ToOperand(right).immediate()), r0);
+    } else {
+      __ sub(result, left, EmitLoadRegister(right, ip));
+    }
+  } else {
+    if (right->IsConstantOperand()) {
+      __ AddAndCheckForOverflow(result, left, -(ToOperand(right).immediate()),
+                                scratch0(), r0);
+    } else {
+      __ SubAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
+                                scratch0(), r0);
+    }
+// Deoptimize on overflow.
+#if V8_TARGET_ARCH_PPC64
+    if (!instr->hydrogen()->representation().IsSmi()) {
+      __ extsw(scratch0(), scratch0(), SetRC);
+    }
+#endif
+    DeoptimizeIf(lt, instr, "overflow", cr0);
+  }
+
+#if V8_TARGET_ARCH_PPC64
+  if (!instr->hydrogen()->representation().IsSmi()) {
+    __ extsw(result, result);
+  }
+#endif
+}
+
+
+void LCodeGen::DoRSubI(LRSubI* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  LOperand* result = instr->result();
+
+  DCHECK(!instr->hydrogen()->CheckFlag(HValue::kCanOverflow) &&
+         right->IsConstantOperand());
+
+  Operand right_operand = ToOperand(right);
+  if (is_int16(right_operand.immediate())) {
+    __ subfic(ToRegister(result), ToRegister(left), right_operand);
+  } else {
+    __ mov(r0, right_operand);
+    __ sub(ToRegister(result), r0, ToRegister(left));
+  }
+}
+
+
+void LCodeGen::DoConstantI(LConstantI* instr) {
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantS(LConstantS* instr) {
+  __ LoadSmiLiteral(ToRegister(instr->result()), instr->value());
+}
+
+
+// TODO(penguin): put the constant into the constant pool instead
+// of storing the double to the stack.
+void LCodeGen::DoConstantD(LConstantD* instr) {
+  DCHECK(instr->result()->IsDoubleRegister());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  double v = instr->value();
+  __ LoadDoubleLiteral(result, v, scratch0());
+}
+
+
+void LCodeGen::DoConstantE(LConstantE* instr) {
+  __ mov(ToRegister(instr->result()), Operand(instr->value()));
+}
+
+
+void LCodeGen::DoConstantT(LConstantT* instr) {
+  Handle<Object> object = instr->value(isolate());
+  AllowDeferredHandleDereference smi_check;
+  __ Move(ToRegister(instr->result()), object);
+}
+
+
+void LCodeGen::DoMapEnumLength(LMapEnumLength* instr) {
+  Register result = ToRegister(instr->result());
+  Register map = ToRegister(instr->value());
+  __ EnumLength(result, map);
+}
+
+
+void LCodeGen::DoDateField(LDateField* instr) {
+  Register object = ToRegister(instr->date());
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp());
+  Smi* index = instr->index();
+  Label runtime, done;
+  DCHECK(object.is(result));
+  DCHECK(object.is(r3));
+  DCHECK(!scratch.is(scratch0()));
+  DCHECK(!scratch.is(object));
+
+  __ TestIfSmi(object, r0);
+  DeoptimizeIf(eq, instr, "Smi", cr0);
+  __ CompareObjectType(object, scratch, scratch, JS_DATE_TYPE);
+  DeoptimizeIf(ne, instr, "not a date object");
+
+  if (index->value() == 0) {
+    __ LoadP(result, FieldMemOperand(object, JSDate::kValueOffset));
+  } else {
+    if (index->value() < JSDate::kFirstUncachedField) {
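+      // Compare the global date cache stamp with the stamp cached on the
+      // object; if they still match, the cached field is valid.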
+      ExternalReference stamp = ExternalReference::date_cache_stamp(isolate());
+      __ mov(scratch, Operand(stamp));
+      __ LoadP(scratch, MemOperand(scratch));
+      __ LoadP(scratch0(), FieldMemOperand(object, JSDate::kCacheStampOffset));
+      __ cmp(scratch, scratch0());
+      __ bne(&runtime);
+      __ LoadP(result,
+               FieldMemOperand(object, JSDate::kValueOffset +
+                                           kPointerSize * index->value()));
+      __ b(&done);
+    }
+    __ bind(&runtime);
+    __ PrepareCallCFunction(2, scratch);
+    __ LoadSmiLiteral(r4, index);
+    __ CallCFunction(ExternalReference::get_date_field_function(isolate()), 2);
+    __ bind(&done);
+  }
+}
+
+
+MemOperand LCodeGen::BuildSeqStringOperand(Register string, LOperand* index,
+                                           String::Encoding encoding) {
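+  // Computes the address of the character at |index| in a sequential string,
+  // scaling the index by the character width for two-byte strings.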
+  if (index->IsConstantOperand()) {
+    int offset = ToInteger32(LConstantOperand::cast(index));
+    if (encoding == String::TWO_BYTE_ENCODING) {
+      offset *= kUC16Size;
+    }
+    STATIC_ASSERT(kCharSize == 1);
+    return FieldMemOperand(string, SeqString::kHeaderSize + offset);
+  }
+  Register scratch = scratch0();
+  DCHECK(!scratch.is(string));
+  DCHECK(!scratch.is(ToRegister(index)));
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ add(scratch, string, ToRegister(index));
+  } else {
+    STATIC_ASSERT(kUC16Size == 2);
+    __ ShiftLeftImm(scratch, ToRegister(index), Operand(1));
+    __ add(scratch, string, scratch);
+  }
+  return FieldMemOperand(scratch, SeqString::kHeaderSize);
+}
+
+
+void LCodeGen::DoSeqStringGetChar(LSeqStringGetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+
+  if (FLAG_debug_code) {
+    Register scratch = scratch0();
+    __ LoadP(scratch, FieldMemOperand(string, HeapObject::kMapOffset));
+    __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+    __ andi(scratch, scratch,
+            Operand(kStringRepresentationMask | kStringEncodingMask));
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    __ cmpi(scratch,
+            Operand(encoding == String::ONE_BYTE_ENCODING ? one_byte_seq_type
+                                                          : two_byte_seq_type));
+    __ Check(eq, kUnexpectedStringType);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ lbz(result, operand);
+  } else {
+    __ lhz(result, operand);
+  }
+}
+
+
+void LCodeGen::DoSeqStringSetChar(LSeqStringSetChar* instr) {
+  String::Encoding encoding = instr->hydrogen()->encoding();
+  Register string = ToRegister(instr->string());
+  Register value = ToRegister(instr->value());
+
+  if (FLAG_debug_code) {
+    Register index = ToRegister(instr->index());
+    static const uint32_t one_byte_seq_type = kSeqStringTag | kOneByteStringTag;
+    static const uint32_t two_byte_seq_type = kSeqStringTag | kTwoByteStringTag;
+    int encoding_mask =
+        instr->hydrogen()->encoding() == String::ONE_BYTE_ENCODING
+            ? one_byte_seq_type
+            : two_byte_seq_type;
+    __ EmitSeqStringSetCharCheck(string, index, value, encoding_mask);
+  }
+
+  MemOperand operand = BuildSeqStringOperand(string, instr->index(), encoding);
+  if (encoding == String::ONE_BYTE_ENCODING) {
+    __ stb(value, operand);
+  } else {
+    __ sth(value, operand);
+  }
+}
+
+
+void LCodeGen::DoAddI(LAddI* instr) {
+  LOperand* right = instr->right();
+  Register left = ToRegister(instr->left());
+  Register result = ToRegister(instr->result());
+  bool can_overflow = instr->hydrogen()->CheckFlag(HValue::kCanOverflow);
+#if V8_TARGET_ARCH_PPC64
+  bool isInteger = !(instr->hydrogen()->representation().IsSmi() ||
+                     instr->hydrogen()->representation().IsExternal());
+#endif
+
+  if (!can_overflow) {
+    if (right->IsConstantOperand()) {
+      __ Add(result, left, ToOperand(right).immediate(), r0);
+    } else {
+      __ add(result, left, EmitLoadRegister(right, ip));
+    }
+  } else {
+    if (right->IsConstantOperand()) {
+      __ AddAndCheckForOverflow(result, left, ToOperand(right).immediate(),
+                                scratch0(), r0);
+    } else {
+      __ AddAndCheckForOverflow(result, left, EmitLoadRegister(right, ip),
+                                scratch0(), r0);
+    }
+// Deoptimize on overflow
+#if V8_TARGET_ARCH_PPC64
+    if (isInteger) {
+      __ extsw(scratch0(), scratch0(), SetRC);
+    }
+#endif
+    DeoptimizeIf(lt, instr, "overflow", cr0);
+  }
+
+#if V8_TARGET_ARCH_PPC64
+  if (isInteger) {
+    __ extsw(result, result);
+  }
+#endif
+}
+
+
+void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  HMathMinMax::Operation operation = instr->hydrogen()->operation();
+  Condition cond = (operation == HMathMinMax::kMathMin) ? le : ge;
+  if (instr->hydrogen()->representation().IsSmiOrInteger32()) {
+    Register left_reg = ToRegister(left);
+    Register right_reg = EmitLoadRegister(right, ip);
+    Register result_reg = ToRegister(instr->result());
+    Label return_left, done;
+#if V8_TARGET_ARCH_PPC64
+    if (instr->hydrogen_value()->representation().IsSmi()) {
+#endif
+      __ cmp(left_reg, right_reg);
+#if V8_TARGET_ARCH_PPC64
+    } else {
+      __ cmpw(left_reg, right_reg);
+    }
+#endif
+    __ b(cond, &return_left);
+    __ Move(result_reg, right_reg);
+    __ b(&done);
+    __ bind(&return_left);
+    __ Move(result_reg, left_reg);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->representation().IsDouble());
+    DoubleRegister left_reg = ToDoubleRegister(left);
+    DoubleRegister right_reg = ToDoubleRegister(right);
+    DoubleRegister result_reg = ToDoubleRegister(instr->result());
+    Label check_nan_left, check_zero, return_left, return_right, done;
+    __ fcmpu(left_reg, right_reg);
+    __ bunordered(&check_nan_left);
+    __ beq(&check_zero);
+    __ b(cond, &return_left);
+    __ b(&return_right);
+
+    __ bind(&check_zero);
+    __ fcmpu(left_reg, kDoubleRegZero);
+    __ bne(&return_left);  // left == right != 0.
+
+    // At this point, both left and right are either 0 or -0.
+    // N.B. The following works because +0 + -0 == +0
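+    // e.g. min(+0, -0): -(-(+0) + -(-0)) = -(-0 + +0) = -(+0) = -0, while
+    // max(+0, -0): +0 + -0 == +0.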
+    if (operation == HMathMinMax::kMathMin) {
+      // For min we want logical-or of sign bit: -(-L + -R)
+      __ fneg(left_reg, left_reg);
+      __ fsub(result_reg, left_reg, right_reg);
+      __ fneg(result_reg, result_reg);
+    } else {
+      // For max we want logical-and of sign bit: (L + R)
+      __ fadd(result_reg, left_reg, right_reg);
+    }
+    __ b(&done);
+
+    __ bind(&check_nan_left);
+    __ fcmpu(left_reg, left_reg);
+    __ bunordered(&return_left);  // left == NaN.
+
+    __ bind(&return_right);
+    if (!right_reg.is(result_reg)) {
+      __ fmr(result_reg, right_reg);
+    }
+    __ b(&done);
+
+    __ bind(&return_left);
+    if (!left_reg.is(result_reg)) {
+      __ fmr(result_reg, left_reg);
+    }
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
+  DoubleRegister left = ToDoubleRegister(instr->left());
+  DoubleRegister right = ToDoubleRegister(instr->right());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  switch (instr->op()) {
+    case Token::ADD:
+      __ fadd(result, left, right);
+      break;
+    case Token::SUB:
+      __ fsub(result, left, right);
+      break;
+    case Token::MUL:
+      __ fmul(result, left, right);
+      break;
+    case Token::DIV:
+      __ fdiv(result, left, right);
+      break;
+    case Token::MOD: {
+      __ PrepareCallCFunction(0, 2, scratch0());
+      __ MovToFloatParameters(left, right);
+      __ CallCFunction(ExternalReference::mod_two_doubles_operation(isolate()),
+                       0, 2);
+      // Move the result in the double result register.
+      __ MovFromFloatResult(result);
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+}
+
+
+void LCodeGen::DoArithmeticT(LArithmeticT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r4));
+  DCHECK(ToRegister(instr->right()).is(r3));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  Handle<Code> code =
+      CodeFactory::BinaryOpIC(isolate(), instr->op(), NO_OVERWRITE).code();
+  CallCode(code, RelocInfo::CODE_TARGET, instr);
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitBranch(InstrType instr, Condition cond, CRegister cr) {
+  int left_block = instr->TrueDestination(chunk_);
+  int right_block = instr->FalseDestination(chunk_);
+
+  int next_block = GetNextEmittedBlock();
+
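+  // Fall through whenever a destination is the next emitted block, so at
+  // most one branch instruction is needed.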
+  if (right_block == left_block || cond == al) {
+    EmitGoto(left_block);
+  } else if (left_block == next_block) {
+    __ b(NegateCondition(cond), chunk_->GetAssemblyLabel(right_block), cr);
+  } else if (right_block == next_block) {
+    __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
+  } else {
+    __ b(cond, chunk_->GetAssemblyLabel(left_block), cr);
+    __ b(chunk_->GetAssemblyLabel(right_block));
+  }
+}
+
+
+template <class InstrType>
+void LCodeGen::EmitFalseBranch(InstrType instr, Condition cond, CRegister cr) {
+  int false_block = instr->FalseDestination(chunk_);
+  __ b(cond, chunk_->GetAssemblyLabel(false_block), cr);
+}
+
+
+void LCodeGen::DoDebugBreak(LDebugBreak* instr) { __ stop("LBreak"); }
+
+
+void LCodeGen::DoBranch(LBranch* instr) {
+  Representation r = instr->hydrogen()->value()->representation();
+  DoubleRegister dbl_scratch = double_scratch0();
+  const uint32_t crZOrNaNBits =
+      (1 << (31 - Assembler::encode_crbit(cr7, CR_EQ)) |
+       1 << (31 - Assembler::encode_crbit(cr7, CR_FU)));
+
+  if (r.IsInteger32()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    __ cmpwi(reg, Operand::Zero());
+    EmitBranch(instr, ne);
+  } else if (r.IsSmi()) {
+    DCHECK(!info()->IsStub());
+    Register reg = ToRegister(instr->value());
+    __ cmpi(reg, Operand::Zero());
+    EmitBranch(instr, ne);
+  } else if (r.IsDouble()) {
+    DCHECK(!info()->IsStub());
+    DoubleRegister reg = ToDoubleRegister(instr->value());
+    // Test the double value. Zero and NaN are false.
+    __ fcmpu(reg, kDoubleRegZero, cr7);
+    __ mfcr(r0);
+    __ andi(r0, r0, Operand(crZOrNaNBits));
+    EmitBranch(instr, eq, cr0);
+  } else {
+    DCHECK(r.IsTagged());
+    Register reg = ToRegister(instr->value());
+    HType type = instr->hydrogen()->value()->type();
+    if (type.IsBoolean()) {
+      DCHECK(!info()->IsStub());
+      __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+      EmitBranch(instr, eq);
+    } else if (type.IsSmi()) {
+      DCHECK(!info()->IsStub());
+      __ cmpi(reg, Operand::Zero());
+      EmitBranch(instr, ne);
+    } else if (type.IsJSArray()) {
+      DCHECK(!info()->IsStub());
+      EmitBranch(instr, al);
+    } else if (type.IsHeapNumber()) {
+      DCHECK(!info()->IsStub());
+      __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+      // Test the double value. Zero and NaN are false.
+      __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
+      __ mfcr(r0);
+      __ andi(r0, r0, Operand(crZOrNaNBits));
+      EmitBranch(instr, eq, cr0);
+    } else if (type.IsString()) {
+      DCHECK(!info()->IsStub());
+      __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+      __ cmpi(ip, Operand::Zero());
+      EmitBranch(instr, ne);
+    } else {
+      ToBooleanStub::Types expected = instr->hydrogen()->expected_input_types();
+      // Avoid deopts in the case where we've never executed this path before.
+      if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+      if (expected.Contains(ToBooleanStub::UNDEFINED)) {
+        // undefined -> false.
+        __ CompareRoot(reg, Heap::kUndefinedValueRootIndex);
+        __ beq(instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::BOOLEAN)) {
+        // Boolean -> its value.
+        __ CompareRoot(reg, Heap::kTrueValueRootIndex);
+        __ beq(instr->TrueLabel(chunk_));
+        __ CompareRoot(reg, Heap::kFalseValueRootIndex);
+        __ beq(instr->FalseLabel(chunk_));
+      }
+      if (expected.Contains(ToBooleanStub::NULL_TYPE)) {
+        // 'null' -> false.
+        __ CompareRoot(reg, Heap::kNullValueRootIndex);
+        __ beq(instr->FalseLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::SMI)) {
+        // Smis: 0 -> false, all other -> true.
+        __ cmpi(reg, Operand::Zero());
+        __ beq(instr->FalseLabel(chunk_));
+        __ JumpIfSmi(reg, instr->TrueLabel(chunk_));
+      } else if (expected.NeedsMap()) {
+        // If we need a map later and have a Smi -> deopt.
+        __ TestIfSmi(reg, r0);
+        DeoptimizeIf(eq, instr, "Smi", cr0);
+      }
+
+      const Register map = scratch0();
+      if (expected.NeedsMap()) {
+        __ LoadP(map, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+        if (expected.CanBeUndetectable()) {
+          // Undetectable -> false.
+          __ lbz(ip, FieldMemOperand(map, Map::kBitFieldOffset));
+          __ TestBit(ip, Map::kIsUndetectable, r0);
+          __ bne(instr->FalseLabel(chunk_), cr0);
+        }
+      }
+
+      if (expected.Contains(ToBooleanStub::SPEC_OBJECT)) {
+        // spec object -> true.
+        __ CompareInstanceType(map, ip, FIRST_SPEC_OBJECT_TYPE);
+        __ bge(instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::STRING)) {
+        // String value -> false iff empty.
+        Label not_string;
+        __ CompareInstanceType(map, ip, FIRST_NONSTRING_TYPE);
+        __ bge(&not_string);
+        __ LoadP(ip, FieldMemOperand(reg, String::kLengthOffset));
+        __ cmpi(ip, Operand::Zero());
+        __ bne(instr->TrueLabel(chunk_));
+        __ b(instr->FalseLabel(chunk_));
+        __ bind(&not_string);
+      }
+
+      if (expected.Contains(ToBooleanStub::SYMBOL)) {
+        // Symbol value -> true.
+        __ CompareInstanceType(map, ip, SYMBOL_TYPE);
+        __ beq(instr->TrueLabel(chunk_));
+      }
+
+      if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
+        // heap number -> false iff +0, -0, or NaN.
+        Label not_heap_number;
+        __ CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+        __ bne(&not_heap_number);
+        __ lfd(dbl_scratch, FieldMemOperand(reg, HeapNumber::kValueOffset));
+        // Test the double value. Zero and NaN are false.
+        __ fcmpu(dbl_scratch, kDoubleRegZero, cr7);
+        __ mfcr(r0);
+        __ andi(r0, r0, Operand(crZOrNaNBits));
+        __ bne(instr->FalseLabel(chunk_), cr0);
+        __ b(instr->TrueLabel(chunk_));
+        __ bind(&not_heap_number);
+      }
+
+      if (!expected.IsGeneric()) {
+        // We've seen something for the first time -> deopt.
+        // This can only happen if we are not generic already.
+        DeoptimizeIf(al, instr, "unexpected object");
+      }
+    }
+  }
+}
+
+
+void LCodeGen::EmitGoto(int block) {
+  if (!IsNextEmittedBlock(block)) {
+    __ b(chunk_->GetAssemblyLabel(LookupDestination(block)));
+  }
+}
+
+
+void LCodeGen::DoGoto(LGoto* instr) { EmitGoto(instr->block_id()); }
+
+
+Condition LCodeGen::TokenToCondition(Token::Value op) {
+  Condition cond = kNoCondition;
+  switch (op) {
+    case Token::EQ:
+    case Token::EQ_STRICT:
+      cond = eq;
+      break;
+    case Token::NE:
+    case Token::NE_STRICT:
+      cond = ne;
+      break;
+    case Token::LT:
+      cond = lt;
+      break;
+    case Token::GT:
+      cond = gt;
+      break;
+    case Token::LTE:
+      cond = le;
+      break;
+    case Token::GTE:
+      cond = ge;
+      break;
+    case Token::IN:
+    case Token::INSTANCEOF:
+    default:
+      UNREACHABLE();
+  }
+  return cond;
+}
+
+
+void LCodeGen::DoCompareNumericAndBranch(LCompareNumericAndBranch* instr) {
+  LOperand* left = instr->left();
+  LOperand* right = instr->right();
+  bool is_unsigned =
+      instr->hydrogen()->left()->CheckFlag(HInstruction::kUint32) ||
+      instr->hydrogen()->right()->CheckFlag(HInstruction::kUint32);
+  Condition cond = TokenToCondition(instr->op());
+
+  if (left->IsConstantOperand() && right->IsConstantOperand()) {
+    // We can statically evaluate the comparison.
+    double left_val = ToDouble(LConstantOperand::cast(left));
+    double right_val = ToDouble(LConstantOperand::cast(right));
+    int next_block = EvalComparison(instr->op(), left_val, right_val)
+                         ? instr->TrueDestination(chunk_)
+                         : instr->FalseDestination(chunk_);
+    EmitGoto(next_block);
+  } else {
+    if (instr->is_double()) {
+      // Compare left and right operands as doubles and load the
+      // resulting flags into the normal status register.
+      __ fcmpu(ToDoubleRegister(left), ToDoubleRegister(right));
+      // If a NaN is involved, i.e. the result is unordered,
+      // jump to false block label.
+      __ bunordered(instr->FalseLabel(chunk_));
+    } else {
+      if (right->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(right));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          if (is_unsigned) {
+            __ CmplSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+          } else {
+            __ CmpSmiLiteral(ToRegister(left), Smi::FromInt(value), r0);
+          }
+        } else {
+          if (is_unsigned) {
+            __ Cmplwi(ToRegister(left), Operand(value), r0);
+          } else {
+            __ Cmpwi(ToRegister(left), Operand(value), r0);
+          }
+        }
+      } else if (left->IsConstantOperand()) {
+        int32_t value = ToInteger32(LConstantOperand::cast(left));
+        if (instr->hydrogen_value()->representation().IsSmi()) {
+          if (is_unsigned) {
+            __ CmplSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+          } else {
+            __ CmpSmiLiteral(ToRegister(right), Smi::FromInt(value), r0);
+          }
+        } else {
+          if (is_unsigned) {
+            __ Cmplwi(ToRegister(right), Operand(value), r0);
+          } else {
+            __ Cmpwi(ToRegister(right), Operand(value), r0);
+          }
+        }
+        // We commuted the operands, so commute the condition.
+        cond = CommuteCondition(cond);
+      } else if (instr->hydrogen_value()->representation().IsSmi()) {
+        if (is_unsigned) {
+          __ cmpl(ToRegister(left), ToRegister(right));
+        } else {
+          __ cmp(ToRegister(left), ToRegister(right));
+        }
+      } else {
+        if (is_unsigned) {
+          __ cmplw(ToRegister(left), ToRegister(right));
+        } else {
+          __ cmpw(ToRegister(left), ToRegister(right));
+        }
+      }
+    }
+    EmitBranch(instr, cond);
+  }
+}
+
+
+void LCodeGen::DoCmpObjectEqAndBranch(LCmpObjectEqAndBranch* instr) {
+  Register left = ToRegister(instr->left());
+  Register right = ToRegister(instr->right());
+
+  __ cmp(left, right);
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCmpHoleAndBranch(LCmpHoleAndBranch* instr) {
+  if (instr->hydrogen()->representation().IsTagged()) {
+    Register input_reg = ToRegister(instr->object());
+    __ mov(ip, Operand(factory()->the_hole_value()));
+    __ cmp(input_reg, ip);
+    EmitBranch(instr, eq);
+    return;
+  }
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->object());
+  __ fcmpu(input_reg, input_reg);
+  EmitFalseBranch(instr, ordered);
+
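+  // The value is a NaN; check whether it is the hole NaN by comparing the
+  // upper 32 bits of the double against kHoleNanUpper32.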
+  Register scratch = scratch0();
+  __ MovDoubleHighToInt(scratch, input_reg);
+  __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCompareMinusZeroAndBranch(LCompareMinusZeroAndBranch* instr) {
+  Representation rep = instr->hydrogen()->value()->representation();
+  DCHECK(!rep.IsInteger32());
+  Register scratch = ToRegister(instr->temp());
+
+  if (rep.IsDouble()) {
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ fcmpu(value, kDoubleRegZero);
+    EmitFalseBranch(instr, ne);
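+    // The value compares equal to zero; distinguish -0 from +0 by
+    // inspecting the sign bit.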
+#if V8_TARGET_ARCH_PPC64
+    __ MovDoubleToInt64(scratch, value);
+#else
+    __ MovDoubleHighToInt(scratch, value);
+#endif
+    __ cmpi(scratch, Operand::Zero());
+    EmitBranch(instr, lt);
+  } else {
+    Register value = ToRegister(instr->value());
+    __ CheckMap(value, scratch, Heap::kHeapNumberMapRootIndex,
+                instr->FalseLabel(chunk()), DO_SMI_CHECK);
+#if V8_TARGET_ARCH_PPC64
+    __ LoadP(scratch, FieldMemOperand(value, HeapNumber::kValueOffset));
+    __ li(ip, Operand(1));
+    __ rotrdi(ip, ip, 1);  // ip = 0x80000000_00000000
+    __ cmp(scratch, ip);
+#else
+    __ lwz(scratch, FieldMemOperand(value, HeapNumber::kExponentOffset));
+    __ lwz(ip, FieldMemOperand(value, HeapNumber::kMantissaOffset));
+    Label skip;
+    __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+    __ cmp(scratch, r0);
+    __ bne(&skip);
+    __ cmpi(ip, Operand::Zero());
+    __ bind(&skip);
+#endif
+    EmitBranch(instr, eq);
+  }
+}
+
+
+Condition LCodeGen::EmitIsObject(Register input, Register temp1,
+                                 Label* is_not_object, Label* is_object) {
+  Register temp2 = scratch0();
+  __ JumpIfSmi(input, is_not_object);
+
+  __ LoadRoot(temp2, Heap::kNullValueRootIndex);
+  __ cmp(input, temp2);
+  __ beq(is_object);
+
+  // Load map.
+  __ LoadP(temp1, FieldMemOperand(input, HeapObject::kMapOffset));
+  // Undetectable objects behave like undefined.
+  __ lbz(temp2, FieldMemOperand(temp1, Map::kBitFieldOffset));
+  __ TestBit(temp2, Map::kIsUndetectable, r0);
+  __ bne(is_not_object, cr0);
+
+  // Load instance type and check that it is in object type range.
+  __ lbz(temp2, FieldMemOperand(temp1, Map::kInstanceTypeOffset));
+  __ cmpi(temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  __ blt(is_not_object);
+  __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  return le;
+}
+
+
+void LCodeGen::DoIsObjectAndBranch(LIsObjectAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp1 = ToRegister(instr->temp());
+
+  Condition true_cond = EmitIsObject(reg, temp1, instr->FalseLabel(chunk_),
+                                     instr->TrueLabel(chunk_));
+
+  EmitBranch(instr, true_cond);
+}
+
+
+Condition LCodeGen::EmitIsString(Register input, Register temp1,
+                                 Label* is_not_string,
+                                 SmiCheck check_needed = INLINE_SMI_CHECK) {
+  if (check_needed == INLINE_SMI_CHECK) {
+    __ JumpIfSmi(input, is_not_string);
+  }
+  __ CompareObjectType(input, temp1, temp1, FIRST_NONSTRING_TYPE);
+
+  return lt;
+}
+
+
+void LCodeGen::DoIsStringAndBranch(LIsStringAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp1 = ToRegister(instr->temp());
+
+  SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+                              ? OMIT_SMI_CHECK
+                              : INLINE_SMI_CHECK;
+  Condition true_cond =
+      EmitIsString(reg, temp1, instr->FalseLabel(chunk_), check_needed);
+
+  EmitBranch(instr, true_cond);
+}
+
+
+void LCodeGen::DoIsSmiAndBranch(LIsSmiAndBranch* instr) {
+  Register input_reg = EmitLoadRegister(instr->value(), ip);
+  __ TestIfSmi(input_reg, r0);
+  EmitBranch(instr, eq, cr0);
+}
+
+
+void LCodeGen::DoIsUndetectableAndBranch(LIsUndetectableAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+  __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ lbz(temp, FieldMemOperand(temp, Map::kBitFieldOffset));
+  __ TestBit(temp, Map::kIsUndetectable, r0);
+  EmitBranch(instr, ne, cr0);
+}
+
+
+static Condition ComputeCompareCondition(Token::Value op) {
+  switch (op) {
+    case Token::EQ_STRICT:
+    case Token::EQ:
+      return eq;
+    case Token::LT:
+      return lt;
+    case Token::GT:
+      return gt;
+    case Token::LTE:
+      return le;
+    case Token::GTE:
+      return ge;
+    default:
+      UNREACHABLE();
+      return kNoCondition;
+  }
+}
+
+
+void LCodeGen::DoStringCompareAndBranch(LStringCompareAndBranch* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // This instruction also signals that no smi code was inlined.
+  __ cmpi(r3, Operand::Zero());
+
+  Condition condition = ComputeCompareCondition(op);
+
+  EmitBranch(instr, condition);
+}
+
+
+static InstanceType TestType(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == FIRST_TYPE) return to;
+  DCHECK(from == to || to == LAST_TYPE);
+  return from;
+}
+
+
+static Condition BranchCondition(HHasInstanceTypeAndBranch* instr) {
+  InstanceType from = instr->from();
+  InstanceType to = instr->to();
+  if (from == to) return eq;
+  if (to == LAST_TYPE) return ge;
+  if (from == FIRST_TYPE) return le;
+  UNREACHABLE();
+  return eq;
+}
+
+
+void LCodeGen::DoHasInstanceTypeAndBranch(LHasInstanceTypeAndBranch* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    __ JumpIfSmi(input, instr->FalseLabel(chunk_));
+  }
+
+  __ CompareObjectType(input, scratch, scratch, TestType(instr->hydrogen()));
+  EmitBranch(instr, BranchCondition(instr->hydrogen()));
+}
+
+
+void LCodeGen::DoGetCachedArrayIndex(LGetCachedArrayIndex* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  __ AssertString(input);
+
+  __ lwz(result, FieldMemOperand(input, String::kHashFieldOffset));
+  __ IndexFromHash(result, result);
+}
+
+
+void LCodeGen::DoHasCachedArrayIndexAndBranch(
+    LHasCachedArrayIndexAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ lwz(scratch, FieldMemOperand(input, String::kHashFieldOffset));
+  __ mov(r0, Operand(String::kContainsCachedArrayIndexMask));
+  __ and_(r0, scratch, r0, SetRC);
+  EmitBranch(instr, eq, cr0);
+}
+
+
+// Branches to a label or falls through with the answer in flags.  Trashes
+// the temp registers, but not the input.
+void LCodeGen::EmitClassOfTest(Label* is_true, Label* is_false,
+                               Handle<String> class_name, Register input,
+                               Register temp, Register temp2) {
+  DCHECK(!input.is(temp));
+  DCHECK(!input.is(temp2));
+  DCHECK(!temp.is(temp2));
+
+  __ JumpIfSmi(input, is_false);
+
+  if (String::Equals(isolate()->factory()->Function_string(), class_name)) {
+    // Assuming the following assertions, we can use the same compares to test
+    // for both being a function type and being in the object type range.
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    STATIC_ASSERT(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  FIRST_SPEC_OBJECT_TYPE + 1);
+    STATIC_ASSERT(LAST_NONCALLABLE_SPEC_OBJECT_TYPE ==
+                  LAST_SPEC_OBJECT_TYPE - 1);
+    STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
+    __ CompareObjectType(input, temp, temp2, FIRST_SPEC_OBJECT_TYPE);
+    __ blt(is_false);
+    __ beq(is_true);
+    __ cmpi(temp2, Operand(LAST_SPEC_OBJECT_TYPE));
+    __ beq(is_true);
+  } else {
+    // Faster code path to avoid two compares: subtract lower bound from the
+    // actual type and do a signed compare with the width of the type range.
+    __ LoadP(temp, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbz(temp2, FieldMemOperand(temp, Map::kInstanceTypeOffset));
+    __ subi(temp2, temp2, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ cmpi(temp2, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE -
+                           FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+    __ bgt(is_false);
+  }
+
+  // Now we are in the FIRST-LAST_NONCALLABLE_SPEC_OBJECT_TYPE range.
+  // Check if the constructor in the map is a function.
+  __ LoadP(temp, FieldMemOperand(temp, Map::kConstructorOffset));
+
+  // Objects with a non-function constructor have class 'Object'.
+  __ CompareObjectType(temp, temp2, temp2, JS_FUNCTION_TYPE);
+  if (class_name->IsOneByteEqualTo(STATIC_CHAR_VECTOR("Object"))) {
+    __ bne(is_true);
+  } else {
+    __ bne(is_false);
+  }
+
+  // temp now contains the constructor function. Grab the
+  // instance class name from there.
+  __ LoadP(temp, FieldMemOperand(temp, JSFunction::kSharedFunctionInfoOffset));
+  __ LoadP(temp,
+           FieldMemOperand(temp, SharedFunctionInfo::kInstanceClassNameOffset));
+  // The class name we are testing against is internalized since it's a literal.
+  // The name in the constructor is internalized because of the way the context
+  // is booted.  This routine isn't expected to work for random API-created
+  // classes and it doesn't have to because you can't access it with natives
+  // syntax.  Since both sides are internalized it is sufficient to use an
+  // identity comparison.
+  __ Cmpi(temp, Operand(class_name), r0);
+  // End with the answer in flags.
+}
+
+
+void LCodeGen::DoClassOfTestAndBranch(LClassOfTestAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+  Register temp = scratch0();
+  Register temp2 = ToRegister(instr->temp());
+  Handle<String> class_name = instr->hydrogen()->class_name();
+
+  EmitClassOfTest(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_),
+                  class_name, input, temp, temp2);
+
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoCmpMapAndBranch(LCmpMapAndBranch* instr) {
+  Register reg = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+
+  __ LoadP(temp, FieldMemOperand(reg, HeapObject::kMapOffset));
+  __ Cmpi(temp, Operand(instr->map()), r0);
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::DoInstanceOf(LInstanceOf* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r3));   // Object is in r3.
+  DCHECK(ToRegister(instr->right()).is(r4));  // Function is in r4.
+
+  InstanceofStub stub(isolate(), InstanceofStub::kArgsInRegisters);
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+
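+  // The stub leaves 0 in r3 when the object is an instance of the function;
+  // materialize the corresponding boolean value.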
+  Label equal, done;
+  __ cmpi(r3, Operand::Zero());
+  __ beq(&equal);
+  __ mov(r3, Operand(factory()->false_value()));
+  __ b(&done);
+
+  __ bind(&equal);
+  __ mov(r3, Operand(factory()->true_value()));
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
+  class DeferredInstanceOfKnownGlobal FINAL : public LDeferredCode {
+   public:
+    DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
+                                  LInstanceOfKnownGlobal* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() OVERRIDE {
+      codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
+    }
+    LInstruction* instr() OVERRIDE { return instr_; }
+    Label* map_check() { return &map_check_; }
+
+   private:
+    LInstanceOfKnownGlobal* instr_;
+    Label map_check_;
+  };
+
+  DeferredInstanceOfKnownGlobal* deferred;
+  deferred = new (zone()) DeferredInstanceOfKnownGlobal(this, instr);
+
+  Label done, false_result;
+  Register object = ToRegister(instr->value());
+  Register temp = ToRegister(instr->temp());
+  Register result = ToRegister(instr->result());
+
+  // A Smi is not an instance of anything.
+  __ JumpIfSmi(object, &false_result);
+
+  // This is the inlined call site instanceof cache. The two occurrences of the
+  // hole value will be patched to the last map/result pair generated by the
+  // instanceof stub.
+  Label cache_miss;
+  Register map = temp;
+  __ LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+  {
+    // Block constant pool emission to ensure the positions of instructions are
+    // as expected by the patcher. See InstanceofStub::Generate().
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    __ bind(deferred->map_check());  // Label for calculating code patching.
+    // We use Factory::the_hole_value() on purpose instead of loading from the
+    // root array to force relocation to be able to later patch with
+    // the cached map.
+    Handle<Cell> cell = factory()->NewCell(factory()->the_hole_value());
+    __ mov(ip, Operand(Handle<Object>(cell)));
+    __ LoadP(ip, FieldMemOperand(ip, PropertyCell::kValueOffset));
+    __ cmp(map, ip);
+    __ bne(&cache_miss);
+    // We use Factory::the_hole_value() on purpose instead of loading from the
+    // root array to force relocation to be able to later patch
+    // with true or false.
+    __ mov(result, Operand(factory()->the_hole_value()));
+  }
+  __ b(&done);
+
+  // The inlined call site cache did not match. Check null and string before
+  // calling the deferred code.
+  __ bind(&cache_miss);
+  // Null is not an instance of anything.
+  __ LoadRoot(ip, Heap::kNullValueRootIndex);
+  __ cmp(object, ip);
+  __ beq(&false_result);
+
+  // String values are not instances of anything.
+  Condition is_string = masm_->IsObjectStringType(object, temp);
+  __ b(is_string, &false_result, cr0);
+
+  // Go to the deferred code.
+  __ b(deferred->entry());
+
+  __ bind(&false_result);
+  __ LoadRoot(result, Heap::kFalseValueRootIndex);
+
+  // Here result contains either true or false. The deferred code also
+  // produces a true or false object.
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                               Label* map_check) {
+  InstanceofStub::Flags flags = InstanceofStub::kNoFlags;
+  flags = static_cast<InstanceofStub::Flags>(flags |
+                                             InstanceofStub::kArgsInRegisters);
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kCallSiteInlineCheck);
+  flags = static_cast<InstanceofStub::Flags>(
+      flags | InstanceofStub::kReturnTrueFalseObject);
+  InstanceofStub stub(isolate(), flags);
+
+  PushSafepointRegistersScope scope(this);
+  LoadContextFromDeferred(instr->context());
+
+  __ Move(InstanceofStub::right(), instr->function());
+  // Include instructions below in delta: mov + call = mov + (mov + 2)
+  static const int kAdditionalDelta = (2 * Assembler::kMovInstructions) + 2;
+  int delta = masm_->InstructionsGeneratedSince(map_check) + kAdditionalDelta;
+  {
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm_);
+    // r8 is used to communicate the offset to the location of the map check.
+    __ mov(r8, Operand(delta * Instruction::kInstrSize));
+  }
+  CallCodeGeneric(stub.GetCode(), RelocInfo::CODE_TARGET, instr,
+                  RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(delta == masm_->InstructionsGeneratedSince(map_check));
+  LEnvironment* env = instr->GetDeferredLazyDeoptimizationEnvironment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+  // Put the result value (r3) into the result register slot and
+  // restore all registers.
+  __ StoreToSafepointRegisterSlot(r3, ToRegister(instr->result()));
+}
+
+
+void LCodeGen::DoCmpT(LCmpT* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Token::Value op = instr->op();
+
+  Handle<Code> ic = CodeFactory::CompareIC(isolate(), op).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+  // This instruction also signals that no smi code was inlined.
+  __ cmpi(r3, Operand::Zero());
+
+  Condition condition = ComputeCompareCondition(op);
+  Label true_value, done;
+
+  __ b(condition, &true_value);
+
+  __ LoadRoot(ToRegister(instr->result()), Heap::kFalseValueRootIndex);
+  __ b(&done);
+
+  __ bind(&true_value);
+  __ LoadRoot(ToRegister(instr->result()), Heap::kTrueValueRootIndex);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoReturn(LReturn* instr) {
+  if (FLAG_trace && info()->IsOptimizing()) {
+    // Push the return value on the stack as the parameter.
+    // Runtime::TraceExit returns its parameter in r3.  We're leaving the code
+    // managed by the register allocator and tearing down the frame, so it's
+    // safe to write to the context register.
+    __ push(r3);
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntime(Runtime::kTraceExit, 1);
+  }
+  if (info()->saves_caller_doubles()) {
+    RestoreCallerDoubles();
+  }
+  int no_frame_start = -1;
+  if (instr->has_constant_parameter_count()) {
+    int parameter_count = ToInteger32(instr->constant_parameter_count());
+    int32_t sp_delta = (parameter_count + 1) * kPointerSize;
+    if (NeedsEagerFrame()) {
+      no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT, sp_delta);
+    } else if (sp_delta != 0) {
+      __ addi(sp, sp, Operand(sp_delta));
+    }
+  } else {
+    Register reg = ToRegister(instr->parameter_count());
+    // The argument count parameter is a smi.
+    if (NeedsEagerFrame()) {
+      no_frame_start = masm_->LeaveFrame(StackFrame::JAVA_SCRIPT);
+    }
+    __ SmiToPtrArrayOffset(r0, reg);
+    __ add(sp, sp, r0);
+  }
+
+  __ blr();
+
+  if (no_frame_start != -1) {
+    info_->AddNoFrameRange(no_frame_start, masm_->pc_offset());
+  }
+}
+
+
+void LCodeGen::DoLoadGlobalCell(LLoadGlobalCell* instr) {
+  Register result = ToRegister(instr->result());
+  __ mov(ip, Operand(Handle<Object>(instr->hydrogen()->cell().handle())));
+  __ LoadP(result, FieldMemOperand(ip, Cell::kValueOffset));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, ip);
+    DeoptimizeIf(eq, instr, "hole");
+  }
+}
+
+
+template <class T>
+void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
+  DCHECK(FLAG_vector_ics);
+  Register vector = ToRegister(instr->temp_vector());
+  DCHECK(vector.is(VectorLoadICDescriptor::VectorRegister()));
+  __ Move(vector, instr->hydrogen()->feedback_vector());
+  // No need to allocate this register.
+  DCHECK(VectorLoadICDescriptor::SlotRegister().is(r3));
+  __ mov(VectorLoadICDescriptor::SlotRegister(),
+         Operand(Smi::FromInt(instr->hydrogen()->slot())));
+}
+
+
+void LCodeGen::DoLoadGlobalGeneric(LLoadGlobalGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->global_object())
+             .is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadGlobalGeneric>(instr);
+  }
+  ContextualMode mode = instr->for_typeof() ? NOT_CONTEXTUAL : CONTEXTUAL;
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), mode).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStoreGlobalCell(LStoreGlobalCell* instr) {
+  Register value = ToRegister(instr->value());
+  Register cell = scratch0();
+
+  // Load the cell.
+  __ mov(cell, Operand(instr->hydrogen()->cell().handle()));
+
+  // If the cell we are storing to contains the hole, it could have
+  // been deleted from the property dictionary. In that case, we need
+  // to update the property details in the property dictionary to mark
+  // it as no longer deleted.
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    // We use a temp to check the payload (CompareRoot might clobber ip).
+    Register payload = ToRegister(instr->temp());
+    __ LoadP(payload, FieldMemOperand(cell, Cell::kValueOffset));
+    __ CompareRoot(payload, Heap::kTheHoleValueRootIndex);
+    DeoptimizeIf(eq, instr, "hole");
+  }
+
+  // Store the value.
+  __ StoreP(value, FieldMemOperand(cell, Cell::kValueOffset), r0);
+  // Cells are always rescanned, so no write barrier here.
+}
+
+
+void LCodeGen::DoLoadContextSlot(LLoadContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register result = ToRegister(instr->result());
+  __ LoadP(result, ContextOperand(context, instr->slot_index()));
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(result, ip);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, "hole");
+    } else {
+      Label skip;
+      __ bne(&skip);
+      __ mov(result, Operand(factory()->undefined_value()));
+      __ bind(&skip);
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreContextSlot(LStoreContextSlot* instr) {
+  Register context = ToRegister(instr->context());
+  Register value = ToRegister(instr->value());
+  Register scratch = scratch0();
+  MemOperand target = ContextOperand(context, instr->slot_index());
+
+  Label skip_assignment;
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
+    __ LoadP(scratch, target);
+    __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+    __ cmp(scratch, ip);
+    if (instr->hydrogen()->DeoptimizesOnHole()) {
+      DeoptimizeIf(eq, instr, "hole");
+    } else {
+      __ bne(&skip_assignment);
+    }
+  }
+
+  __ StoreP(value, target, r0);
+  if (instr->hydrogen()->NeedsWriteBarrier()) {
+    SmiCheck check_needed = instr->hydrogen()->value()->type().IsHeapObject()
+                                ? OMIT_SMI_CHECK
+                                : INLINE_SMI_CHECK;
+    __ RecordWriteContextSlot(context, target.offset(), value, scratch,
+                              GetLinkRegisterState(), kSaveFPRegs,
+                              EMIT_REMEMBERED_SET, check_needed);
+  }
+
+  __ bind(&skip_assignment);
+}
+
+
+void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
+  HObjectAccess access = instr->hydrogen()->access();
+  int offset = access.offset();
+  Register object = ToRegister(instr->object());
+
+  if (access.IsExternalMemory()) {
+    Register result = ToRegister(instr->result());
+    MemOperand operand = MemOperand(object, offset);
+    __ LoadRepresentation(result, operand, access.representation(), r0);
+    return;
+  }
+
+  if (instr->hydrogen()->representation().IsDouble()) {
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ lfd(result, FieldMemOperand(object, offset));
+    return;
+  }
+
+  Register result = ToRegister(instr->result());
+  if (!access.IsInobject()) {
+    __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    object = result;
+  }
+
+  Representation representation = access.representation();
+
+#if V8_TARGET_ARCH_PPC64
+  // 64-bit Smi optimization
+  if (representation.IsSmi() &&
+      instr->hydrogen()->representation().IsInteger32()) {
+    // Read int value directly from upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#if V8_TARGET_LITTLE_ENDIAN
+    offset += kPointerSize / 2;
+#endif
+    representation = Representation::Integer32();
+  }
+#endif
+
+  __ LoadRepresentation(result, FieldMemOperand(object, offset), representation,
+                        r0);
+}
+
+
+void LCodeGen::DoLoadNamedGeneric(LLoadNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  // Name is always in r5.
+  __ mov(LoadDescriptor::NameRegister(), Operand(instr->name()));
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadNamedGeneric>(instr);
+  }
+  Handle<Code> ic = CodeFactory::LoadIC(isolate(), NOT_CONTEXTUAL).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoLoadFunctionPrototype(LLoadFunctionPrototype* instr) {
+  Register scratch = scratch0();
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+
+  // Get the prototype or initial map from the function.
+  __ LoadP(result,
+           FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // Check that the function has a prototype or an initial map.
+  __ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
+  __ cmp(result, ip);
+  DeoptimizeIf(eq, instr, "hole");
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  __ CompareObjectType(result, scratch, scratch, MAP_TYPE);
+  __ bne(&done);
+
+  // Get the prototype from the initial map.
+  __ LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+  // All done.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoLoadRoot(LLoadRoot* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadRoot(result, instr->index());
+}
+
+
+void LCodeGen::DoAccessArgumentsAt(LAccessArgumentsAt* instr) {
+  Register arguments = ToRegister(instr->arguments());
+  Register result = ToRegister(instr->result());
+  // There are two words between the frame pointer and the last argument.
+  // Subtracting from length accounts for one of them; add one more.
+  if (instr->length()->IsConstantOperand()) {
+    int const_length = ToInteger32(LConstantOperand::cast(instr->length()));
+    if (instr->index()->IsConstantOperand()) {
+      int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+      int index = (const_length - const_index) + 1;
+      __ LoadP(result, MemOperand(arguments, index * kPointerSize), r0);
+    } else {
+      Register index = ToRegister(instr->index());
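+      // result = (const_length + 1) - index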
+      __ subfic(result, index, Operand(const_length + 1));
+      __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
+      __ LoadPX(result, MemOperand(arguments, result));
+    }
+  } else if (instr->index()->IsConstantOperand()) {
+    Register length = ToRegister(instr->length());
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    int loc = const_index - 1;
+    if (loc != 0) {
+      __ subi(result, length, Operand(loc));
+      __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
+      __ LoadPX(result, MemOperand(arguments, result));
+    } else {
+      __ ShiftLeftImm(result, length, Operand(kPointerSizeLog2));
+      __ LoadPX(result, MemOperand(arguments, result));
+    }
+  } else {
+    Register length = ToRegister(instr->length());
+    Register index = ToRegister(instr->index());
+    __ sub(result, length, index);
+    __ addi(result, result, Operand(1));
+    __ ShiftLeftImm(result, result, Operand(kPointerSizeLog2));
+    __ LoadPX(result, MemOperand(arguments, result));
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+      elements_kind == FLOAT32_ELEMENTS ||
+      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+      elements_kind == FLOAT64_ELEMENTS) {
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    if (key_is_constant) {
+      __ Add(scratch0(), external_pointer, constant_key << element_size_shift,
+             r0);
+    } else {
+      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
+      __ add(scratch0(), external_pointer, r0);
+    }
+    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+        elements_kind == FLOAT32_ELEMENTS) {
+      __ lfs(result, MemOperand(scratch0(), base_offset));
+    } else {  // i.e. EXTERNAL_FLOAT64_ELEMENTS or FLOAT64_ELEMENTS
+      __ lfd(result, MemOperand(scratch0(), base_offset));
+    }
+  } else {
+    Register result = ToRegister(instr->result());
+    MemOperand mem_operand =
+        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+                            constant_key, element_size_shift, base_offset);
+    switch (elements_kind) {
+      case EXTERNAL_INT8_ELEMENTS:
+      case INT8_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadByte(result, mem_operand, r0);
+        } else {
+          __ lbzx(result, mem_operand);
+        }
+        __ extsb(result, result);
+        break;
+      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+      case EXTERNAL_UINT8_ELEMENTS:
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadByte(result, mem_operand, r0);
+        } else {
+          __ lbzx(result, mem_operand);
+        }
+        break;
+      case EXTERNAL_INT16_ELEMENTS:
+      case INT16_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadHalfWord(result, mem_operand, r0);
+        } else {
+          __ lhzx(result, mem_operand);
+        }
+        __ extsh(result, result);
+        break;
+      case EXTERNAL_UINT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadHalfWord(result, mem_operand, r0);
+        } else {
+          __ lhzx(result, mem_operand);
+        }
+        break;
+      case EXTERNAL_INT32_ELEMENTS:
+      case INT32_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadWord(result, mem_operand, r0);
+        } else {
+          __ lwzx(result, mem_operand);
+        }
+#if V8_TARGET_ARCH_PPC64
+        __ extsw(result, result);
+#endif
+        break;
+      case EXTERNAL_UINT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        if (key_is_constant) {
+          __ LoadWord(result, mem_operand, r0);
+        } else {
+          __ lwzx(result, mem_operand);
+        }
+        if (!instr->hydrogen()->CheckFlag(HInstruction::kUint32)) {
+          __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+          __ cmplw(result, r0);
+          DeoptimizeIf(ge, instr, "negative value");
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case EXTERNAL_FLOAT32_ELEMENTS:
+      case EXTERNAL_FLOAT64_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
+  Register elements = ToRegister(instr->elements());
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  Register key = no_reg;
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  Register scratch = scratch0();
+
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+
+  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
+  if (!key_is_constant) {
+    __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
+    __ add(scratch, elements, r0);
+    elements = scratch;
+  }
+  if (!is_int16(base_offset)) {
+    __ Add(scratch, elements, base_offset, r0);
+    base_offset = 0;
+    elements = scratch;
+  }
+  __ lfd(result, MemOperand(elements, base_offset));
+
+  if (instr->hydrogen()->RequiresHoleCheck()) {
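+    // The hole is a NaN with kHoleNanUpper32 in its exponent word, so
+    // checking the upper 32 bits of the loaded double suffices.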
+    if (is_int16(base_offset + Register::kExponentOffset)) {
+      __ lwz(scratch,
+             MemOperand(elements, base_offset + Register::kExponentOffset));
+    } else {
+      __ addi(scratch, elements, Operand(base_offset));
+      __ lwz(scratch, MemOperand(scratch, Register::kExponentOffset));
+    }
+    __ Cmpi(scratch, Operand(kHoleNanUpper32), r0);
+    DeoptimizeIf(eq, instr, "hole");
+  }
+}
+
+
+void LCodeGen::DoLoadKeyedFixedArray(LLoadKeyed* instr) {
+  HLoadKeyed* hinstr = instr->hydrogen();
+  Register elements = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  if (instr->key()->IsConstantOperand()) {
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    Register key = ToRegister(instr->key());
+    // Even though the HLoadKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bound check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (hinstr->key()->representation().IsSmi()) {
+      __ SmiToPtrArrayOffset(r0, key);
+    } else {
+      __ ShiftLeftImm(r0, key, Operand(kPointerSizeLog2));
+    }
+    __ add(scratch, elements, r0);
+  }
+
+  bool requires_hole_check = hinstr->RequiresHoleCheck();
+  Representation representation = hinstr->representation();
+
+#if V8_TARGET_ARCH_PPC64
+  // 64-bit Smi optimization
+  if (representation.IsInteger32() &&
+      hinstr->elements_kind() == FAST_SMI_ELEMENTS) {
+    DCHECK(!requires_hole_check);
+    // Read int value directly from upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#if V8_TARGET_LITTLE_ENDIAN
+    offset += kPointerSize / 2;
+#endif
+  }
+#endif
+
+  __ LoadRepresentation(result, MemOperand(store_base, offset), representation,
+                        r0);
+
+  // Check for the hole value.
+  if (requires_hole_check) {
+    if (IsFastSmiElementsKind(hinstr->elements_kind())) {
+      __ TestIfSmi(result, r0);
+      DeoptimizeIf(ne, instr, "not a Smi", cr0);
+    } else {
+      __ LoadRoot(scratch, Heap::kTheHoleValueRootIndex);
+      __ cmp(result, scratch);
+      DeoptimizeIf(eq, instr, "hole");
+    }
+  }
+}
+
+
+void LCodeGen::DoLoadKeyed(LLoadKeyed* instr) {
+  if (instr->is_typed_elements()) {
+    DoLoadKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->representation().IsDouble()) {
+    DoLoadKeyedFixedDoubleArray(instr);
+  } else {
+    DoLoadKeyedFixedArray(instr);
+  }
+}
+
+
+MemOperand LCodeGen::PrepareKeyedOperand(Register key, Register base,
+                                         bool key_is_constant, bool key_is_smi,
+                                         int constant_key,
+                                         int element_size_shift,
+                                         int base_offset) {
+  Register scratch = scratch0();
+
+  if (key_is_constant) {
+    return MemOperand(base, (constant_key << element_size_shift) + base_offset);
+  }
+
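+  // A shift is only needed when the key's existing scaling (the smi tag and
+  // shift for smi keys, none for int32 keys) differs from the element size
+  // shift.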
+  bool needs_shift =
+      (element_size_shift != (key_is_smi ? kSmiTagSize + kSmiShiftSize : 0));
+
+  if (!(base_offset || needs_shift)) {
+    return MemOperand(base, key);
+  }
+
+  if (needs_shift) {
+    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+    key = scratch;
+  }
+
+  if (base_offset) {
+    __ Add(scratch, key, base_offset, r0);
+  }
+
+  return MemOperand(base, scratch);
+}
+
+
+void LCodeGen::DoLoadKeyedGeneric(LLoadKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(LoadDescriptor::NameRegister()));
+
+  if (FLAG_vector_ics) {
+    EmitVectorLoadICRegisters<LLoadKeyedGeneric>(instr);
+  }
+
+  Handle<Code> ic = CodeFactory::KeyedLoadIC(isolate()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoArgumentsElements(LArgumentsElements* instr) {
+  Register scratch = scratch0();
+  Register result = ToRegister(instr->result());
+
+  if (instr->hydrogen()->from_inlined()) {
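+    // Inlined frames have no adaptor frame; the arguments lie just below sp.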
+    __ subi(result, sp, Operand(2 * kPointerSize));
+  } else {
+    // Check if the calling frame is an arguments adaptor frame.
+    Label done, adapted;
+    __ LoadP(scratch, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+    __ LoadP(result,
+             MemOperand(scratch, StandardFrameConstants::kContextOffset));
+    __ CmpSmiLiteral(result, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+
+    // Result is the frame pointer for the frame if not adapted, and for the
+    // real frame below the adaptor frame if adapted.
+    __ beq(&adapted);
+    __ mr(result, fp);
+    __ b(&done);
+
+    __ bind(&adapted);
+    __ mr(result, scratch);
+    __ bind(&done);
+  }
+}
+
+
+void LCodeGen::DoArgumentsLength(LArgumentsLength* instr) {
+  Register elem = ToRegister(instr->elements());
+  Register result = ToRegister(instr->result());
+
+  Label done;
+
+  // If there is no arguments adaptor frame, the number of arguments is fixed.
+  __ cmp(fp, elem);
+  __ mov(result, Operand(scope()->num_parameters()));
+  __ beq(&done);
+
+  // Arguments adaptor frame present. Get argument length from there.
+  __ LoadP(result, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+  __ LoadP(result,
+           MemOperand(result, ArgumentsAdaptorFrameConstants::kLengthOffset));
+  __ SmiUntag(result);
+
+  // Argument length is in result register.
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoWrapReceiver(LWrapReceiver* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // If the receiver is null or undefined, we have to pass the global
+  // object as a receiver to normal functions. Values have to be
+  // passed unchanged to builtins and strict-mode functions.
+  Label global_object, result_in_receiver;
+
+  if (!instr->hydrogen()->known_function()) {
+    // Do not transform the receiver into an object for strict-mode
+    // functions.
+    __ LoadP(scratch,
+             FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    __ lwz(scratch,
+           FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
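+    // On 32-bit targets the hints field is a smi, so bit positions are
+    // offset by kSmiTagSize.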
+    __ TestBit(scratch,
+#if V8_TARGET_ARCH_PPC64
+               SharedFunctionInfo::kStrictModeFunction,
+#else
+               SharedFunctionInfo::kStrictModeFunction + kSmiTagSize,
+#endif
+               r0);
+    __ bne(&result_in_receiver, cr0);
+
+    // Do not transform the receiver into an object for builtins.
+    __ TestBit(scratch,
+#if V8_TARGET_ARCH_PPC64
+               SharedFunctionInfo::kNative,
+#else
+               SharedFunctionInfo::kNative + kSmiTagSize,
+#endif
+               r0);
+    __ bne(&result_in_receiver, cr0);
+  }
+
+  // Normal function. Replace undefined or null with global receiver.
+  __ LoadRoot(scratch, Heap::kNullValueRootIndex);
+  __ cmp(receiver, scratch);
+  __ beq(&global_object);
+  __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
+  __ cmp(receiver, scratch);
+  __ beq(&global_object);
+
+  // Deoptimize if the receiver is not a JS object.
+  __ TestIfSmi(receiver, r0);
+  DeoptimizeIf(eq, instr, "Smi");
+  __ CompareObjectType(receiver, scratch, scratch, FIRST_SPEC_OBJECT_TYPE);
+  DeoptimizeIf(lt, instr, "not a JavaScript object");
+
+  __ b(&result_in_receiver);
+  __ bind(&global_object);
+  __ LoadP(result, FieldMemOperand(function, JSFunction::kContextOffset));
+  __ LoadP(result, ContextOperand(result, Context::GLOBAL_OBJECT_INDEX));
+  __ LoadP(result, FieldMemOperand(result, GlobalObject::kGlobalProxyOffset));
+  if (result.is(receiver)) {
+    __ bind(&result_in_receiver);
+  } else {
+    Label result_ok;
+    __ b(&result_ok);
+    __ bind(&result_in_receiver);
+    __ mr(result, receiver);
+    __ bind(&result_ok);
+  }
+}
+
+
+void LCodeGen::DoApplyArguments(LApplyArguments* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register function = ToRegister(instr->function());
+  Register length = ToRegister(instr->length());
+  Register elements = ToRegister(instr->elements());
+  Register scratch = scratch0();
+  DCHECK(receiver.is(r3));  // Used for parameter count.
+  DCHECK(function.is(r4));  // Required by InvokeFunction.
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  // Copy the arguments to this function possibly from the
+  // adaptor frame below it.
+  const uint32_t kArgumentsLimit = 1 * KB;
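+  // Deoptimize if there are too many arguments to copy onto the stack.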
+  __ cmpli(length, Operand(kArgumentsLimit));
+  DeoptimizeIf(gt, instr, "too many arguments");
+
+  // Push the receiver and use the register to keep the original
+  // number of arguments.
+  __ push(receiver);
+  __ mr(receiver, length);
+  // The arguments are at an offset of one pointer size from elements.
+  __ addi(elements, elements, Operand(1 * kPointerSize));
+
+  // Loop through the arguments pushing them onto the execution
+  // stack.
+  Label invoke, loop;
+  // length is a small non-negative integer, due to the test above.
+  __ cmpi(length, Operand::Zero());
+  __ beq(&invoke);
+  __ mtctr(length);
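+  // The count register drives the loop: bdnz decrements it and branches
+  // while it is non-zero.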
+  __ bind(&loop);
+  __ ShiftLeftImm(r0, length, Operand(kPointerSizeLog2));
+  __ LoadPX(scratch, MemOperand(elements, r0));
+  __ push(scratch);
+  __ addi(length, length, Operand(-1));
+  __ bdnz(&loop);
+
+  __ bind(&invoke);
+  DCHECK(instr->HasPointerMap());
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator safepoint_generator(this, pointers, Safepoint::kLazyDeopt);
+  // The number of arguments is stored in receiver, which is r3, as expected
+  // by InvokeFunction.
+  ParameterCount actual(receiver);
+  __ InvokeFunction(function, actual, CALL_FUNCTION, safepoint_generator);
+}
+
+
+void LCodeGen::DoPushArgument(LPushArgument* instr) {
+  LOperand* argument = instr->value();
+  if (argument->IsDoubleRegister() || argument->IsDoubleStackSlot()) {
+    Abort(kDoPushArgumentNotImplementedForDoubleType);
+  } else {
+    Register argument_reg = EmitLoadRegister(argument, ip);
+    __ push(argument_reg);
+  }
+}
+
+
+void LCodeGen::DoDrop(LDrop* instr) { __ Drop(instr->count()); }
+
+
+void LCodeGen::DoThisFunction(LThisFunction* instr) {
+  Register result = ToRegister(instr->result());
+  __ LoadP(result, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
+}
+
+
+void LCodeGen::DoContext(LContext* instr) {
+  // If there is a non-return use, the context must be moved to a register.
+  Register result = ToRegister(instr->result());
+  if (info()->IsOptimizing()) {
+    __ LoadP(result, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  } else {
+    // If there is no frame, the context must be in cp.
+    DCHECK(result.is(cp));
+  }
+}
+
+
+void LCodeGen::DoDeclareGlobals(LDeclareGlobals* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  __ push(cp);  // The context is the first argument.
+  __ Move(scratch0(), instr->hydrogen()->pairs());
+  __ push(scratch0());
+  __ LoadSmiLiteral(scratch0(), Smi::FromInt(instr->hydrogen()->flags()));
+  __ push(scratch0());
+  CallRuntime(Runtime::kDeclareGlobals, 3, instr);
+}
+
+
+void LCodeGen::CallKnownFunction(Handle<JSFunction> function,
+                                 int formal_parameter_count, int arity,
+                                 LInstruction* instr, R4State r4_state) {
+  bool dont_adapt_arguments =
+      formal_parameter_count == SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+  bool can_invoke_directly =
+      dont_adapt_arguments || formal_parameter_count == arity;
+
+  LPointerMap* pointers = instr->pointer_map();
+
+  if (can_invoke_directly) {
+    if (r4_state == R4_UNINITIALIZED) {
+      __ Move(r4, function);
+    }
+
+    // Change context.
+    __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+    // Set r3 to arguments count if adaptation is not needed. Assumes that r3
+    // is available to write to at this point.
+    if (dont_adapt_arguments) {
+      __ mov(r3, Operand(arity));
+    }
+
+    bool is_self_call = function.is_identical_to(info()->closure());
+
+    // Invoke function.
+    if (is_self_call) {
+      __ CallSelf();
+    } else {
+      __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+      __ CallJSEntry(ip);
+    }
+
+    // Set up deoptimization.
+    RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+  } else {
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(arity);
+    ParameterCount expected(formal_parameter_count);
+    __ InvokeFunction(function, expected, count, CALL_FUNCTION, generator);
+  }
+}
+
+
+void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
+  DCHECK(instr->context() != NULL);
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // Deoptimize if not a heap number.
+  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+  __ cmp(scratch, ip);
+  DeoptimizeIf(ne, instr, "not a heap number");
+
+  Label done;
+  Register exponent = scratch0();
+  scratch = no_reg;
+  __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+  // Check the sign of the argument. If the argument is positive, just
+  // return it.
+  __ cmpwi(exponent, Operand::Zero());
+  // Move the input to the result if necessary.
+  __ Move(result, input);
+  __ bge(&done);
+
+  // Input is negative. Reverse its sign.
+  // Preserve the value of all registers.
+  {
+    PushSafepointRegistersScope scope(this);
+
+    // Registers were saved at the safepoint, so we can use
+    // many scratch registers.
+    Register tmp1 = input.is(r4) ? r3 : r4;
+    Register tmp2 = input.is(r5) ? r3 : r5;
+    Register tmp3 = input.is(r6) ? r3 : r6;
+    Register tmp4 = input.is(r7) ? r3 : r7;
+
+    // exponent: floating point exponent value.
+
+    Label allocated, slow;
+    __ LoadRoot(tmp4, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(tmp1, tmp2, tmp3, tmp4, &slow);
+    __ b(&allocated);
+
+    // Slow case: Call the runtime system to do the number allocation.
+    __ bind(&slow);
+
+    CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr,
+                            instr->context());
+    // Set the pointer to the new heap number in tmp1.
+    if (!tmp1.is(r3)) __ mr(tmp1, r3);
+    // Restore input after the call to the runtime.
+    __ LoadFromSafepointRegisterSlot(input, input);
+    __ lwz(exponent, FieldMemOperand(input, HeapNumber::kExponentOffset));
+
+    __ bind(&allocated);
+    // exponent: floating point exponent value.
+    // tmp1: allocated heap number.
+    STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
+    __ clrlwi(exponent, exponent, Operand(1));  // clear sign bit
+    __ stw(exponent, FieldMemOperand(tmp1, HeapNumber::kExponentOffset));
+    __ lwz(tmp2, FieldMemOperand(input, HeapNumber::kMantissaOffset));
+    __ stw(tmp2, FieldMemOperand(tmp1, HeapNumber::kMantissaOffset));
+
+    __ StoreToSafepointRegisterSlot(tmp1, result);
+  }
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::EmitMathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Label done;
+  __ cmpi(input, Operand::Zero());
+  __ Move(result, input);
+  __ bge(&done);
+  __ li(r0, Operand::Zero());  // clear xer
+  __ mtxer(r0);
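+  // neg overflows only for kMinInt; SetOE records this in the XER so the
+  // deopt below can test it.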
+  __ neg(result, result, SetOE, SetRC);
+  // Deoptimize on overflow.
+  DeoptimizeIf(overflow, instr, "overflow", cr0);
+  __ bind(&done);
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void LCodeGen::EmitInteger32MathAbs(LMathAbs* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Label done;
+  __ cmpwi(input, Operand::Zero());
+  __ Move(result, input);
+  __ bge(&done);
+
+  // Deoptimize on overflow.
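+  // kMinInt (0x80000000) is the only 32-bit value whose negation overflows.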
+  __ lis(r0, Operand(SIGN_EXT_IMM16(0x8000)));
+  __ cmpw(input, r0);
+  DeoptimizeIf(eq, instr, "overflow");
+
+  __ neg(result, result);
+  __ bind(&done);
+}
+#endif
+
+
+void LCodeGen::DoMathAbs(LMathAbs* instr) {
+  // Class for deferred case.
+  class DeferredMathAbsTaggedHeapNumber FINAL : public LDeferredCode {
+   public:
+    DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() OVERRIDE {
+      codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
+    }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LMathAbs* instr_;
+  };
+
+  Representation r = instr->hydrogen()->value()->representation();
+  if (r.IsDouble()) {
+    DoubleRegister input = ToDoubleRegister(instr->value());
+    DoubleRegister result = ToDoubleRegister(instr->result());
+    __ fabs(result, input);
+#if V8_TARGET_ARCH_PPC64
+  } else if (r.IsInteger32()) {
+    EmitInteger32MathAbs(instr);
+  } else if (r.IsSmi()) {
+#else
+  } else if (r.IsSmiOrInteger32()) {
+#endif
+    EmitMathAbs(instr);
+  } else {
+    // Representation is tagged.
+    DeferredMathAbsTaggedHeapNumber* deferred =
+        new (zone()) DeferredMathAbsTaggedHeapNumber(this, instr);
+    Register input = ToRegister(instr->value());
+    // Smi check.
+    __ JumpIfNotSmi(input, deferred->entry());
+    // If smi, handle it directly.
+    EmitMathAbs(instr);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoMathFloor(LMathFloor* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  Register input_high = scratch0();
+  Register scratch = ip;
+  Label done, exact;
+
+  __ TryInt32Floor(result, input, input_high, scratch, double_scratch0(), &done,
+                   &exact);
+  DeoptimizeIf(al, instr, "lost precision or NaN");
+
+  __ bind(&exact);
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    // Test for -0.
+    __ cmpi(result, Operand::Zero());
+    __ bne(&done);
+    __ cmpwi(input_high, Operand::Zero());
+    DeoptimizeIf(lt, instr, "minus zero");
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathRound(LMathRound* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->temp());
+  DoubleRegister input_plus_dot_five = double_scratch1;
+  Register scratch1 = scratch0();
+  Register scratch2 = ip;
+  DoubleRegister dot_five = double_scratch0();
+  Label convert, done;
+
+  __ LoadDoubleLiteral(dot_five, 0.5, r0);
+  __ fabs(double_scratch1, input);
+  __ fcmpu(double_scratch1, dot_five);
+  DeoptimizeIf(unordered, instr, "lost precision or NaN");
+  // If input is in [-0.5, -0], the result is -0.
+  // If input is in [+0, +0.5[, the result is +0.
+  // If the input is +0.5, the result is 1.
+  __ bgt(&convert);  // Out of [-0.5, +0.5].
+  if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+#if V8_TARGET_ARCH_PPC64
+    __ MovDoubleToInt64(scratch1, input);
+#else
+    __ MovDoubleHighToInt(scratch1, input);
+#endif
+    __ cmpi(scratch1, Operand::Zero());
+    // [-0.5, -0].
+    DeoptimizeIf(lt, instr, "minus zero");
+  }
+  Label return_zero;
+  __ fcmpu(input, dot_five);
+  __ bne(&return_zero);
+  __ li(result, Operand(1));  // +0.5.
+  __ b(&done);
+  // Remaining cases: [+0, +0.5[ or [-0.5, +0.5[, depending on
+  // flag kBailoutOnMinusZero.
+  __ bind(&return_zero);
+  __ li(result, Operand::Zero());
+  __ b(&done);
+
+  __ bind(&convert);
+  __ fadd(input_plus_dot_five, input, dot_five);
+  // Reuse dot_five (double_scratch0) as we no longer need this value.
+  __ TryInt32Floor(result, input_plus_dot_five, scratch1, scratch2,
+                   double_scratch0(), &done, &done);
+  DeoptimizeIf(al, instr, "lost precision or NaN");
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoMathFround(LMathFround* instr) {
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  DoubleRegister output_reg = ToDoubleRegister(instr->result());
+  __ frsp(output_reg, input_reg);
+}
+
+
+void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  __ fsqrt(result, input);
+}
+
+
+void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister temp = double_scratch0();
+
+  // Note that according to ECMA-262 15.8.2.13:
+  // Math.pow(-Infinity, 0.5) == Infinity
+  // Math.sqrt(-Infinity) == NaN
+  Label skip, done;
+
+  __ LoadDoubleLiteral(temp, -V8_INFINITY, scratch0());
+  __ fcmpu(input, temp);
+  __ bne(&skip);
+  __ fneg(result, temp);
+  __ b(&done);
+
+  // Add +0 to convert -0 to +0.
+  __ bind(&skip);
+  __ fadd(result, input, kDoubleRegZero);
+  __ fsqrt(result, result);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoPower(LPower* instr) {
+  Representation exponent_type = instr->hydrogen()->right()->representation();
+// Having marked this as a call, we can use any registers.
+// Just make sure that the input/output registers are the expected ones.
+#ifdef DEBUG
+  Register tagged_exponent = MathPowTaggedDescriptor::exponent();
+#endif
+  DCHECK(!instr->right()->IsDoubleRegister() ||
+         ToDoubleRegister(instr->right()).is(d2));
+  DCHECK(!instr->right()->IsRegister() ||
+         ToRegister(instr->right()).is(tagged_exponent));
+  DCHECK(ToDoubleRegister(instr->left()).is(d1));
+  DCHECK(ToDoubleRegister(instr->result()).is(d3));
+
+  if (exponent_type.IsSmi()) {
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsTagged()) {
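+    // A tagged exponent must be a smi or a heap number; deoptimize otherwise.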
+    Label no_deopt;
+    __ JumpIfSmi(r5, &no_deopt);
+    __ LoadP(r10, FieldMemOperand(r5, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(r10, ip);
+    DeoptimizeIf(ne, instr, "not a heap number");
+    __ bind(&no_deopt);
+    MathPowStub stub(isolate(), MathPowStub::TAGGED);
+    __ CallStub(&stub);
+  } else if (exponent_type.IsInteger32()) {
+    MathPowStub stub(isolate(), MathPowStub::INTEGER);
+    __ CallStub(&stub);
+  } else {
+    DCHECK(exponent_type.IsDouble());
+    MathPowStub stub(isolate(), MathPowStub::DOUBLE);
+    __ CallStub(&stub);
+  }
+}
+
+
+void LCodeGen::DoMathExp(LMathExp* instr) {
+  DoubleRegister input = ToDoubleRegister(instr->value());
+  DoubleRegister result = ToDoubleRegister(instr->result());
+  DoubleRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
+  DoubleRegister double_scratch2 = double_scratch0();
+  Register temp1 = ToRegister(instr->temp1());
+  Register temp2 = ToRegister(instr->temp2());
+
+  MathExpGenerator::EmitMathExp(masm(), input, result, double_scratch1,
+                                double_scratch2, temp1, temp2, scratch0());
+}
+
+
+void LCodeGen::DoMathLog(LMathLog* instr) {
+  __ PrepareCallCFunction(0, 1, scratch0());
+  __ MovToFloatParameter(ToDoubleRegister(instr->value()));
+  __ CallCFunction(ExternalReference::math_log_double_function(isolate()), 0,
+                   1);
+  __ MovFromFloatResult(ToDoubleRegister(instr->result()));
+}
+
+
+void LCodeGen::DoMathClz32(LMathClz32* instr) {
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  __ cntlzw_(result, input);
+}
+
+
+void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(r4));
+  DCHECK(instr->HasPointerMap());
+
+  Handle<JSFunction> known_function = instr->hydrogen()->known_function();
+  if (known_function.is_null()) {
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+    ParameterCount count(instr->arity());
+    __ InvokeFunction(r4, count, CALL_FUNCTION, generator);
+  } else {
+    CallKnownFunction(known_function,
+                      instr->hydrogen()->formal_parameter_count(),
+                      instr->arity(), instr, R4_CONTAINS_TARGET);
+  }
+}
+
+
+void LCodeGen::DoTailCallThroughMegamorphicCache(
+    LTailCallThroughMegamorphicCache* instr) {
+  Register receiver = ToRegister(instr->receiver());
+  Register name = ToRegister(instr->name());
+  DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
+  DCHECK(name.is(LoadDescriptor::NameRegister()));
+  DCHECK(receiver.is(r4));
+  DCHECK(name.is(r5));
+
+  Register scratch = r6;
+  Register extra = r7;
+  Register extra2 = r8;
+  Register extra3 = r9;
+
+  // Important for the tail-call: if this code built a frame, it must be
+  // torn down before tail-calling out.
+  bool must_teardown_frame = NeedsEagerFrame();
+
+  // The probe will tail call to a handler if found.
+  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
+                                         must_teardown_frame, receiver, name,
+                                         scratch, extra, extra2, extra3);
+
+  // Tail call to miss if we ended up here.
+  if (must_teardown_frame) __ LeaveFrame(StackFrame::INTERNAL);
+  LoadIC::GenerateMiss(masm());
+}
+
+
+void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  LPointerMap* pointers = instr->pointer_map();
+  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+  if (instr->target()->IsConstantOperand()) {
+    LConstantOperand* target = LConstantOperand::cast(instr->target());
+    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+    __ Call(code, RelocInfo::CODE_TARGET);
+  } else {
+    DCHECK(instr->target()->IsRegister());
+    Register target = ToRegister(instr->target());
+    generator.BeforeCall(__ CallSize(target));
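+    // The target register holds a Code object; its entry point starts right
+    // after the header.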
+    __ addi(ip, target, Operand(Code::kHeaderSize - kHeapObjectTag));
+    __ CallJSEntry(ip);
+  }
+  generator.AfterCall();
+}
+
+
+void LCodeGen::DoCallJSFunction(LCallJSFunction* instr) {
+  DCHECK(ToRegister(instr->function()).is(r4));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  if (instr->hydrogen()->pass_argument_count()) {
+    __ mov(r3, Operand(instr->arity()));
+  }
+
+  // Change context.
+  __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+  bool is_self_call = false;
+  if (instr->hydrogen()->function()->IsConstant()) {
+    HConstant* fun_const = HConstant::cast(instr->hydrogen()->function());
+    Handle<JSFunction> jsfun =
+        Handle<JSFunction>::cast(fun_const->handle(isolate()));
+    is_self_call = jsfun.is_identical_to(info()->closure());
+  }
+
+  if (is_self_call) {
+    __ CallSelf();
+  } else {
+    __ LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+    __ CallJSEntry(ip);
+  }
+
+  RecordSafepointWithLazyDeopt(instr, RECORD_SIMPLE_SAFEPOINT);
+}
+
+
+void LCodeGen::DoCallFunction(LCallFunction* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->function()).is(r4));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  int arity = instr->arity();
+  CallFunctionStub stub(isolate(), arity, instr->hydrogen()->function_flags());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoCallNew(LCallNew* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(r4));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  __ mov(r3, Operand(instr->arity()));
+  // No cell in r5 for construct type feedback in optimized code.
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  CallConstructStub stub(isolate(), NO_CALL_CONSTRUCTOR_FLAGS);
+  CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+}
+
+
+void LCodeGen::DoCallNewArray(LCallNewArray* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->constructor()).is(r4));
+  DCHECK(ToRegister(instr->result()).is(r3));
+
+  __ mov(r3, Operand(instr->arity()));
+  __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+  ElementsKind kind = instr->hydrogen()->elements_kind();
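+  // r5 holds undefined rather than an AllocationSite, so kinds that would
+  // track allocation sites get tracking disabled instead.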
+  AllocationSiteOverrideMode override_mode =
+      (AllocationSite::GetMode(kind) == TRACK_ALLOCATION_SITE)
+          ? DISABLE_ALLOCATION_SITES
+          : DONT_OVERRIDE;
+
+  if (instr->arity() == 0) {
+    ArrayNoArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+  } else if (instr->arity() == 1) {
+    Label done;
+    if (IsFastPackedElementsKind(kind)) {
+      Label packed_case;
+      // We might need the holey kind instead; look at the first argument.
+      // A non-zero length means the new array will contain holes.
+      __ LoadP(r8, MemOperand(sp, 0));
+      __ cmpi(r8, Operand::Zero());
+      __ beq(&packed_case);
+
+      ElementsKind holey_kind = GetHoleyElementsKind(kind);
+      ArraySingleArgumentConstructorStub stub(isolate(), holey_kind,
+                                              override_mode);
+      CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+      __ b(&done);
+      __ bind(&packed_case);
+    }
+
+    ArraySingleArgumentConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+    __ bind(&done);
+  } else {
+    ArrayNArgumentsConstructorStub stub(isolate(), kind, override_mode);
+    CallCode(stub.GetCode(), RelocInfo::CONSTRUCT_CALL, instr);
+  }
+}
+
+
+void LCodeGen::DoCallRuntime(LCallRuntime* instr) {
+  CallRuntime(instr->function(), instr->arity(), instr);
+}
+
+
+void LCodeGen::DoStoreCodeEntry(LStoreCodeEntry* instr) {
+  Register function = ToRegister(instr->function());
+  Register code_object = ToRegister(instr->code_object());
+  __ addi(code_object, code_object,
+          Operand(Code::kHeaderSize - kHeapObjectTag));
+  __ StoreP(code_object,
+            FieldMemOperand(function, JSFunction::kCodeEntryOffset), r0);
+}
+
+
+void LCodeGen::DoInnerAllocatedObject(LInnerAllocatedObject* instr) {
+  Register result = ToRegister(instr->result());
+  Register base = ToRegister(instr->base_object());
+  if (instr->offset()->IsConstantOperand()) {
+    LConstantOperand* offset = LConstantOperand::cast(instr->offset());
+    __ Add(result, base, ToInteger32(offset), r0);
+  } else {
+    Register offset = ToRegister(instr->offset());
+    __ add(result, base, offset);
+  }
+}
+
+
+void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
+  HStoreNamedField* hinstr = instr->hydrogen();
+  Representation representation = instr->representation();
+
+  Register object = ToRegister(instr->object());
+  Register scratch = scratch0();
+  HObjectAccess access = hinstr->access();
+  int offset = access.offset();
+
+  if (access.IsExternalMemory()) {
+    Register value = ToRegister(instr->value());
+    MemOperand operand = MemOperand(object, offset);
+    __ StoreRepresentation(value, operand, representation, r0);
+    return;
+  }
+
+  __ AssertNotSmi(object);
+
+#if V8_TARGET_ARCH_PPC64
+  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+         IsInteger32(LConstantOperand::cast(instr->value())));
+#else
+  DCHECK(!representation.IsSmi() || !instr->value()->IsConstantOperand() ||
+         IsSmi(LConstantOperand::cast(instr->value())));
+#endif
+  if (representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    DCHECK(!hinstr->has_transition());
+    DCHECK(!hinstr->NeedsWriteBarrier());
+    DoubleRegister value = ToDoubleRegister(instr->value());
+    __ stfd(value, FieldMemOperand(object, offset));
+    return;
+  }
+
+  if (hinstr->has_transition()) {
+    Handle<Map> transition = hinstr->transition_map();
+    AddDeprecationDependency(transition);
+    __ mov(scratch, Operand(transition));
+    __ StoreP(scratch, FieldMemOperand(object, HeapObject::kMapOffset), r0);
+    if (hinstr->NeedsWriteBarrierForMap()) {
+      Register temp = ToRegister(instr->temp());
+      // Update the write barrier for the map field.
+      __ RecordWriteForMap(object, scratch, temp, GetLinkRegisterState(),
+                           kSaveFPRegs);
+    }
+  }
+
+  // Do the store.
+  Register value = ToRegister(instr->value());
+
+#if V8_TARGET_ARCH_PPC64
+  // 64-bit Smi optimization
+  if (representation.IsSmi() &&
+      hinstr->value()->representation().IsInteger32()) {
+    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    // Store int value directly to upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#if V8_TARGET_LITTLE_ENDIAN
+    offset += kPointerSize / 2;
+#endif
+    representation = Representation::Integer32();
+  }
+#endif
+
+  if (access.IsInobject()) {
+    MemOperand operand = FieldMemOperand(object, offset);
+    __ StoreRepresentation(value, operand, representation, r0);
+    if (hinstr->NeedsWriteBarrier()) {
+      // Update the write barrier for the object for in-object properties.
+      __ RecordWriteField(
+          object, offset, value, scratch, GetLinkRegisterState(), kSaveFPRegs,
+          EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
+          hinstr->PointersToHereCheckForValue());
+    }
+  } else {
+    __ LoadP(scratch, FieldMemOperand(object, JSObject::kPropertiesOffset));
+    MemOperand operand = FieldMemOperand(scratch, offset);
+    __ StoreRepresentation(value, operand, representation, r0);
+    if (hinstr->NeedsWriteBarrier()) {
+      // Update the write barrier for the properties array.
+      // object is used as a scratch register.
+      __ RecordWriteField(
+          scratch, offset, value, object, GetLinkRegisterState(), kSaveFPRegs,
+          EMIT_REMEMBERED_SET, hinstr->SmiCheckForWriteBarrier(),
+          hinstr->PointersToHereCheckForValue());
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreNamedGeneric(LStoreNamedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  __ mov(StoreDescriptor::NameRegister(), Operand(instr->name()));
+  Handle<Code> ic = StoreIC::initialize_stub(isolate(), instr->strict_mode());
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
+  Representation representation = instr->hydrogen()->length()->representation();
+  DCHECK(representation.Equals(instr->hydrogen()->index()->representation()));
+  DCHECK(representation.IsSmiOrInteger32());
+
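+  // cc is the condition on which the check fails; allow_equality permits
+  // index == length.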
+  Condition cc = instr->hydrogen()->allow_equality() ? lt : le;
+  if (instr->length()->IsConstantOperand()) {
+    int32_t length = ToInteger32(LConstantOperand::cast(instr->length()));
+    Register index = ToRegister(instr->index());
+    if (representation.IsSmi()) {
+      __ Cmpli(index, Operand(Smi::FromInt(length)), r0);
+    } else {
+      __ Cmplwi(index, Operand(length), r0);
+    }
+    cc = CommuteCondition(cc);
+  } else if (instr->index()->IsConstantOperand()) {
+    int32_t index = ToInteger32(LConstantOperand::cast(instr->index()));
+    Register length = ToRegister(instr->length());
+    if (representation.IsSmi()) {
+      __ Cmpli(length, Operand(Smi::FromInt(index)), r0);
+    } else {
+      __ Cmplwi(length, Operand(index), r0);
+    }
+  } else {
+    Register index = ToRegister(instr->index());
+    Register length = ToRegister(instr->length());
+    if (representation.IsSmi()) {
+      __ cmpl(length, index);
+    } else {
+      __ cmplw(length, index);
+    }
+  }
+  if (FLAG_debug_code && instr->hydrogen()->skip_check()) {
+    Label done;
+    __ b(NegateCondition(cc), &done);
+    __ stop("eliminated bounds check failed");
+    __ bind(&done);
+  } else {
+    DeoptimizeIf(cc, instr, "out of bounds");
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
+  Register external_pointer = ToRegister(instr->elements());
+  Register key = no_reg;
+  ElementsKind elements_kind = instr->elements_kind();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(elements_kind);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int base_offset = instr->base_offset();
+
+  if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+      elements_kind == FLOAT32_ELEMENTS ||
+      elements_kind == EXTERNAL_FLOAT64_ELEMENTS ||
+      elements_kind == FLOAT64_ELEMENTS) {
+    Register address = scratch0();
+    DoubleRegister value(ToDoubleRegister(instr->value()));
+    if (key_is_constant) {
+      if (constant_key != 0) {
+        __ Add(address, external_pointer, constant_key << element_size_shift,
+               r0);
+      } else {
+        address = external_pointer;
+      }
+    } else {
+      __ IndexToArrayOffset(r0, key, element_size_shift, key_is_smi);
+      __ add(address, external_pointer, r0);
+    }
+    if (elements_kind == EXTERNAL_FLOAT32_ELEMENTS ||
+        elements_kind == FLOAT32_ELEMENTS) {
+      __ frsp(double_scratch0(), value);
+      __ stfs(double_scratch0(), MemOperand(address, base_offset));
+    } else {  // Storing doubles, not floats.
+      __ stfd(value, MemOperand(address, base_offset));
+    }
+  } else {
+    Register value(ToRegister(instr->value()));
+    MemOperand mem_operand =
+        PrepareKeyedOperand(key, external_pointer, key_is_constant, key_is_smi,
+                            constant_key, element_size_shift, base_offset);
+    switch (elements_kind) {
+      case EXTERNAL_UINT8_CLAMPED_ELEMENTS:
+      case EXTERNAL_INT8_ELEMENTS:
+      case EXTERNAL_UINT8_ELEMENTS:
+      case UINT8_ELEMENTS:
+      case UINT8_CLAMPED_ELEMENTS:
+      case INT8_ELEMENTS:
+        if (key_is_constant) {
+          __ StoreByte(value, mem_operand, r0);
+        } else {
+          __ stbx(value, mem_operand);
+        }
+        break;
+      case EXTERNAL_INT16_ELEMENTS:
+      case EXTERNAL_UINT16_ELEMENTS:
+      case INT16_ELEMENTS:
+      case UINT16_ELEMENTS:
+        if (key_is_constant) {
+          __ StoreHalfWord(value, mem_operand, r0);
+        } else {
+          __ sthx(value, mem_operand);
+        }
+        break;
+      case EXTERNAL_INT32_ELEMENTS:
+      case EXTERNAL_UINT32_ELEMENTS:
+      case INT32_ELEMENTS:
+      case UINT32_ELEMENTS:
+        if (key_is_constant) {
+          __ StoreWord(value, mem_operand, r0);
+        } else {
+          __ stwx(value, mem_operand);
+        }
+        break;
+      case FLOAT32_ELEMENTS:
+      case FLOAT64_ELEMENTS:
+      case EXTERNAL_FLOAT32_ELEMENTS:
+      case EXTERNAL_FLOAT64_ELEMENTS:
+      case FAST_DOUBLE_ELEMENTS:
+      case FAST_ELEMENTS:
+      case FAST_SMI_ELEMENTS:
+      case FAST_HOLEY_DOUBLE_ELEMENTS:
+      case FAST_HOLEY_ELEMENTS:
+      case FAST_HOLEY_SMI_ELEMENTS:
+      case DICTIONARY_ELEMENTS:
+      case SLOPPY_ARGUMENTS_ELEMENTS:
+        UNREACHABLE();
+        break;
+    }
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
+  DoubleRegister value = ToDoubleRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = no_reg;
+  Register scratch = scratch0();
+  DoubleRegister double_scratch = double_scratch0();
+  bool key_is_constant = instr->key()->IsConstantOperand();
+  int constant_key = 0;
+
+  // Calculate the effective address of the slot in the array to store the
+  // double value.
+  if (key_is_constant) {
+    constant_key = ToInteger32(LConstantOperand::cast(instr->key()));
+    if (constant_key & 0xF0000000) {
+      Abort(kArrayIndexConstantValueTooBig);
+    }
+  } else {
+    key = ToRegister(instr->key());
+  }
+  int element_size_shift = ElementsKindToShiftSize(FAST_DOUBLE_ELEMENTS);
+  bool key_is_smi = instr->hydrogen()->key()->representation().IsSmi();
+  int base_offset = instr->base_offset() + constant_key * kDoubleSize;
+  if (!key_is_constant) {
+    __ IndexToArrayOffset(scratch, key, element_size_shift, key_is_smi);
+    __ add(scratch, elements, scratch);
+    elements = scratch;
+  }
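+  // D-form storage accesses take a signed 16-bit displacement; fold larger
+  // offsets into the base register first.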
+  if (!is_int16(base_offset)) {
+    __ Add(scratch, elements, base_offset, r0);
+    base_offset = 0;
+    elements = scratch;
+  }
+
+  if (instr->NeedsCanonicalization()) {
+    // Force a canonical NaN so a stored NaN cannot alias the hole pattern.
+    __ CanonicalizeNaN(double_scratch, value);
+    __ stfd(double_scratch, MemOperand(elements, base_offset));
+  } else {
+    __ stfd(value, MemOperand(elements, base_offset));
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedFixedArray(LStoreKeyed* instr) {
+  HStoreKeyed* hinstr = instr->hydrogen();
+  Register value = ToRegister(instr->value());
+  Register elements = ToRegister(instr->elements());
+  Register key = instr->key()->IsRegister() ? ToRegister(instr->key()) : no_reg;
+  Register scratch = scratch0();
+  Register store_base = scratch;
+  int offset = instr->base_offset();
+
+  // Do the store.
+  if (instr->key()->IsConstantOperand()) {
+    DCHECK(!hinstr->NeedsWriteBarrier());
+    LConstantOperand* const_operand = LConstantOperand::cast(instr->key());
+    offset += ToInteger32(const_operand) * kPointerSize;
+    store_base = elements;
+  } else {
+    // Even though the HStoreKeyed instruction forces the input
+    // representation for the key to be an integer, the input gets replaced
+    // during bounds check elimination with the index argument to the bounds
+    // check, which can be tagged, so that case must be handled here, too.
+    if (hinstr->key()->representation().IsSmi()) {
+      __ SmiToPtrArrayOffset(scratch, key);
+    } else {
+      __ ShiftLeftImm(scratch, key, Operand(kPointerSizeLog2));
+    }
+    __ add(scratch, elements, scratch);
+  }
+
+  Representation representation = hinstr->value()->representation();
+
+#if V8_TARGET_ARCH_PPC64
+  // 64-bit Smi optimization
+  if (representation.IsInteger32()) {
+    DCHECK(hinstr->store_mode() == STORE_TO_INITIALIZED_ENTRY);
+    DCHECK(hinstr->elements_kind() == FAST_SMI_ELEMENTS);
+    // Store int value directly to upper half of the smi.
+    STATIC_ASSERT(kSmiTag == 0);
+    STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 32);
+#if V8_TARGET_LITTLE_ENDIAN
+    offset += kPointerSize / 2;
+#endif
+  }
+#endif
+
+  __ StoreRepresentation(value, MemOperand(store_base, offset), representation,
+                         r0);
+
+  if (hinstr->NeedsWriteBarrier()) {
+    SmiCheck check_needed = hinstr->value()->type().IsHeapObject()
+                                ? OMIT_SMI_CHECK
+                                : INLINE_SMI_CHECK;
+    // Compute address of modified element and store it into key register.
+    __ Add(key, store_base, offset, r0);
+    __ RecordWrite(elements, key, value, GetLinkRegisterState(), kSaveFPRegs,
+                   EMIT_REMEMBERED_SET, check_needed,
+                   hinstr->PointersToHereCheckForValue());
+  }
+}
+
+
+void LCodeGen::DoStoreKeyed(LStoreKeyed* instr) {
+  // Dispatch by elements kind: typed/external, fast double, or fast tagged.
+  if (instr->is_typed_elements()) {
+    DoStoreKeyedExternalArray(instr);
+  } else if (instr->hydrogen()->value()->representation().IsDouble()) {
+    DoStoreKeyedFixedDoubleArray(instr);
+  } else {
+    DoStoreKeyedFixedArray(instr);
+  }
+}
+
+
+void LCodeGen::DoStoreKeyedGeneric(LStoreKeyedGeneric* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->object()).is(StoreDescriptor::ReceiverRegister()));
+  DCHECK(ToRegister(instr->key()).is(StoreDescriptor::NameRegister()));
+  DCHECK(ToRegister(instr->value()).is(StoreDescriptor::ValueRegister()));
+
+  Handle<Code> ic =
+      CodeFactory::KeyedStoreIC(isolate(), instr->strict_mode()).code();
+  CallCode(ic, RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoTransitionElementsKind(LTransitionElementsKind* instr) {
+  Register object_reg = ToRegister(instr->object());
+  Register scratch = scratch0();
+
+  Handle<Map> from_map = instr->original_map();
+  Handle<Map> to_map = instr->transitioned_map();
+  ElementsKind from_kind = instr->from_kind();
+  ElementsKind to_kind = instr->to_kind();
+
+  Label not_applicable;
+  __ LoadP(scratch, FieldMemOperand(object_reg, HeapObject::kMapOffset));
+  __ Cmpi(scratch, Operand(from_map), r0);
+  __ bne(&not_applicable);
+
+  if (IsSimpleMapChangeTransition(from_kind, to_kind)) {
+    Register new_map_reg = ToRegister(instr->new_map_temp());
+    __ mov(new_map_reg, Operand(to_map));
+    __ StoreP(new_map_reg, FieldMemOperand(object_reg, HeapObject::kMapOffset),
+              r0);
+    // Write barrier.
+    __ RecordWriteForMap(object_reg, new_map_reg, scratch,
+                         GetLinkRegisterState(), kDontSaveFPRegs);
+  } else {
+    DCHECK(ToRegister(instr->context()).is(cp));
+    DCHECK(object_reg.is(r3));
+    PushSafepointRegistersScope scope(this);
+    __ Move(r4, to_map);
+    bool is_js_array = from_map->instance_type() == JS_ARRAY_TYPE;
+    TransitionElementsKindStub stub(isolate(), from_kind, to_kind, is_js_array);
+    __ CallStub(&stub);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kLazyDeopt);
+  }
+  __ bind(&not_applicable);
+}
+
+
+void LCodeGen::DoTrapAllocationMemento(LTrapAllocationMemento* instr) {
+  Register object = ToRegister(instr->object());
+  Register temp = ToRegister(instr->temp());
+  Label no_memento_found;
+  __ TestJSArrayForAllocationMemento(object, temp, &no_memento_found);
+  DeoptimizeIf(eq, instr, "memento found");
+  __ bind(&no_memento_found);
+}
+
+
+void LCodeGen::DoStringAdd(LStringAdd* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  DCHECK(ToRegister(instr->left()).is(r4));
+  DCHECK(ToRegister(instr->right()).is(r3));
+  StringAddStub stub(isolate(), instr->hydrogen()->flags(),
+                     instr->hydrogen()->pretenure_flag());
+  CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+}
+
+
+void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
+  class DeferredStringCharCodeAt FINAL : public LDeferredCode {
+   public:
+    DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LStringCharCodeAt* instr_;
+  };
+
+  DeferredStringCharCodeAt* deferred =
+      new (zone()) DeferredStringCharCodeAt(this, instr);
+
+  StringCharLoadGenerator::Generate(
+      masm(), ToRegister(instr->string()), ToRegister(instr->index()),
+      ToRegister(instr->result()), deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharCodeAt(LStringCharCodeAt* instr) {
+  Register string = ToRegister(instr->string());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ li(result, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  __ push(string);
+  // Push the index as a smi. This is safe because of the checks in
+  // DoStringCharCodeAt above.
+  if (instr->index()->IsConstantOperand()) {
+    int const_index = ToInteger32(LConstantOperand::cast(instr->index()));
+    __ LoadSmiLiteral(scratch, Smi::FromInt(const_index));
+    __ push(scratch);
+  } else {
+    Register index = ToRegister(instr->index());
+    __ SmiTag(index);
+    __ push(index);
+  }
+  CallRuntimeFromDeferred(Runtime::kStringCharCodeAtRT, 2, instr,
+                          instr->context());
+  __ AssertSmi(r3);
+  __ SmiUntag(r3);
+  __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
+  class DeferredStringCharFromCode FINAL : public LDeferredCode {
+   public:
+    DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() OVERRIDE {
+      codegen()->DoDeferredStringCharFromCode(instr_);
+    }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LStringCharFromCode* instr_;
+  };
+
+  DeferredStringCharFromCode* deferred =
+      new (zone()) DeferredStringCharFromCode(this, instr);
+
+  DCHECK(instr->hydrogen()->value()->representation().IsInteger32());
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+  DCHECK(!char_code.is(result));
+
+  __ cmpli(char_code, Operand(String::kMaxOneByteCharCode));
+  __ bgt(deferred->entry());
+  __ LoadRoot(result, Heap::kSingleCharacterStringCacheRootIndex);
+  __ ShiftLeftImm(r0, char_code, Operand(kPointerSizeLog2));
+  __ add(result, result, r0);
+  __ LoadP(result, FieldMemOperand(result, FixedArray::kHeaderSize));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(result, ip);
+  __ beq(deferred->entry());
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredStringCharFromCode(LStringCharFromCode* instr) {
+  Register char_code = ToRegister(instr->char_code());
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ li(result, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  __ SmiTag(char_code);
+  __ push(char_code);
+  CallRuntimeFromDeferred(Runtime::kCharFromCode, 1, instr, instr->context());
+  __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister() || input->IsStackSlot());
+  LOperand* output = instr->result();
+  DCHECK(output->IsDoubleRegister());
+  if (input->IsStackSlot()) {
+    Register scratch = scratch0();
+    __ LoadP(scratch, ToMemOperand(input));
+    __ ConvertIntToDouble(scratch, ToDoubleRegister(output));
+  } else {
+    __ ConvertIntToDouble(ToRegister(input), ToDoubleRegister(output));
+  }
+}
+
+
+void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
+  LOperand* input = instr->value();
+  LOperand* output = instr->result();
+  __ ConvertUnsignedIntToDouble(ToRegister(input), ToDoubleRegister(output));
+}
+
+
+void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
+  class DeferredNumberTagI FINAL : public LDeferredCode {
+   public:
+    DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() OVERRIDE {
+      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+                                       instr_->temp2(), SIGNED_INT32);
+    }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LNumberTagI* instr_;
+  };
+
+  Register src = ToRegister(instr->value());
+  Register dst = ToRegister(instr->result());
+
+  DeferredNumberTagI* deferred = new (zone()) DeferredNumberTagI(this, instr);
+#if V8_TARGET_ARCH_PPC64
+  __ SmiTag(dst, src);
+#else
+  __ SmiTagCheckOverflow(dst, src, r0);
+  __ BranchOnOverflow(deferred->entry());
+#endif
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
+  class DeferredNumberTagU FINAL : public LDeferredCode {
+   public:
+    DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() OVERRIDE {
+      codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
+                                       instr_->temp2(), UNSIGNED_INT32);
+    }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LNumberTagU* instr_;
+  };
+
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+
+  DeferredNumberTagU* deferred = new (zone()) DeferredNumberTagU(this, instr);
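+  // Values above Smi::kMaxValue cannot be tagged as a smi and must be boxed
+  // in a heap number by the deferred code.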
+  __ Cmpli(input, Operand(Smi::kMaxValue), r0);
+  __ bgt(deferred->entry());
+  __ SmiTag(result, input);
+  __ bind(deferred->exit());
+}
+
+
+void LCodeGen::DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+                                     LOperand* temp1, LOperand* temp2,
+                                     IntegerSignedness signedness) {
+  Label done, slow;
+  Register src = ToRegister(value);
+  Register dst = ToRegister(instr->result());
+  Register tmp1 = scratch0();
+  Register tmp2 = ToRegister(temp1);
+  Register tmp3 = ToRegister(temp2);
+  DoubleRegister dbl_scratch = double_scratch0();
+
+  if (signedness == SIGNED_INT32) {
+    // There was overflow, so bits 30 and 31 of the original integer
+    // disagree. Try to allocate a heap number in new space and store
+    // the value there. If that fails, call the runtime system.
+    if (dst.is(src)) {
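+      // Undo the overflowed SmiTag: shift back and flip the sign bit to
+      // recover the original value.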
+      __ SmiUntag(src, dst);
+      __ xoris(src, src, Operand(HeapNumber::kSignMask >> 16));
+    }
+    __ ConvertIntToDouble(src, dbl_scratch);
+  } else {
+    __ ConvertUnsignedIntToDouble(src, dbl_scratch);
+  }
+
+  if (FLAG_inline_new) {
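+    // Try an inline new-space allocation first; fall back to the runtime
+    // below if it fails.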
+    __ LoadRoot(tmp3, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(dst, tmp1, tmp2, tmp3, &slow);
+    __ b(&done);
+  }
+
+  // Slow case: Call the runtime system to do the number allocation.
+  __ bind(&slow);
+  {
+    // TODO(3095996): Put a valid pointer value in the stack slot where the
+    // result register is stored, as this register is in the pointer map, but
+    // contains an integer value.
+    __ li(dst, Operand::Zero());
+
+    // Preserve the value of all registers.
+    PushSafepointRegistersScope scope(this);
+
+    // NumberTagI and NumberTagD use the context from the frame, rather than
+    // the environment's HContext or HInlinedContext value.
+    // They only call Runtime::kAllocateHeapNumber.
+    // The corresponding HChange instructions are added in a phase that does
+    // not have easy access to the local context.
+    __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+    __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(r3, dst);
+  }
+
+  // Done. Store the value in dbl_scratch into the value field of the
+  // allocated heap number.
+  __ bind(&done);
+  __ stfd(dbl_scratch, FieldMemOperand(dst, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
+  class DeferredNumberTagD FINAL : public LDeferredCode {
+   public:
+    DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LNumberTagD* instr_;
+  };
+
+  DoubleRegister input_reg = ToDoubleRegister(instr->value());
+  Register scratch = scratch0();
+  Register reg = ToRegister(instr->result());
+  Register temp1 = ToRegister(instr->temp());
+  Register temp2 = ToRegister(instr->temp2());
+
+  DeferredNumberTagD* deferred = new (zone()) DeferredNumberTagD(this, instr);
+  if (FLAG_inline_new) {
+    __ LoadRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    __ AllocateHeapNumber(reg, temp1, temp2, scratch, deferred->entry());
+  } else {
+    __ b(deferred->entry());
+  }
+  __ bind(deferred->exit());
+  __ stfd(input_reg, FieldMemOperand(reg, HeapNumber::kValueOffset));
+}
+
+
+void LCodeGen::DoDeferredNumberTagD(LNumberTagD* instr) {
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  Register reg = ToRegister(instr->result());
+  __ li(reg, Operand::Zero());
+
+  PushSafepointRegistersScope scope(this);
+  // NumberTagI and NumberTagD use the context from the frame, rather than
+  // the environment's HContext or HInlinedContext value.
+  // They only call Runtime::kAllocateHeapNumber.
+  // The corresponding HChange instructions are added in a phase that does
+  // not have easy access to the local context.
+  __ LoadP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber);
+  RecordSafepointWithRegisters(instr->pointer_map(), 0,
+                               Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(r3, reg);
+}
+
+
+void LCodeGen::DoSmiTag(LSmiTag* instr) {
+  HChange* hchange = instr->hydrogen();
+  Register input = ToRegister(instr->value());
+  Register output = ToRegister(instr->result());
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ TestUnsignedSmiCandidate(input, r0);
+    DeoptimizeIf(ne, instr, "overflow", cr0);
+  }
+#if !V8_TARGET_ARCH_PPC64
+  if (hchange->CheckFlag(HValue::kCanOverflow) &&
+      !hchange->value()->CheckFlag(HValue::kUint32)) {
+    __ SmiTagCheckOverflow(output, input, r0);
+    DeoptimizeIf(lt, instr, "overflow", cr0);
+  } else {
+#endif
+    __ SmiTag(output, input);
+#if !V8_TARGET_ARCH_PPC64
+  }
+#endif
+}
+
+
+void LCodeGen::DoSmiUntag(LSmiUntag* instr) {
+  Register scratch = scratch0();
+  Register input = ToRegister(instr->value());
+  Register result = ToRegister(instr->result());
+  if (instr->needs_check()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
+    // If the input is a HeapObject, the value of scratch won't be zero.
+    __ andi(scratch, input, Operand(kHeapObjectTag));
+    __ SmiUntag(result, input);
+    DeoptimizeIf(ne, instr, "not a Smi", cr0);
+  } else {
+    __ SmiUntag(result, input);
+  }
+}
+
+
+void LCodeGen::EmitNumberUntagD(LNumberUntagD* instr, Register input_reg,
+                                DoubleRegister result_reg,
+                                NumberUntagDMode mode) {
+  bool can_convert_undefined_to_nan =
+      instr->hydrogen()->can_convert_undefined_to_nan();
+  bool deoptimize_on_minus_zero = instr->hydrogen()->deoptimize_on_minus_zero();
+
+  Register scratch = scratch0();
+  DCHECK(!result_reg.is(double_scratch0()));
+
+  Label convert, load_smi, done;
+
+  if (mode == NUMBER_CANDIDATE_IS_ANY_TAGGED) {
+    // Smi check.
+    __ UntagAndJumpIfSmi(scratch, input_reg, &load_smi);
+
+    // Heap number map check.
+    __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+    __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+    __ cmp(scratch, ip);
+    if (can_convert_undefined_to_nan) {
+      __ bne(&convert);
+    } else {
+      DeoptimizeIf(ne, instr, "not a heap number");
+    }
+    // Load the heap number value.
+    __ lfd(result_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (deoptimize_on_minus_zero) {
+#if V8_TARGET_ARCH_PPC64
+      __ MovDoubleToInt64(scratch, result_reg);
+      // Rotate left by one so that -0.0 (sign bit only) becomes 1,
+      // allowing a simple compare against 1.
+      __ rldicl(scratch, scratch, 1, 0);
+      __ cmpi(scratch, Operand(1));
+#else
+      __ MovDoubleToInt64(scratch, ip, result_reg);
+      __ cmpi(ip, Operand::Zero());
+      __ bne(&done);
+      __ Cmpi(scratch, Operand(HeapNumber::kSignMask), r0);
+#endif
+      DeoptimizeIf(eq, instr, "minus zero");
+    }
+    __ b(&done);
+    if (can_convert_undefined_to_nan) {
+      __ bind(&convert);
+      // Convert undefined (and hole) to NaN.
+      __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+      __ cmp(input_reg, ip);
+      DeoptimizeIf(ne, instr, "not a heap number/undefined");
+      __ LoadRoot(scratch, Heap::kNanValueRootIndex);
+      __ lfd(result_reg, FieldMemOperand(scratch, HeapNumber::kValueOffset));
+      __ b(&done);
+    }
+  } else {
+    __ SmiUntag(scratch, input_reg);
+    DCHECK(mode == NUMBER_CANDIDATE_IS_SMI);
+  }
+  // Smi to double register conversion
+  __ bind(&load_smi);
+  // scratch: untagged value of input_reg
+  __ ConvertIntToDouble(scratch, result_reg);
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
+  Register input_reg = ToRegister(instr->value());
+  Register scratch1 = scratch0();
+  Register scratch2 = ToRegister(instr->temp());
+  DoubleRegister double_scratch = double_scratch0();
+  DoubleRegister double_scratch2 = ToDoubleRegister(instr->temp2());
+
+  DCHECK(!scratch1.is(input_reg) && !scratch1.is(scratch2));
+  DCHECK(!scratch2.is(input_reg) && !scratch2.is(scratch1));
+
+  Label done;
+
+  // Heap number map check.
+  __ LoadP(scratch1, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex);
+  __ cmp(scratch1, ip);
+
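+  // The result of this compare is consumed by both arms below.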
+  if (instr->truncating()) {
+    // Performs a truncating conversion of a floating point number as used by
+    // the JS bitwise operations.
+    Label no_heap_number, check_bools, check_false;
+    __ bne(&no_heap_number);
+    __ mr(scratch2, input_reg);
+    __ TruncateHeapNumberToI(input_reg, scratch2);
+    __ b(&done);
+
+    // Check for oddballs. Undefined and False convert to zero and True to one
+    // for truncating conversions.
+    __ bind(&no_heap_number);
+    __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+    __ cmp(input_reg, ip);
+    __ bne(&check_bools);
+    __ li(input_reg, Operand::Zero());
+    __ b(&done);
+
+    __ bind(&check_bools);
+    __ LoadRoot(ip, Heap::kTrueValueRootIndex);
+    __ cmp(input_reg, ip);
+    __ bne(&check_false);
+    __ li(input_reg, Operand(1));
+    __ b(&done);
+
+    __ bind(&check_false);
+    __ LoadRoot(ip, Heap::kFalseValueRootIndex);
+    __ cmp(input_reg, ip);
+    DeoptimizeIf(ne, instr, "not a heap number/undefined/true/false", cr7);
+    __ li(input_reg, Operand::Zero());
+  } else {
+    DeoptimizeIf(ne, instr, "not a heap number", cr7);
+
+    __ lfd(double_scratch2,
+           FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      // Preserve the heap number pointer in scratch2 for the minus-zero check
+      // below.
+      __ mr(scratch2, input_reg);
+    }
+    __ TryDoubleToInt32Exact(input_reg, double_scratch2, scratch1,
+                             double_scratch);
+    DeoptimizeIf(ne, instr, "lost precision or NaN", cr7);
+
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      __ cmpi(input_reg, Operand::Zero());
+      __ bne(&done);
+      __ lwz(scratch1,
+             FieldMemOperand(scratch2, HeapNumber::kValueOffset +
+                                           Register::kExponentOffset));
+      __ cmpwi(scratch1, Operand::Zero());
+      DeoptimizeIf(lt, instr, "minus zero", cr7);
+    }
+  }
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
+  class DeferredTaggedToI FINAL : public LDeferredCode {
+   public:
+    DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LTaggedToI* instr_;
+  };
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  DCHECK(input->Equals(instr->result()));
+
+  Register input_reg = ToRegister(input);
+
+  if (instr->hydrogen()->value()->representation().IsSmi()) {
+    __ SmiUntag(input_reg);
+  } else {
+    DeferredTaggedToI* deferred = new (zone()) DeferredTaggedToI(this, instr);
+
+    // Branch to deferred code if the input is a HeapObject.
+    __ JumpIfNotSmi(input_reg, deferred->entry());
+
+    __ SmiUntag(input_reg);
+    __ bind(deferred->exit());
+  }
+}
+
+
+void LCodeGen::DoNumberUntagD(LNumberUntagD* instr) {
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  LOperand* result = instr->result();
+  DCHECK(result->IsDoubleRegister());
+
+  Register input_reg = ToRegister(input);
+  DoubleRegister result_reg = ToDoubleRegister(result);
+
+  HValue* value = instr->hydrogen()->value();
+  NumberUntagDMode mode = value->representation().IsSmi()
+                              ? NUMBER_CANDIDATE_IS_SMI
+                              : NUMBER_CANDIDATE_IS_ANY_TAGGED;
+
+  EmitNumberUntagD(instr, input_reg, result_reg, mode);
+}
+
+
+void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+  DoubleRegister double_scratch = double_scratch0();
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+                             double_scratch);
+    // Deoptimize if the input wasn't an int32 (inside a double).
+    DeoptimizeIf(ne, instr, "lost precision or NaN");
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ cmpi(result_reg, Operand::Zero());
+      __ bne(&done);
+#if V8_TARGET_ARCH_PPC64
+      __ MovDoubleToInt64(scratch1, double_input);
+#else
+      __ MovDoubleHighToInt(scratch1, double_input);
+#endif
+      __ cmpi(scratch1, Operand::Zero());
+      DeoptimizeIf(lt, instr, "minus zero");
+      __ bind(&done);
+    }
+  }
+}
+
+
+void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
+  Register result_reg = ToRegister(instr->result());
+  Register scratch1 = scratch0();
+  DoubleRegister double_input = ToDoubleRegister(instr->value());
+  DoubleRegister double_scratch = double_scratch0();
+
+  if (instr->truncating()) {
+    __ TruncateDoubleToI(result_reg, double_input);
+  } else {
+    __ TryDoubleToInt32Exact(result_reg, double_input, scratch1,
+                             double_scratch);
+    // Deoptimize if the input wasn't an int32 (inside a double).
+    DeoptimizeIf(ne, instr, "lost precision or NaN");
+    if (instr->hydrogen()->CheckFlag(HValue::kBailoutOnMinusZero)) {
+      Label done;
+      __ cmpi(result_reg, Operand::Zero());
+      __ bne(&done);
+#if V8_TARGET_ARCH_PPC64
+      __ MovDoubleToInt64(scratch1, double_input);
+#else
+      __ MovDoubleHighToInt(scratch1, double_input);
+#endif
+      __ cmpi(scratch1, Operand::Zero());
+      DeoptimizeIf(lt, instr, "minus zero");
+      __ bind(&done);
+    }
+  }
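+  // Smi tagging of an int32 cannot overflow on PPC64, but 32-bit Smis carry
+  // only 31 significant bits, so the tagged result must be checked there.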
+#if V8_TARGET_ARCH_PPC64
+  __ SmiTag(result_reg);
+#else
+  __ SmiTagCheckOverflow(result_reg, r0);
+  DeoptimizeIf(lt, instr, "overflow", cr0);
+#endif
+}
+
+
+void LCodeGen::DoCheckSmi(LCheckSmi* instr) {
+  LOperand* input = instr->value();
+  __ TestIfSmi(ToRegister(input), r0);
+  DeoptimizeIf(ne, instr, "not a Smi", cr0);
+}
+
+
+void LCodeGen::DoCheckNonSmi(LCheckNonSmi* instr) {
+  if (!instr->hydrogen()->value()->type().IsHeapObject()) {
+    LOperand* input = instr->value();
+    __ TestIfSmi(ToRegister(input), r0);
+    DeoptimizeIf(eq, instr, "Smi", cr0);
+  }
+}
+
+
+void LCodeGen::DoCheckInstanceType(LCheckInstanceType* instr) {
+  Register input = ToRegister(instr->value());
+  Register scratch = scratch0();
+
+  __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+  __ lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+
+  if (instr->hydrogen()->is_interval_check()) {
+    InstanceType first;
+    InstanceType last;
+    instr->hydrogen()->GetCheckInterval(&first, &last);
+
+    __ cmpli(scratch, Operand(first));
+
+    // If there is only one type in the interval, check for equality.
+    if (first == last) {
+      DeoptimizeIf(ne, instr, "wrong instance type");
+    } else {
+      DeoptimizeIf(lt, instr, "wrong instance type");
+      // Omit check for the last type.
+      if (last != LAST_TYPE) {
+        __ cmpli(scratch, Operand(last));
+        DeoptimizeIf(gt, instr, "wrong instance type");
+      }
+    }
+  } else {
+    uint8_t mask;
+    uint8_t tag;
+    instr->hydrogen()->GetCheckMaskAndTag(&mask, &tag);
+
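+    // A power-of-two mask selects a single bit, so a test against zero
+    // suffices; otherwise the masked value must be compared against the tag.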
+    if (base::bits::IsPowerOfTwo32(mask)) {
+      DCHECK(tag == 0 || base::bits::IsPowerOfTwo32(tag));
+      __ andi(r0, scratch, Operand(mask));
+      DeoptimizeIf(tag == 0 ? ne : eq, instr, "wrong instance type", cr0);
+    } else {
+      __ andi(scratch, scratch, Operand(mask));
+      __ cmpi(scratch, Operand(tag));
+      DeoptimizeIf(ne, instr, "wrong instance type");
+    }
+  }
+}
+
+
+void LCodeGen::DoCheckValue(LCheckValue* instr) {
+  Register reg = ToRegister(instr->value());
+  Handle<HeapObject> object = instr->hydrogen()->object().handle();
+  AllowDeferredHandleDereference smi_check;
+  if (isolate()->heap()->InNewSpace(*object)) {
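+    // A new-space object can move during GC, so compare through a cell that
+    // the garbage collector keeps up to date.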
+    Handle<Cell> cell = isolate()->factory()->NewCell(object);
+    __ mov(ip, Operand(Handle<Object>(cell)));
+    __ LoadP(ip, FieldMemOperand(ip, Cell::kValueOffset));
+    __ cmp(reg, ip);
+  } else {
+    __ Cmpi(reg, Operand(object), r0);
+  }
+  DeoptimizeIf(ne, instr, "value mismatch");
+}
+
+
+void LCodeGen::DoDeferredInstanceMigration(LCheckMaps* instr, Register object) {
+  {
+    PushSafepointRegistersScope scope(this);
+    __ push(object);
+    __ li(cp, Operand::Zero());
+    __ CallRuntimeSaveDoubles(Runtime::kTryMigrateInstance);
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
+                                 Safepoint::kNoLazyDeopt);
+    __ StoreToSafepointRegisterSlot(r3, scratch0());
+  }
+  __ TestIfSmi(scratch0(), r0);
+  DeoptimizeIf(eq, instr, "instance migration failed", cr0);
+}
+
+
+void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
+  class DeferredCheckMaps FINAL : public LDeferredCode {
+   public:
+    DeferredCheckMaps(LCodeGen* codegen, LCheckMaps* instr, Register object)
+        : LDeferredCode(codegen), instr_(instr), object_(object) {
+      SetExit(check_maps());
+    }
+    void Generate() OVERRIDE {
+      codegen()->DoDeferredInstanceMigration(instr_, object_);
+    }
+    Label* check_maps() { return &check_maps_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LCheckMaps* instr_;
+    Label check_maps_;
+    Register object_;
+  };
+
+  if (instr->hydrogen()->IsStabilityCheck()) {
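+    // No check is emitted; instead the code object registers a dependency on
+    // each map and is deoptimized if any of them later becomes unstable.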
+    const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+    for (int i = 0; i < maps->size(); ++i) {
+      AddStabilityDependency(maps->at(i).handle());
+    }
+    return;
+  }
+
+  Register map_reg = scratch0();
+
+  LOperand* input = instr->value();
+  DCHECK(input->IsRegister());
+  Register reg = ToRegister(input);
+
+  __ LoadP(map_reg, FieldMemOperand(reg, HeapObject::kMapOffset));
+
+  DeferredCheckMaps* deferred = NULL;
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    deferred = new (zone()) DeferredCheckMaps(this, instr, reg);
+    __ bind(deferred->check_maps());
+  }
+
+  const UniqueSet<Map>* maps = instr->hydrogen()->maps();
+  Label success;
+  for (int i = 0; i < maps->size() - 1; i++) {
+    Handle<Map> map = maps->at(i).handle();
+    __ CompareMap(map_reg, map, &success);
+    __ beq(&success);
+  }
+
+  Handle<Map> map = maps->at(maps->size() - 1).handle();
+  __ CompareMap(map_reg, map, &success);
+  if (instr->hydrogen()->HasMigrationTarget()) {
+    __ bne(deferred->entry());
+  } else {
+    DeoptimizeIf(ne, instr, "wrong map");
+  }
+
+  __ bind(&success);
+}
+
+
+void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampDoubleToUint8(result_reg, value_reg, double_scratch0());
+}
+
+
+void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
+  Register unclamped_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  __ ClampUint8(result_reg, unclamped_reg);
+}
+
+
+void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
+  Register scratch = scratch0();
+  Register input_reg = ToRegister(instr->unclamped());
+  Register result_reg = ToRegister(instr->result());
+  DoubleRegister temp_reg = ToDoubleRegister(instr->temp());
+  Label is_smi, done, heap_number;
+
+  // Both smi and heap number cases are handled.
+  __ UntagAndJumpIfSmi(result_reg, input_reg, &is_smi);
+
+  // Check for a heap number.
+  __ LoadP(scratch, FieldMemOperand(input_reg, HeapObject::kMapOffset));
+  __ Cmpi(scratch, Operand(factory()->heap_number_map()), r0);
+  __ beq(&heap_number);
+
+  // Check for undefined. Undefined is converted to zero for clamping
+  // conversions.
+  __ Cmpi(input_reg, Operand(factory()->undefined_value()), r0);
+  DeoptimizeIf(ne, instr, "not a heap number/undefined");
+  __ li(result_reg, Operand::Zero());
+  __ b(&done);
+
+  // Heap number
+  __ bind(&heap_number);
+  __ lfd(temp_reg, FieldMemOperand(input_reg, HeapNumber::kValueOffset));
+  __ ClampDoubleToUint8(result_reg, temp_reg, double_scratch0());
+  __ b(&done);
+
+  // Smi case.
+  __ bind(&is_smi);
+  __ ClampUint8(result_reg, result_reg);
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoDoubleBits(LDoubleBits* instr) {
+  DoubleRegister value_reg = ToDoubleRegister(instr->value());
+  Register result_reg = ToRegister(instr->result());
+
+  if (instr->hydrogen()->bits() == HDoubleBits::HIGH) {
+    __ MovDoubleHighToInt(result_reg, value_reg);
+  } else {
+    __ MovDoubleLowToInt(result_reg, value_reg);
+  }
+}
+
+
+void LCodeGen::DoConstructDouble(LConstructDouble* instr) {
+  Register hi_reg = ToRegister(instr->hi());
+  Register lo_reg = ToRegister(instr->lo());
+  DoubleRegister result_reg = ToDoubleRegister(instr->result());
+#if V8_TARGET_ARCH_PPC64
+  __ MovInt64ComponentsToDouble(result_reg, hi_reg, lo_reg, r0);
+#else
+  __ MovInt64ToDouble(result_reg, hi_reg, lo_reg);
+#endif
+}
+
+
+void LCodeGen::DoAllocate(LAllocate* instr) {
+  class DeferredAllocate FINAL : public LDeferredCode {
+   public:
+    DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LAllocate* instr_;
+  };
+
+  DeferredAllocate* deferred = new (zone()) DeferredAllocate(this, instr);
+
+  Register result = ToRegister(instr->result());
+  Register scratch = ToRegister(instr->temp1());
+  Register scratch2 = ToRegister(instr->temp2());
+
+  // Allocate memory for the object.
+  AllocationFlags flags = TAG_OBJECT;
+  if (instr->hydrogen()->MustAllocateDoubleAligned()) {
+    flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
+  }
+  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
+  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_DATA_SPACE);
+  }
+
+  if (instr->size()->IsConstantOperand()) {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+    if (size <= Page::kMaxRegularHeapObjectSize) {
+      __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+    } else {
+      __ b(deferred->entry());
+    }
+  } else {
+    Register size = ToRegister(instr->size());
+    __ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
+  }
+
+  __ bind(deferred->exit());
+
+  if (instr->hydrogen()->MustPrefillWithFiller()) {
+    STATIC_ASSERT(kHeapObjectTag == 1);
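+    // Fill the allocated space with one-pointer filler maps so the heap
+    // remains iterable until the caller initializes the object.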
+    if (instr->size()->IsConstantOperand()) {
+      int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+      __ LoadIntLiteral(scratch, size - kHeapObjectTag);
+    } else {
+      __ subi(scratch, ToRegister(instr->size()), Operand(kHeapObjectTag));
+    }
+    __ mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    Label loop;
+    __ bind(&loop);
+    __ subi(scratch, scratch, Operand(kPointerSize));
+    __ StorePX(scratch2, MemOperand(result, scratch));
+    __ cmpi(scratch, Operand::Zero());
+    __ bge(&loop);
+  }
+}
+
+
+void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
+  Register result = ToRegister(instr->result());
+
+  // TODO(3095996): Get rid of this. For now, we need to make the
+  // result register contain a valid pointer because it is already
+  // contained in the register pointer map.
+  __ LoadSmiLiteral(result, Smi::FromInt(0));
+
+  PushSafepointRegistersScope scope(this);
+  if (instr->size()->IsRegister()) {
+    Register size = ToRegister(instr->size());
+    DCHECK(!size.is(result));
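+    // The runtime call expects tagged arguments, so pass the size as a Smi.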
+    __ SmiTag(size);
+    __ push(size);
+  } else {
+    int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
+#if !V8_TARGET_ARCH_PPC64
+    if (size >= 0 && size <= Smi::kMaxValue) {
+#endif
+      __ Push(Smi::FromInt(size));
+#if !V8_TARGET_ARCH_PPC64
+    } else {
+      // We should never get here at runtime => abort
+      __ stop("invalid allocation size");
+      return;
+    }
+#endif
+  }
+
+  int flags = AllocateDoubleAlignFlag::encode(
+      instr->hydrogen()->MustAllocateDoubleAligned());
+  if (instr->hydrogen()->IsOldPointerSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsOldDataSpaceAllocation());
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_POINTER_SPACE);
+  } else if (instr->hydrogen()->IsOldDataSpaceAllocation()) {
+    DCHECK(!instr->hydrogen()->IsNewSpaceAllocation());
+    flags = AllocateTargetSpace::update(flags, OLD_DATA_SPACE);
+  } else {
+    flags = AllocateTargetSpace::update(flags, NEW_SPACE);
+  }
+  __ Push(Smi::FromInt(flags));
+
+  CallRuntimeFromDeferred(Runtime::kAllocateInTargetSpace, 2, instr,
+                          instr->context());
+  __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoToFastProperties(LToFastProperties* instr) {
+  DCHECK(ToRegister(instr->value()).is(r3));
+  __ push(r3);
+  CallRuntime(Runtime::kToFastProperties, 1, instr);
+}
+
+
+void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  Label materialized;
+  // Registers will be used as follows:
+  // r10 = literals array.
+  // r4 = regexp literal.
+  // r3 = regexp literal clone.
+  // r5 and r7-r9 are used as temporaries.
+  int literal_offset =
+      FixedArray::OffsetOfElementAt(instr->hydrogen()->literal_index());
+  __ Move(r10, instr->hydrogen()->literals());
+  __ LoadP(r4, FieldMemOperand(r10, literal_offset));
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r4, ip);
+  __ bne(&materialized);
+
+  // Create the regexp literal using a runtime function.
+  // Result will be in r3.
+  __ LoadSmiLiteral(r9, Smi::FromInt(instr->hydrogen()->literal_index()));
+  __ mov(r8, Operand(instr->hydrogen()->pattern()));
+  __ mov(r7, Operand(instr->hydrogen()->flags()));
+  __ Push(r10, r9, r8, r7);
+  CallRuntime(Runtime::kMaterializeRegExpLiteral, 4, instr);
+  __ mr(r4, r3);
+
+  __ bind(&materialized);
+  int size = JSRegExp::kSize + JSRegExp::kInObjectFieldCount * kPointerSize;
+  Label allocated, runtime_allocate;
+
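+  // Shallow-copy the boilerplate so that each evaluation of the literal
+  // produces a fresh JSRegExp object.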
+  __ Allocate(size, r3, r5, r6, &runtime_allocate, TAG_OBJECT);
+  __ b(&allocated);
+
+  __ bind(&runtime_allocate);
+  __ LoadSmiLiteral(r3, Smi::FromInt(size));
+  __ Push(r4, r3);
+  CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
+  __ pop(r4);
+
+  __ bind(&allocated);
+  // Copy the content into the newly allocated memory.
+  __ CopyFields(r3, r4, r5.bit(), size / kPointerSize);
+}
+
+
+void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
+  DCHECK(ToRegister(instr->context()).is(cp));
+  // Use the fast case closure allocation code that allocates in new
+  // space for nested functions that don't need literals cloning.
+  bool pretenure = instr->hydrogen()->pretenure();
+  if (!pretenure && instr->hydrogen()->has_no_literals()) {
+    FastNewClosureStub stub(isolate(), instr->hydrogen()->strict_mode(),
+                            instr->hydrogen()->kind());
+    __ mov(r5, Operand(instr->hydrogen()->shared_info()));
+    CallCode(stub.GetCode(), RelocInfo::CODE_TARGET, instr);
+  } else {
+    __ mov(r5, Operand(instr->hydrogen()->shared_info()));
+    __ mov(r4, Operand(pretenure ? factory()->true_value()
+                                 : factory()->false_value()));
+    __ Push(cp, r5, r4);
+    CallRuntime(Runtime::kNewClosure, 3, instr);
+  }
+}
+
+
+void LCodeGen::DoTypeof(LTypeof* instr) {
+  Register input = ToRegister(instr->value());
+  __ push(input);
+  CallRuntime(Runtime::kTypeof, 1, instr);
+}
+
+
+void LCodeGen::DoTypeofIsAndBranch(LTypeofIsAndBranch* instr) {
+  Register input = ToRegister(instr->value());
+
+  Condition final_branch_condition =
+      EmitTypeofIs(instr->TrueLabel(chunk_), instr->FalseLabel(chunk_), input,
+                   instr->type_literal());
+  if (final_branch_condition != kNoCondition) {
+    EmitBranch(instr, final_branch_condition);
+  }
+}
+
+
+Condition LCodeGen::EmitTypeofIs(Label* true_label, Label* false_label,
+                                 Register input, Handle<String> type_name) {
+  Condition final_branch_condition = kNoCondition;
+  Register scratch = scratch0();
+  Factory* factory = isolate()->factory();
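+  // type_name is a compile-time constant, so this chain of comparisons is
+  // resolved at compile time and only one instruction sequence is emitted.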
+  if (String::Equals(type_name, factory->number_string())) {
+    __ JumpIfSmi(input, true_label);
+    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->string_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareObjectType(input, scratch, no_reg, FIRST_NONSTRING_TYPE);
+    __ bge(false_label);
+    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+    __ cmpi(r0, Operand::Zero());
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->symbol_string())) {
+    __ JumpIfSmi(input, false_label);
+    __ CompareObjectType(input, scratch, no_reg, SYMBOL_TYPE);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->boolean_string())) {
+    __ CompareRoot(input, Heap::kTrueValueRootIndex);
+    __ beq(true_label);
+    __ CompareRoot(input, Heap::kFalseValueRootIndex);
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->undefined_string())) {
+    __ CompareRoot(input, Heap::kUndefinedValueRootIndex);
+    __ beq(true_label);
+    __ JumpIfSmi(input, false_label);
+    // Check for undetectable objects => true.
+    __ LoadP(scratch, FieldMemOperand(input, HeapObject::kMapOffset));
+    __ lbz(scratch, FieldMemOperand(scratch, Map::kBitFieldOffset));
+    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+    __ cmpi(r0, Operand::Zero());
+    final_branch_condition = ne;
+
+  } else if (String::Equals(type_name, factory->function_string())) {
+    STATIC_ASSERT(NUM_OF_CALLABLE_SPEC_OBJECT_TYPES == 2);
+    Register type_reg = scratch;
+    __ JumpIfSmi(input, false_label);
+    __ CompareObjectType(input, scratch, type_reg, JS_FUNCTION_TYPE);
+    __ beq(true_label);
+    __ cmpi(type_reg, Operand(JS_FUNCTION_PROXY_TYPE));
+    final_branch_condition = eq;
+
+  } else if (String::Equals(type_name, factory->object_string())) {
+    Register map = scratch;
+    __ JumpIfSmi(input, false_label);
+    __ CompareRoot(input, Heap::kNullValueRootIndex);
+    __ beq(true_label);
+    __ CheckObjectTypeRange(input, map, FIRST_NONCALLABLE_SPEC_OBJECT_TYPE,
+                            LAST_NONCALLABLE_SPEC_OBJECT_TYPE, false_label);
+    // Check for undetectable objects => false.
+    __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
+    __ ExtractBit(r0, scratch, Map::kIsUndetectable);
+    __ cmpi(r0, Operand::Zero());
+    final_branch_condition = eq;
+
+  } else {
+    __ b(false_label);
+  }
+
+  return final_branch_condition;
+}
+
+
+void LCodeGen::DoIsConstructCallAndBranch(LIsConstructCallAndBranch* instr) {
+  Register temp1 = ToRegister(instr->temp());
+
+  EmitIsConstructCall(temp1, scratch0());
+  EmitBranch(instr, eq);
+}
+
+
+void LCodeGen::EmitIsConstructCall(Register temp1, Register temp2) {
+  DCHECK(!temp1.is(temp2));
+  // Get the frame pointer for the calling frame.
+  __ LoadP(temp1, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+
+  // Skip the arguments adaptor frame if it exists.
+  Label check_frame_marker;
+  __ LoadP(temp2, MemOperand(temp1, StandardFrameConstants::kContextOffset));
+  __ CmpSmiLiteral(temp2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0);
+  __ bne(&check_frame_marker);
+  __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kCallerFPOffset));
+
+  // Check the marker in the calling frame.
+  __ bind(&check_frame_marker);
+  __ LoadP(temp1, MemOperand(temp1, StandardFrameConstants::kMarkerOffset));
+  __ CmpSmiLiteral(temp1, Smi::FromInt(StackFrame::CONSTRUCT), r0);
+}
+
+
+void LCodeGen::EnsureSpaceForLazyDeopt(int space_needed) {
+  if (!info()->IsStub()) {
+    // Ensure that we have enough space after the previous lazy-bailout
+    // instruction for patching the code here.
+    int current_pc = masm()->pc_offset();
+    if (current_pc < last_lazy_deopt_pc_ + space_needed) {
+      int padding_size = last_lazy_deopt_pc_ + space_needed - current_pc;
+      DCHECK_EQ(0, padding_size % Assembler::kInstrSize);
+      while (padding_size > 0) {
+        __ nop();
+        padding_size -= Assembler::kInstrSize;
+      }
+    }
+  }
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+}
+
+
+void LCodeGen::DoLazyBailout(LLazyBailout* instr) {
+  last_lazy_deopt_pc_ = masm()->pc_offset();
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoDeoptimize(LDeoptimize* instr) {
+  Deoptimizer::BailoutType type = instr->hydrogen()->type();
+  // TODO(danno): Stubs expect all deopts to be lazy for historical reasons (the
+  // needed return address), even though the implementation of LAZY and EAGER is
+  // now identical. When LAZY is eventually completely folded into EAGER, remove
+  // the special case below.
+  if (info()->IsStub() && type == Deoptimizer::EAGER) {
+    type = Deoptimizer::LAZY;
+  }
+
+  DeoptimizeIf(al, instr, instr->hydrogen()->reason(), type);
+}
+
+
+void LCodeGen::DoDummy(LDummy* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDummyUse(LDummyUse* instr) {
+  // Nothing to see here, move on!
+}
+
+
+void LCodeGen::DoDeferredStackCheck(LStackCheck* instr) {
+  PushSafepointRegistersScope scope(this);
+  LoadContextFromDeferred(instr->context());
+  __ CallRuntimeSaveDoubles(Runtime::kStackGuard);
+  RecordSafepointWithLazyDeopt(
+      instr, RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS);
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  safepoints_.RecordLazyDeoptimizationIndex(env->deoptimization_index());
+}
+
+
+void LCodeGen::DoStackCheck(LStackCheck* instr) {
+  class DeferredStackCheck FINAL : public LDeferredCode {
+   public:
+    DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
+        : LDeferredCode(codegen), instr_(instr) {}
+    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LStackCheck* instr_;
+  };
+
+  DCHECK(instr->HasEnvironment());
+  LEnvironment* env = instr->environment();
+  // There is no LLazyBailout instruction for stack-checks. We have to
+  // prepare for lazy deoptimization explicitly here.
+  if (instr->hydrogen()->is_function_entry()) {
+    // Perform stack overflow check.
+    Label done;
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmpl(sp, ip);
+    __ bge(&done);
+    DCHECK(instr->context()->IsRegister());
+    DCHECK(ToRegister(instr->context()).is(cp));
+    CallCode(isolate()->builtins()->StackCheck(), RelocInfo::CODE_TARGET,
+             instr);
+    __ bind(&done);
+  } else {
+    DCHECK(instr->hydrogen()->is_backwards_branch());
+    // Perform stack overflow check if this goto needs it before jumping.
+    DeferredStackCheck* deferred_stack_check =
+        new (zone()) DeferredStackCheck(this, instr);
+    __ LoadRoot(ip, Heap::kStackLimitRootIndex);
+    __ cmpl(sp, ip);
+    __ blt(deferred_stack_check->entry());
+    EnsureSpaceForLazyDeopt(Deoptimizer::patch_size());
+    __ bind(instr->done_label());
+    deferred_stack_check->SetExit(instr->done_label());
+    RegisterEnvironmentForDeoptimization(env, Safepoint::kLazyDeopt);
+    // Don't record a deoptimization index for the safepoint here.
+    // This will be done explicitly when emitting call and the safepoint in
+    // the deferred code.
+  }
+}
+
+
+void LCodeGen::DoOsrEntry(LOsrEntry* instr) {
+  // This is a pseudo-instruction that ensures that the environment here is
+  // properly registered for deoptimization and records the assembler's PC
+  // offset.
+  LEnvironment* environment = instr->environment();
+
+  // If the environment were already registered, we would have no way of
+  // backpatching it with the spill slot operands.
+  DCHECK(!environment->HasBeenRegistered());
+  RegisterEnvironmentForDeoptimization(environment, Safepoint::kNoLazyDeopt);
+
+  GenerateOsrPrologue();
+}
+
+
+void LCodeGen::DoForInPrepareMap(LForInPrepareMap* instr) {
+  __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
+  __ cmp(r3, ip);
+  DeoptimizeIf(eq, instr, "undefined");
+
+  Register null_value = r8;
+  __ LoadRoot(null_value, Heap::kNullValueRootIndex);
+  __ cmp(r3, null_value);
+  DeoptimizeIf(eq, instr, "null");
+
+  __ TestIfSmi(r3, r0);
+  DeoptimizeIf(eq, instr, "Smi", cr0);
+
+  STATIC_ASSERT(FIRST_JS_PROXY_TYPE == FIRST_SPEC_OBJECT_TYPE);
+  __ CompareObjectType(r3, r4, r4, LAST_JS_PROXY_TYPE);
+  DeoptimizeIf(le, instr, "wrong instance type");
+
+  Label use_cache, call_runtime;
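+  // The enum cache can only be used if the receiver and every object on its
+  // prototype chain (terminated by null) have valid caches and no elements.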
+  __ CheckEnumCache(null_value, &call_runtime);
+
+  __ LoadP(r3, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ b(&use_cache);
+
+  // Get the set of properties to enumerate.
+  __ bind(&call_runtime);
+  __ push(r3);
+  CallRuntime(Runtime::kGetPropertyNamesFast, 1, instr);
+
+  __ LoadP(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
+  __ LoadRoot(ip, Heap::kMetaMapRootIndex);
+  __ cmp(r4, ip);
+  DeoptimizeIf(ne, instr, "wrong map");
+  __ bind(&use_cache);
+}
+
+
+void LCodeGen::DoForInCacheArray(LForInCacheArray* instr) {
+  Register map = ToRegister(instr->map());
+  Register result = ToRegister(instr->result());
+  Label load_cache, done;
+  __ EnumLength(result, map);
+  __ CmpSmiLiteral(result, Smi::FromInt(0), r0);
+  __ bne(&load_cache);
+  __ mov(result, Operand(isolate()->factory()->empty_fixed_array()));
+  __ b(&done);
+
+  __ bind(&load_cache);
+  __ LoadInstanceDescriptors(map, result);
+  __ LoadP(result, FieldMemOperand(result, DescriptorArray::kEnumCacheOffset));
+  __ LoadP(result, FieldMemOperand(result, FixedArray::SizeFor(instr->idx())));
+  __ cmpi(result, Operand::Zero());
+  DeoptimizeIf(eq, instr, "no cache");
+
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoCheckMapValue(LCheckMapValue* instr) {
+  Register object = ToRegister(instr->value());
+  Register map = ToRegister(instr->map());
+  __ LoadP(scratch0(), FieldMemOperand(object, HeapObject::kMapOffset));
+  __ cmp(map, scratch0());
+  DeoptimizeIf(ne, instr, "wrong map");
+}
+
+
+void LCodeGen::DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr,
+                                           Register result, Register object,
+                                           Register index) {
+  PushSafepointRegistersScope scope(this);
+  __ Push(object, index);
+  __ li(cp, Operand::Zero());
+  __ CallRuntimeSaveDoubles(Runtime::kLoadMutableDouble);
+  RecordSafepointWithRegisters(instr->pointer_map(), 2,
+                               Safepoint::kNoLazyDeopt);
+  __ StoreToSafepointRegisterSlot(r3, result);
+}
+
+
+void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
+  class DeferredLoadMutableDouble FINAL : public LDeferredCode {
+   public:
+    DeferredLoadMutableDouble(LCodeGen* codegen, LLoadFieldByIndex* instr,
+                              Register result, Register object, Register index)
+        : LDeferredCode(codegen),
+          instr_(instr),
+          result_(result),
+          object_(object),
+          index_(index) {}
+    void Generate() OVERRIDE {
+      codegen()->DoDeferredLoadMutableDouble(instr_, result_, object_, index_);
+    }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
+   private:
+    LLoadFieldByIndex* instr_;
+    Register result_;
+    Register object_;
+    Register index_;
+  };
+
+  Register object = ToRegister(instr->object());
+  Register index = ToRegister(instr->index());
+  Register result = ToRegister(instr->result());
+  Register scratch = scratch0();
+
+  DeferredLoadMutableDouble* deferred;
+  deferred = new (zone())
+      DeferredLoadMutableDouble(this, instr, result, object, index);
+
+  Label out_of_object, done;
+
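+  // The lowest payload bit of the Smi index flags a mutable double field that
+  // must be boxed in deferred code; the remaining bits hold the signed field
+  // index, negative when the property is stored out of object.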
+  __ TestBitMask(index, reinterpret_cast<uintptr_t>(Smi::FromInt(1)), r0);
+  __ bne(deferred->entry(), cr0);
+  __ ShiftRightArithImm(index, index, 1);
+
+  __ cmpi(index, Operand::Zero());
+  __ blt(&out_of_object);
+
+  __ SmiToPtrArrayOffset(r0, index);
+  __ add(scratch, object, r0);
+  __ LoadP(result, FieldMemOperand(scratch, JSObject::kHeaderSize));
+
+  __ b(&done);
+
+  __ bind(&out_of_object);
+  __ LoadP(result, FieldMemOperand(object, JSObject::kPropertiesOffset));
+  // The index is equal to the negated out-of-object property index plus 1.
+  __ SmiToPtrArrayOffset(r0, index);
+  __ sub(scratch, result, r0);
+  __ LoadP(result,
+           FieldMemOperand(scratch, FixedArray::kHeaderSize - kPointerSize));
+  __ bind(deferred->exit());
+  __ bind(&done);
+}
+
+
+void LCodeGen::DoStoreFrameContext(LStoreFrameContext* instr) {
+  Register context = ToRegister(instr->context());
+  __ StoreP(context, MemOperand(fp, StandardFrameConstants::kContextOffset));
+}
+
+
+void LCodeGen::DoAllocateBlockContext(LAllocateBlockContext* instr) {
+  Handle<ScopeInfo> scope_info = instr->scope_info();
+  __ Push(scope_info);
+  __ push(ToRegister(instr->function()));
+  CallRuntime(Runtime::kPushBlockContext, 2, instr);
+  RecordSafepoint(Safepoint::kNoLazyDeopt);
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
diff --git a/deps/v8/src/ppc/lithium-codegen-ppc.h b/deps/v8/src/ppc/lithium-codegen-ppc.h
new file mode 100644 (file)
index 0000000..8ae3b3c
--- /dev/null
@@ -0,0 +1,372 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_LITHIUM_CODEGEN_PPC_H_
+#define V8_PPC_LITHIUM_CODEGEN_PPC_H_
+
+#include "src/ppc/lithium-ppc.h"
+
+#include "src/ppc/lithium-gap-resolver-ppc.h"
+#include "src/deoptimizer.h"
+#include "src/lithium-codegen.h"
+#include "src/safepoint-table.h"
+#include "src/scopes.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LDeferredCode;
+class SafepointGenerator;
+
+class LCodeGen : public LCodeGenBase {
+ public:
+  LCodeGen(LChunk* chunk, MacroAssembler* assembler, CompilationInfo* info)
+      : LCodeGenBase(chunk, assembler, info),
+        deoptimizations_(4, info->zone()),
+        jump_table_(4, info->zone()),
+        deoptimization_literals_(8, info->zone()),
+        inlined_function_count_(0),
+        scope_(info->scope()),
+        translations_(info->zone()),
+        deferred_(8, info->zone()),
+        osr_pc_offset_(-1),
+        frame_is_built_(false),
+        safepoints_(info->zone()),
+        resolver_(this),
+        expected_safepoint_kind_(Safepoint::kSimple) {
+    PopulateDeoptimizationLiteralsWithInlinedFunctions();
+  }
+
+
+  int LookupDestination(int block_id) const {
+    return chunk()->LookupDestination(block_id);
+  }
+
+  bool IsNextEmittedBlock(int block_id) const {
+    return LookupDestination(block_id) == GetNextEmittedBlock();
+  }
+
+  bool NeedsEagerFrame() const {
+    return GetStackSlotCount() > 0 || info()->is_non_deferred_calling() ||
+           !info()->IsStub() || info()->requires_frame();
+  }
+  bool NeedsDeferredFrame() const {
+    return !NeedsEagerFrame() && info()->is_deferred_calling();
+  }
+
+  LinkRegisterStatus GetLinkRegisterState() const {
+    return frame_is_built_ ? kLRHasBeenSaved : kLRHasNotBeenSaved;
+  }
+
+  // Support for converting LOperands to assembler types.
+  // LOperand must be a register.
+  Register ToRegister(LOperand* op) const;
+
+  // LOperand is loaded into scratch, unless already a register.
+  Register EmitLoadRegister(LOperand* op, Register scratch);
+
+  // LConstantOperand must be an Integer32 or Smi
+  void EmitLoadIntegerConstant(LConstantOperand* const_op, Register dst);
+
+  // LOperand must be a double register.
+  DoubleRegister ToDoubleRegister(LOperand* op) const;
+
+  intptr_t ToRepresentation(LConstantOperand* op,
+                            const Representation& r) const;
+  int32_t ToInteger32(LConstantOperand* op) const;
+  Smi* ToSmi(LConstantOperand* op) const;
+  double ToDouble(LConstantOperand* op) const;
+  Operand ToOperand(LOperand* op);
+  MemOperand ToMemOperand(LOperand* op) const;
+  // Returns a MemOperand pointing to the high word of a DoubleStackSlot.
+  MemOperand ToHighMemOperand(LOperand* op) const;
+
+  bool IsInteger32(LConstantOperand* op) const;
+  bool IsSmi(LConstantOperand* op) const;
+  Handle<Object> ToHandle(LConstantOperand* op) const;
+
+  // Try to generate code for the entire chunk, but it may fail if the
+  // chunk contains constructs we cannot handle. Returns true if the
+  // code generation attempt succeeded.
+  bool GenerateCode();
+
+  // Finish the code by setting stack height, safepoint, and bailout
+  // information on it.
+  void FinishCode(Handle<Code> code);
+
+  // Deferred code support.
+  void DoDeferredNumberTagD(LNumberTagD* instr);
+
+  enum IntegerSignedness { SIGNED_INT32, UNSIGNED_INT32 };
+  void DoDeferredNumberTagIU(LInstruction* instr, LOperand* value,
+                             LOperand* temp1, LOperand* temp2,
+                             IntegerSignedness signedness);
+
+  void DoDeferredTaggedToI(LTaggedToI* instr);
+  void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
+  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
+  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
+  void DoDeferredAllocate(LAllocate* instr);
+  void DoDeferredInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
+                                       Label* map_check);
+  void DoDeferredInstanceMigration(LCheckMaps* instr, Register object);
+  void DoDeferredLoadMutableDouble(LLoadFieldByIndex* instr, Register result,
+                                   Register object, Register index);
+
+  // Parallel move support.
+  void DoParallelMove(LParallelMove* move);
+  void DoGap(LGap* instr);
+
+  MemOperand PrepareKeyedOperand(Register key, Register base,
+                                 bool key_is_constant, bool key_is_tagged,
+                                 int constant_key, int element_size_shift,
+                                 int base_offset);
+
+  // Emit frame translation commands for an environment.
+  void WriteTranslation(LEnvironment* environment, Translation* translation);
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) void Do##type(L##type* node);
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+ private:
+  StrictMode strict_mode() const { return info()->strict_mode(); }
+
+  Scope* scope() const { return scope_; }
+
+  Register scratch0() { return r11; }
+  DoubleRegister double_scratch0() { return kScratchDoubleReg; }
+
+  LInstruction* GetNextInstruction();
+
+  void EmitClassOfTest(Label* if_true, Label* if_false,
+                       Handle<String> class_name, Register input,
+                       Register temporary, Register temporary2);
+
+  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
+
+  void AddDeferredCode(LDeferredCode* code) { deferred_.Add(code, zone()); }
+
+  void SaveCallerDoubles();
+  void RestoreCallerDoubles();
+
+  // Code generation passes.  Returns true if code generation should
+  // continue.
+  void GenerateBodyInstructionPre(LInstruction* instr) OVERRIDE;
+  bool GeneratePrologue();
+  bool GenerateDeferredCode();
+  bool GenerateJumpTable();
+  bool GenerateSafepointTable();
+
+  // Generates the custom OSR entrypoint and sets the osr_pc_offset.
+  void GenerateOsrPrologue();
+
+  enum SafepointMode {
+    RECORD_SIMPLE_SAFEPOINT,
+    RECORD_SAFEPOINT_WITH_REGISTERS_AND_NO_ARGUMENTS
+  };
+
+  void CallCode(Handle<Code> code, RelocInfo::Mode mode, LInstruction* instr);
+
+  void CallCodeGeneric(Handle<Code> code, RelocInfo::Mode mode,
+                       LInstruction* instr, SafepointMode safepoint_mode);
+
+  void CallRuntime(const Runtime::Function* function, int num_arguments,
+                   LInstruction* instr,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+
+  void CallRuntime(Runtime::FunctionId id, int num_arguments,
+                   LInstruction* instr) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, num_arguments, instr);
+  }
+
+  void LoadContextFromDeferred(LOperand* context);
+  void CallRuntimeFromDeferred(Runtime::FunctionId id, int argc,
+                               LInstruction* instr, LOperand* context);
+
+  enum R4State { R4_UNINITIALIZED, R4_CONTAINS_TARGET };
+
+  // Generate a direct call to a known function.  Expects the function
+  // to be in r4.
+  void CallKnownFunction(Handle<JSFunction> function,
+                         int formal_parameter_count, int arity,
+                         LInstruction* instr, R4State r4_state);
+
+  void RecordSafepointWithLazyDeopt(LInstruction* instr,
+                                    SafepointMode safepoint_mode);
+
+  void RegisterEnvironmentForDeoptimization(LEnvironment* environment,
+                                            Safepoint::DeoptMode mode);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    const char* detail, Deoptimizer::BailoutType bailout_type,
+                    CRegister cr = cr7);
+  void DeoptimizeIf(Condition condition, LInstruction* instr,
+                    const char* detail, CRegister cr = cr7);
+
+  void AddToTranslation(LEnvironment* environment, Translation* translation,
+                        LOperand* op, bool is_tagged, bool is_uint32,
+                        int* object_index_pointer,
+                        int* dematerialized_index_pointer);
+  void PopulateDeoptimizationData(Handle<Code> code);
+  int DefineDeoptimizationLiteral(Handle<Object> literal);
+
+  void PopulateDeoptimizationLiteralsWithInlinedFunctions();
+
+  Register ToRegister(int index) const;
+  DoubleRegister ToDoubleRegister(int index) const;
+
+  MemOperand BuildSeqStringOperand(Register string, LOperand* index,
+                                   String::Encoding encoding);
+
+  void EmitMathAbs(LMathAbs* instr);
+#if V8_TARGET_ARCH_PPC64
+  void EmitInteger32MathAbs(LMathAbs* instr);
+#endif
+
+  // Support for recording safepoint and position information.
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::Kind kind,
+                       int arguments, Safepoint::DeoptMode mode);
+  void RecordSafepoint(LPointerMap* pointers, Safepoint::DeoptMode mode);
+  void RecordSafepoint(Safepoint::DeoptMode mode);
+  void RecordSafepointWithRegisters(LPointerMap* pointers, int arguments,
+                                    Safepoint::DeoptMode mode);
+
+  void RecordAndWritePosition(int position) OVERRIDE;
+
+  static Condition TokenToCondition(Token::Value op);
+  void EmitGoto(int block);
+
+  // EmitBranch expects to be the last instruction of a block.
+  template <class InstrType>
+  void EmitBranch(InstrType instr, Condition condition, CRegister cr = cr7);
+  template <class InstrType>
+  void EmitFalseBranch(InstrType instr, Condition condition,
+                       CRegister cr = cr7);
+  void EmitNumberUntagD(LNumberUntagD* instr, Register input,
+                        DoubleRegister result, NumberUntagDMode mode);
+
+  // Emits optimized code for typeof x == "y".  Modifies input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitTypeofIs(Label* true_label, Label* false_label, Register input,
+                         Handle<String> type_name);
+
+  // Emits optimized code for %_IsObject(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsObject(Register input, Register temp1, Label* is_not_object,
+                         Label* is_object);
+
+  // Emits optimized code for %_IsString(x).  Preserves input register.
+  // Returns the condition on which a final split to
+  // true and false label should be made, to optimize fallthrough.
+  Condition EmitIsString(Register input, Register temp1, Label* is_not_string,
+                         SmiCheck check_needed);
+
+  // Emits optimized code for %_IsConstructCall().
+  // Caller should branch on equal condition.
+  void EmitIsConstructCall(Register temp1, Register temp2);
+
+  // Emits optimized code to deep-copy the contents of statically known
+  // object graphs (e.g. object literal boilerplate).
+  void EmitDeepCopy(Handle<JSObject> object, Register result, Register source,
+                    int* offset, AllocationSiteMode mode);
+
+  void EnsureSpaceForLazyDeopt(int space_needed) OVERRIDE;
+  void DoLoadKeyedExternalArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr);
+  void DoLoadKeyedFixedArray(LLoadKeyed* instr);
+  void DoStoreKeyedExternalArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr);
+  void DoStoreKeyedFixedArray(LStoreKeyed* instr);
+
+  template <class T>
+  void EmitVectorLoadICRegisters(T* instr);
+
+  ZoneList<LEnvironment*> deoptimizations_;
+  ZoneList<Deoptimizer::JumpTableEntry> jump_table_;
+  ZoneList<Handle<Object> > deoptimization_literals_;
+  int inlined_function_count_;
+  Scope* const scope_;
+  TranslationBuffer translations_;
+  ZoneList<LDeferredCode*> deferred_;
+  int osr_pc_offset_;
+  bool frame_is_built_;
+
+  // Builder that keeps track of safepoints in the code. The table
+  // itself is emitted at the end of the generated code.
+  SafepointTableBuilder safepoints_;
+
+  // Compiles a set of parallel moves into a sequential list of moves.
+  LGapResolver resolver_;
+
+  Safepoint::Kind expected_safepoint_kind_;
+
+  class PushSafepointRegistersScope FINAL BASE_EMBEDDED {
+   public:
+    explicit PushSafepointRegistersScope(LCodeGen* codegen)
+        : codegen_(codegen) {
+      DCHECK(codegen_->info()->is_calling());
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kSimple);
+      codegen_->expected_safepoint_kind_ = Safepoint::kWithRegisters;
+      StoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->CallStub(&stub);
+    }
+
+    ~PushSafepointRegistersScope() {
+      DCHECK(codegen_->expected_safepoint_kind_ == Safepoint::kWithRegisters);
+      RestoreRegistersStateStub stub(codegen_->isolate());
+      codegen_->masm_->CallStub(&stub);
+      codegen_->expected_safepoint_kind_ = Safepoint::kSimple;
+    }
+
+   private:
+    LCodeGen* codegen_;
+  };
+
+  friend class LDeferredCode;
+  friend class LEnvironment;
+  friend class SafepointGenerator;
+  DISALLOW_COPY_AND_ASSIGN(LCodeGen);
+};
+
+
+class LDeferredCode : public ZoneObject {
+ public:
+  explicit LDeferredCode(LCodeGen* codegen)
+      : codegen_(codegen),
+        external_exit_(NULL),
+        instruction_index_(codegen->current_instruction_) {
+    codegen->AddDeferredCode(this);
+  }
+
+  virtual ~LDeferredCode() {}
+  virtual void Generate() = 0;
+  virtual LInstruction* instr() = 0;
+
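+  // A deferred block normally jumps back to its own exit label; SetExit lets
+  // the owner redirect that jump to an existing label instead.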
+  void SetExit(Label* exit) { external_exit_ = exit; }
+  Label* entry() { return &entry_; }
+  Label* exit() { return external_exit_ != NULL ? external_exit_ : &exit_; }
+  int instruction_index() const { return instruction_index_; }
+
+ protected:
+  LCodeGen* codegen() const { return codegen_; }
+  MacroAssembler* masm() const { return codegen_->masm(); }
+
+ private:
+  LCodeGen* codegen_;
+  Label entry_;
+  Label exit_;
+  Label* external_exit_;
+  int instruction_index_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_PPC_LITHIUM_CODEGEN_PPC_H_
diff --git a/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc b/deps/v8/src/ppc/lithium-gap-resolver-ppc.cc
new file mode 100644 (file)
index 0000000..c261b66
--- /dev/null
@@ -0,0 +1,288 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/ppc/lithium-codegen-ppc.h"
+#include "src/ppc/lithium-gap-resolver-ppc.h"
+
+namespace v8 {
+namespace internal {
+
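+// r11 is the code generator's scratch register (scratch0()) and is not live
+// across gap moves, so it can hold the saved value when breaking cycles.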
+static const Register kSavedValueRegister = {11};
+
+LGapResolver::LGapResolver(LCodeGen* owner)
+    : cgen_(owner),
+      moves_(32, owner->zone()),
+      root_index_(0),
+      in_cycle_(false),
+      saved_destination_(NULL) {}
+
+
+void LGapResolver::Resolve(LParallelMove* parallel_move) {
+  DCHECK(moves_.is_empty());
+  // Build up a worklist of moves.
+  BuildInitialMoveList(parallel_move);
+
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands move = moves_[i];
+    // Skip constants to perform them last.  They don't block other moves
+    // and skipping such moves with register destinations keeps those
+    // registers free for the whole algorithm.
+    if (!move.IsEliminated() && !move.source()->IsConstantOperand()) {
+      root_index_ = i;  // Any cycle is found by reaching this move again.
+      PerformMove(i);
+      if (in_cycle_) {
+        RestoreValue();
+      }
+    }
+  }
+
+  // Perform the moves with constant sources.
+  for (int i = 0; i < moves_.length(); ++i) {
+    if (!moves_[i].IsEliminated()) {
+      DCHECK(moves_[i].source()->IsConstantOperand());
+      EmitMove(i);
+    }
+  }
+
+  moves_.Rewind(0);
+}
+
+
+void LGapResolver::BuildInitialMoveList(LParallelMove* parallel_move) {
+  // Perform a linear sweep of the moves to add them to the initial list of
+  // moves to perform, ignoring any move that is redundant (the source is
+  // the same as the destination, the destination is ignored and
+  // unallocated, or the move was already eliminated).
+  const ZoneList<LMoveOperands>* moves = parallel_move->move_operands();
+  for (int i = 0; i < moves->length(); ++i) {
+    LMoveOperands move = moves->at(i);
+    if (!move.IsRedundant()) moves_.Add(move, cgen_->zone());
+  }
+  Verify();
+}
+
+
+void LGapResolver::PerformMove(int index) {
+  // Each call to this function performs a move and deletes it from the move
+  // graph.  We first recursively perform any move blocking this one.  We
+  // mark a move as "pending" on entry to PerformMove in order to detect
+  // cycles in the move graph.
+
+  // We can only find a cycle, when doing a depth-first traversal of moves,
+  // by encountering the starting move again. So by spilling the source of
+  // the starting move, we break the cycle.  All moves are then unblocked,
+  // and the starting move is completed by writing the spilled value to
+  // its destination.  All other moves from the spilled source have been
+  // completed prior to breaking the cycle.
+  // An additional complication is that moves to MemOperands with large
+  // offsets (more than 1K or 4K) require us to spill this spilled value to
+  // the stack, to free up the register.
+  DCHECK(!moves_[index].IsPending());
+  DCHECK(!moves_[index].IsRedundant());
+
+  // Clear this move's destination to indicate a pending move.  The actual
+  // destination is saved in a stack allocated local.  Multiple moves can
+  // be pending because this function is recursive.
+  DCHECK(moves_[index].source() != NULL);  // Or else it will look eliminated.
+  LOperand* destination = moves_[index].destination();
+  moves_[index].set_destination(NULL);
+
+  // Perform a depth-first traversal of the move graph to resolve
+  // dependencies.  Any unperformed, unpending move with a source the same
+  // as this one's destination blocks this one so recursively perform all
+  // such moves.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LMoveOperands other_move = moves_[i];
+    if (other_move.Blocks(destination) && !other_move.IsPending()) {
+      PerformMove(i);
+      // If there is a blocking, pending move, it must be moves_[root_index_],
+      // and all other moves with the same source as moves_[root_index_] are
+      // successfully executed (because they are cycle-free) by this loop.
+    }
+  }
+
+  // We are about to resolve this move and don't need it marked as
+  // pending, so restore its destination.
+  moves_[index].set_destination(destination);
+
+  // The move may be blocked on a pending move, which must be the starting move.
+  // In this case, we have a cycle, and we save the source of this move to
+  // a scratch register to break it.
+  LMoveOperands other_move = moves_[root_index_];
+  if (other_move.Blocks(destination)) {
+    DCHECK(other_move.IsPending());
+    BreakCycle(index);
+    return;
+  }
+
+  // This move is no longer blocked.
+  EmitMove(index);
+}
+
+
+void LGapResolver::Verify() {
+#ifdef ENABLE_SLOW_DCHECKS
+  // No operand should be the destination for more than one move.
+  for (int i = 0; i < moves_.length(); ++i) {
+    LOperand* destination = moves_[i].destination();
+    for (int j = i + 1; j < moves_.length(); ++j) {
+      SLOW_DCHECK(!destination->Equals(moves_[j].destination()));
+    }
+  }
+#endif
+}
+
+#define __ ACCESS_MASM(cgen_->masm())
+
+void LGapResolver::BreakCycle(int index) {
+  // We save in a register the value that should end up in the source of
+  // moves_[root_index].  After performing all moves in the tree rooted
+  // in that move, we save the value to that source.
+  DCHECK(moves_[index].destination()->Equals(moves_[root_index_].source()));
+  DCHECK(!in_cycle_);
+  in_cycle_ = true;
+  LOperand* source = moves_[index].source();
+  saved_destination_ = moves_[index].destination();
+  if (source->IsRegister()) {
+    __ mr(kSavedValueRegister, cgen_->ToRegister(source));
+  } else if (source->IsStackSlot()) {
+    __ LoadP(kSavedValueRegister, cgen_->ToMemOperand(source));
+  } else if (source->IsDoubleRegister()) {
+    __ fmr(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
+  } else if (source->IsDoubleStackSlot()) {
+    __ lfd(kScratchDoubleReg, cgen_->ToMemOperand(source));
+  } else {
+    UNREACHABLE();
+  }
+  // This move will be done by restoring the saved value to the destination.
+  moves_[index].Eliminate();
+}
+
+
+void LGapResolver::RestoreValue() {
+  DCHECK(in_cycle_);
+  DCHECK(saved_destination_ != NULL);
+
+  // The spilled value is in kSavedValueRegister or kScratchDoubleReg.
+  if (saved_destination_->IsRegister()) {
+    __ mr(cgen_->ToRegister(saved_destination_), kSavedValueRegister);
+  } else if (saved_destination_->IsStackSlot()) {
+    __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
+  } else if (saved_destination_->IsDoubleRegister()) {
+    __ fmr(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
+  } else if (saved_destination_->IsDoubleStackSlot()) {
+    __ stfd(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
+  } else {
+    UNREACHABLE();
+  }
+
+  in_cycle_ = false;
+  saved_destination_ = NULL;
+}
+
+
+void LGapResolver::EmitMove(int index) {
+  LOperand* source = moves_[index].source();
+  LOperand* destination = moves_[index].destination();
+
+  // Dispatch on the source and destination operand kinds.  Not all
+  // combinations are possible.
+
+  if (source->IsRegister()) {
+    Register source_register = cgen_->ToRegister(source);
+    if (destination->IsRegister()) {
+      __ mr(cgen_->ToRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      __ StoreP(source_register, cgen_->ToMemOperand(destination));
+    }
+  } else if (source->IsStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsRegister()) {
+      __ LoadP(cgen_->ToRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+        __ LoadP(ip, source_operand);
+        __ StoreP(ip, destination_operand);
+      } else {
+        __ LoadP(kSavedValueRegister, source_operand);
+        __ StoreP(kSavedValueRegister, destination_operand);
+      }
+    }
+
+  } else if (source->IsConstantOperand()) {
+    LConstantOperand* constant_source = LConstantOperand::cast(source);
+    if (destination->IsRegister()) {
+      Register dst = cgen_->ToRegister(destination);
+      if (cgen_->IsInteger32(constant_source)) {
+        cgen_->EmitLoadIntegerConstant(constant_source, dst);
+      } else {
+        __ Move(dst, cgen_->ToHandle(constant_source));
+      }
+    } else if (destination->IsDoubleRegister()) {
+      DoubleRegister result = cgen_->ToDoubleRegister(destination);
+      double v = cgen_->ToDouble(constant_source);
+      __ LoadDoubleLiteral(result, v, ip);
+    } else {
+      DCHECK(destination->IsStackSlot());
+      DCHECK(!in_cycle_);  // Constant moves happen after all cycles are gone.
+      if (cgen_->IsInteger32(constant_source)) {
+        cgen_->EmitLoadIntegerConstant(constant_source, kSavedValueRegister);
+      } else {
+        __ Move(kSavedValueRegister, cgen_->ToHandle(constant_source));
+      }
+      __ StoreP(kSavedValueRegister, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleRegister()) {
+    DoubleRegister source_register = cgen_->ToDoubleRegister(source);
+    if (destination->IsDoubleRegister()) {
+      __ fmr(cgen_->ToDoubleRegister(destination), source_register);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      __ stfd(source_register, cgen_->ToMemOperand(destination));
+    }
+
+  } else if (source->IsDoubleStackSlot()) {
+    MemOperand source_operand = cgen_->ToMemOperand(source);
+    if (destination->IsDoubleRegister()) {
+      __ lfd(cgen_->ToDoubleRegister(destination), source_operand);
+    } else {
+      DCHECK(destination->IsDoubleStackSlot());
+      MemOperand destination_operand = cgen_->ToMemOperand(destination);
+      if (in_cycle_) {
+// kScratchDoubleReg was used to break the cycle,
+// but kSavedValueRegister is free.
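+// (On 32-bit targets a double stack slot spans two 4-byte words, hence the
+// paired lwz/stw sequences below.)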
+#if V8_TARGET_ARCH_PPC64
+        __ ld(kSavedValueRegister, source_operand);
+        __ std(kSavedValueRegister, destination_operand);
+#else
+        MemOperand source_high_operand = cgen_->ToHighMemOperand(source);
+        MemOperand destination_high_operand =
+            cgen_->ToHighMemOperand(destination);
+        __ lwz(kSavedValueRegister, source_operand);
+        __ stw(kSavedValueRegister, destination_operand);
+        __ lwz(kSavedValueRegister, source_high_operand);
+        __ stw(kSavedValueRegister, destination_high_operand);
+#endif
+      } else {
+        __ lfd(kScratchDoubleReg, source_operand);
+        __ stfd(kScratchDoubleReg, destination_operand);
+      }
+    }
+  } else {
+    UNREACHABLE();
+  }
+
+  moves_[index].Eliminate();
+}
+
+
+#undef __
+}
+}  // namespace v8::internal
diff --git a/deps/v8/src/ppc/lithium-gap-resolver-ppc.h b/deps/v8/src/ppc/lithium-gap-resolver-ppc.h
new file mode 100644 (file)
index 0000000..78bd213
--- /dev/null
@@ -0,0 +1,60 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+#define V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
+
+#include "src/v8.h"
+
+#include "src/lithium.h"
+
+namespace v8 {
+namespace internal {
+
+class LCodeGen;
+class LGapResolver;
+
+class LGapResolver FINAL BASE_EMBEDDED {
+ public:
+  explicit LGapResolver(LCodeGen* owner);
+
+  // Resolve a set of parallel moves, emitting assembler instructions.
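+  // (The code generator typically calls Resolve() once per gap position,
+  // passing that gap's parallel move.)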
+  void Resolve(LParallelMove* parallel_move);
+
+ private:
+  // Build the initial list of moves.
+  void BuildInitialMoveList(LParallelMove* parallel_move);
+
+  // Perform the move at the moves_ index in question (possibly requiring
+  // other moves to satisfy dependencies).
+  void PerformMove(int index);
+
+  // If a cycle is found in the series of moves, save the blocking value to
+  // a scratch register.  The cycle must be found by hitting the root of the
+  // depth-first search.
+  void BreakCycle(int index);
+
+  // After a cycle has been resolved, restore the value from the scratch
+  // register to its proper destination.
+  void RestoreValue();
+
+  // Emit a move and remove it from the move graph.
+  void EmitMove(int index);
+
+  // Verify the move list before performing moves.
+  void Verify();
+
+  LCodeGen* cgen_;
+
+  // List of moves not yet resolved.
+  ZoneList<LMoveOperands> moves_;
+
+  int root_index_;
+  bool in_cycle_;
+  LOperand* saved_destination_;
+};
+}
+}  // namespace v8::internal
+
+#endif  // V8_PPC_LITHIUM_GAP_RESOLVER_PPC_H_
diff --git a/deps/v8/src/ppc/lithium-ppc.cc b/deps/v8/src/ppc/lithium-ppc.cc
new file mode 100644 (file)
index 0000000..42470c5
--- /dev/null
@@ -0,0 +1,2626 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <sstream>
+
+#include "src/v8.h"
+
+#include "src/hydrogen-osr.h"
+#include "src/lithium-inl.h"
+#include "src/ppc/lithium-codegen-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+#define DEFINE_COMPILE(type)                           \
+  void L##type::CompileToNative(LCodeGen* generator) { \
+    generator->Do##type(this);                         \
+  }
+LITHIUM_CONCRETE_INSTRUCTION_LIST(DEFINE_COMPILE)
+#undef DEFINE_COMPILE
+
+#ifdef DEBUG
+void LInstruction::VerifyCall() {
+  // Call instructions can use only fixed registers as temporaries and
+  // outputs because all registers are blocked by the calling convention.
+  // Input operands must use a fixed register or use-at-start policy or
+  // a non-register policy.
+  DCHECK(Output() == NULL || LUnallocated::cast(Output())->HasFixedPolicy() ||
+         !LUnallocated::cast(Output())->HasRegisterPolicy());
+  for (UseIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || operand->IsUsedAtStart());
+  }
+  for (TempIterator it(this); !it.Done(); it.Advance()) {
+    LUnallocated* operand = LUnallocated::cast(it.Current());
+    DCHECK(operand->HasFixedPolicy() || !operand->HasRegisterPolicy());
+  }
+}
+#endif
+
+
+void LInstruction::PrintTo(StringStream* stream) {
+  stream->Add("%s ", this->Mnemonic());
+
+  PrintOutputOperandTo(stream);
+
+  PrintDataTo(stream);
+
+  if (HasEnvironment()) {
+    stream->Add(" ");
+    environment()->PrintTo(stream);
+  }
+
+  if (HasPointerMap()) {
+    stream->Add(" ");
+    pointer_map()->PrintTo(stream);
+  }
+}
+
+
+void LInstruction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  for (int i = 0; i < InputCount(); i++) {
+    if (i > 0) stream->Add(" ");
+    if (InputAt(i) == NULL) {
+      stream->Add("NULL");
+    } else {
+      InputAt(i)->PrintTo(stream);
+    }
+  }
+}
+
+
+void LInstruction::PrintOutputOperandTo(StringStream* stream) {
+  if (HasResult()) result()->PrintTo(stream);
+}
+
+
+void LLabel::PrintDataTo(StringStream* stream) {
+  LGap::PrintDataTo(stream);
+  LLabel* rep = replacement();
+  if (rep != NULL) {
+    stream->Add(" Dead block replaced with B%d", rep->block_id());
+  }
+}
+
+
+bool LGap::IsRedundant() const {
+  for (int i = 0; i < 4; i++) {
+    if (parallel_moves_[i] != NULL && !parallel_moves_[i]->IsRedundant()) {
+      return false;
+    }
+  }
+
+  return true;
+}
+
+
+void LGap::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < 4; i++) {
+    stream->Add("(");
+    if (parallel_moves_[i] != NULL) {
+      parallel_moves_[i]->PrintDataTo(stream);
+    }
+    stream->Add(") ");
+  }
+}
+
+
+const char* LArithmeticD::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD:
+      return "add-d";
+    case Token::SUB:
+      return "sub-d";
+    case Token::MUL:
+      return "mul-d";
+    case Token::DIV:
+      return "div-d";
+    case Token::MOD:
+      return "mod-d";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+const char* LArithmeticT::Mnemonic() const {
+  switch (op()) {
+    case Token::ADD:
+      return "add-t";
+    case Token::SUB:
+      return "sub-t";
+    case Token::MUL:
+      return "mul-t";
+    case Token::MOD:
+      return "mod-t";
+    case Token::DIV:
+      return "div-t";
+    case Token::BIT_AND:
+      return "bit-and-t";
+    case Token::BIT_OR:
+      return "bit-or-t";
+    case Token::BIT_XOR:
+      return "bit-xor-t";
+    case Token::ROR:
+      return "ror-t";
+    case Token::SHL:
+      return "shl-t";
+    case Token::SAR:
+      return "sar-t";
+    case Token::SHR:
+      return "shr-t";
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+bool LGoto::HasInterestingComment(LCodeGen* gen) const {
+  return !gen->IsNextEmittedBlock(block_id());
+}
+
+
+void LGoto::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d", block_id());
+}
+
+
+void LBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("B%d | B%d on ", true_block_id(), false_block_id());
+  value()->PrintTo(stream);
+}
+
+
+void LCompareNumericAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if ");
+  left()->PrintTo(stream);
+  stream->Add(" %s ", Token::String(op()));
+  right()->PrintTo(stream);
+  stream->Add(" then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsObjectAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_object(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsStringAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_string(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsSmiAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_smi(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LIsUndetectableAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if is_undetectable(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LStringCompareAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if string_compare(");
+  left()->PrintTo(stream);
+  right()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasInstanceTypeAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_instance_type(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LHasCachedArrayIndexAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if has_cached_array_index(");
+  value()->PrintTo(stream);
+  stream->Add(") then B%d else B%d", true_block_id(), false_block_id());
+}
+
+
+void LClassOfTestAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if class_of_test(");
+  value()->PrintTo(stream);
+  stream->Add(", \"%o\") then B%d else B%d", *hydrogen()->class_name(),
+              true_block_id(), false_block_id());
+}
+
+
+void LTypeofIsAndBranch::PrintDataTo(StringStream* stream) {
+  stream->Add("if typeof ");
+  value()->PrintTo(stream);
+  stream->Add(" == \"%s\" then B%d else B%d",
+              hydrogen()->type_literal()->ToCString().get(), true_block_id(),
+              false_block_id());
+}
+
+
+void LStoreCodeEntry::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  function()->PrintTo(stream);
+  stream->Add(".code_entry = ");
+  code_object()->PrintTo(stream);
+}
+
+
+void LInnerAllocatedObject::PrintDataTo(StringStream* stream) {
+  stream->Add(" = ");
+  base_object()->PrintTo(stream);
+  stream->Add(" + ");
+  offset()->PrintTo(stream);
+}
+
+
+void LCallJSFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add("#%d / ", arity());
+}
+
+
+void LCallWithDescriptor::PrintDataTo(StringStream* stream) {
+  for (int i = 0; i < InputCount(); i++) {
+    InputAt(i)->PrintTo(stream);
+    stream->Add(" ");
+  }
+  stream->Add("#%d / ", arity());
+}
+
+
+void LLoadContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d]", slot_index());
+}
+
+
+void LStoreContextSlot::PrintDataTo(StringStream* stream) {
+  context()->PrintTo(stream);
+  stream->Add("[%d] <- ", slot_index());
+  value()->PrintTo(stream);
+}
+
+
+void LInvokeFunction::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  function()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNew::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+}
+
+
+void LCallNewArray::PrintDataTo(StringStream* stream) {
+  stream->Add("= ");
+  constructor()->PrintTo(stream);
+  stream->Add(" #%d / ", arity());
+  ElementsKind kind = hydrogen()->elements_kind();
+  stream->Add(" (%s) ", ElementsKindToString(kind));
+}
+
+
+void LAccessArgumentsAt::PrintDataTo(StringStream* stream) {
+  arguments()->PrintTo(stream);
+  stream->Add(" length ");
+  length()->PrintTo(stream);
+  stream->Add(" index ");
+  index()->PrintTo(stream);
+}
+
+
+void LStoreNamedField::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  std::ostringstream os;
+  os << hydrogen()->access() << " <- ";
+  stream->Add(os.str().c_str());
+  value()->PrintTo(stream);
+}
+
+
+void LStoreNamedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(".");
+  stream->Add(String::cast(*name())->ToCString().get());
+  stream->Add(" <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LLoadKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d]", base_offset());
+  } else {
+    stream->Add("]");
+  }
+}
+
+
+void LStoreKeyed::PrintDataTo(StringStream* stream) {
+  elements()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  if (hydrogen()->IsDehoisted()) {
+    stream->Add(" + %d] <-", base_offset());
+  } else {
+    stream->Add("] <- ");
+  }
+
+  if (value() == NULL) {
+    DCHECK(hydrogen()->IsConstantHoleStore() &&
+           hydrogen()->value()->representation().IsDouble());
+    stream->Add("<the hole(nan)>");
+  } else {
+    value()->PrintTo(stream);
+  }
+}
+
+
+void LStoreKeyedGeneric::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add("[");
+  key()->PrintTo(stream);
+  stream->Add("] <- ");
+  value()->PrintTo(stream);
+}
+
+
+void LTransitionElementsKind::PrintDataTo(StringStream* stream) {
+  object()->PrintTo(stream);
+  stream->Add(" %p -> %p", *original_map(), *transitioned_map());
+}
+
+
+int LPlatformChunk::GetNextSpillIndex(RegisterKind kind) {
+  // Reserve an extra slot so that a double-width value gets two
+  // consecutive spill slots.
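+  // E.g., with spill_slot_count_ at 3, a DOUBLE_REGISTERS request reserves
+  // slots 3 and 4, returns index 4, and leaves the count at 5.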
+  if (kind == DOUBLE_REGISTERS) spill_slot_count_++;
+  return spill_slot_count_++;
+}
+
+
+LOperand* LPlatformChunk::GetNextSpillSlot(RegisterKind kind) {
+  int index = GetNextSpillIndex(kind);
+  if (kind == DOUBLE_REGISTERS) {
+    return LDoubleStackSlot::Create(index, zone());
+  } else {
+    DCHECK(kind == GENERAL_REGISTERS);
+    return LStackSlot::Create(index, zone());
+  }
+}
+
+
+LPlatformChunk* LChunkBuilder::Build() {
+  DCHECK(is_unused());
+  chunk_ = new (zone()) LPlatformChunk(info(), graph());
+  LPhase phase("L_Building chunk", chunk_);
+  status_ = BUILDING;
+
+  // If compiling for OSR, reserve space for the unoptimized frame,
+  // which will be subsumed into this frame.
+  if (graph()->has_osr()) {
+    for (int i = graph()->osr()->UnoptimizedFrameSlots(); i > 0; i--) {
+      chunk_->GetNextSpillIndex(GENERAL_REGISTERS);
+    }
+  }
+
+  const ZoneList<HBasicBlock*>* blocks = graph()->blocks();
+  for (int i = 0; i < blocks->length(); i++) {
+    HBasicBlock* next = NULL;
+    if (i < blocks->length() - 1) next = blocks->at(i + 1);
+    DoBasicBlock(blocks->at(i), next);
+    if (is_aborted()) return NULL;
+  }
+  status_ = DONE;
+  return chunk_;
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(Register reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_REGISTER,
+                                   Register::ToAllocationIndex(reg));
+}
+
+
+LUnallocated* LChunkBuilder::ToUnallocated(DoubleRegister reg) {
+  return new (zone()) LUnallocated(LUnallocated::FIXED_DOUBLE_REGISTER,
+                                   DoubleRegister::ToAllocationIndex(reg));
+}
+
+
+LOperand* LChunkBuilder::UseFixed(HValue* value, Register fixed_register) {
+  return Use(value, ToUnallocated(fixed_register));
+}
+
+
+LOperand* LChunkBuilder::UseFixedDouble(HValue* value, DoubleRegister reg) {
+  return Use(value, ToUnallocated(reg));
+}
+
+
+LOperand* LChunkBuilder::UseRegister(HValue* value) {
+  return Use(value,
+             new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::UseRegisterAtStart(HValue* value) {
+  return Use(value, new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER,
+                                              LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseTempRegister(HValue* value) {
+  return Use(value, new (zone()) LUnallocated(LUnallocated::WRITABLE_REGISTER));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value) {
+  return Use(value, new (zone()) LUnallocated(LUnallocated::NONE));
+}
+
+
+LOperand* LChunkBuilder::UseAtStart(HValue* value) {
+  return Use(value, new (zone())
+             LUnallocated(LUnallocated::NONE, LUnallocated::USED_AT_START));
+}
+
+
+LOperand* LChunkBuilder::UseOrConstant(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : Use(value);
+}
+
+
+LOperand* LChunkBuilder::UseOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : UseAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstant(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : UseRegister(value);
+}
+
+
+LOperand* LChunkBuilder::UseRegisterOrConstantAtStart(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : UseRegisterAtStart(value);
+}
+
+
+LOperand* LChunkBuilder::UseConstant(HValue* value) {
+  return chunk_->DefineConstantOperand(HConstant::cast(value));
+}
+
+
+LOperand* LChunkBuilder::UseAny(HValue* value) {
+  return value->IsConstant()
+             ? chunk_->DefineConstantOperand(HConstant::cast(value))
+             : Use(value, new (zone()) LUnallocated(LUnallocated::ANY));
+}
+
+
+LOperand* LChunkBuilder::Use(HValue* value, LUnallocated* operand) {
+  if (value->EmitAtUses()) {
+    HInstruction* instr = HInstruction::cast(value);
+    VisitInstruction(instr);
+  }
+  operand->set_virtual_register(value->id());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::Define(LTemplateResultInstruction<1>* instr,
+                                    LUnallocated* result) {
+  result->set_virtual_register(current_instruction_->id());
+  instr->set_result(result);
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::DefineAsRegister(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER));
+}
+
+
+LInstruction* LChunkBuilder::DefineAsSpilled(
+    LTemplateResultInstruction<1>* instr, int index) {
+  return Define(instr,
+                new (zone()) LUnallocated(LUnallocated::FIXED_SLOT, index));
+}
+
+
+LInstruction* LChunkBuilder::DefineSameAsFirst(
+    LTemplateResultInstruction<1>* instr) {
+  return Define(instr,
+                new (zone()) LUnallocated(LUnallocated::SAME_AS_FIRST_INPUT));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixed(LTemplateResultInstruction<1>* instr,
+                                         Register reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::DefineFixedDouble(
+    LTemplateResultInstruction<1>* instr, DoubleRegister reg) {
+  return Define(instr, ToUnallocated(reg));
+}
+
+
+LInstruction* LChunkBuilder::AssignEnvironment(LInstruction* instr) {
+  HEnvironment* hydrogen_env = current_block_->last_environment();
+  int argument_index_accumulator = 0;
+  ZoneList<HValue*> objects_to_materialize(0, zone());
+  instr->set_environment(CreateEnvironment(
+      hydrogen_env, &argument_index_accumulator, &objects_to_materialize));
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::MarkAsCall(LInstruction* instr,
+                                        HInstruction* hinstr,
+                                        CanDeoptimize can_deoptimize) {
+  info()->MarkAsNonDeferredCalling();
+#ifdef DEBUG
+  instr->VerifyCall();
+#endif
+  instr->MarkAsCall();
+  instr = AssignPointerMap(instr);
+
+  // If the instruction does not have observable side effects, lazy
+  // deoptimization after the call will try to deoptimize to the point
+  // before the call.  Thus we still need to attach an environment to the
+  // call even if the call sequence cannot deoptimize eagerly.
+  bool needs_environment = (can_deoptimize == CAN_DEOPTIMIZE_EAGERLY) ||
+                           !hinstr->HasObservableSideEffects();
+  if (needs_environment && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+    // We can't really figure out if the environment is needed or not.
+    instr->environment()->set_has_been_used();
+  }
+
+  return instr;
+}
+
+
+LInstruction* LChunkBuilder::AssignPointerMap(LInstruction* instr) {
+  DCHECK(!instr->HasPointerMap());
+  instr->set_pointer_map(new (zone()) LPointerMap(zone()));
+  return instr;
+}
+
+
+LUnallocated* LChunkBuilder::TempRegister() {
+  LUnallocated* operand =
+      new (zone()) LUnallocated(LUnallocated::MUST_HAVE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LUnallocated* LChunkBuilder::TempDoubleRegister() {
+  LUnallocated* operand =
+      new (zone()) LUnallocated(LUnallocated::MUST_HAVE_DOUBLE_REGISTER);
+  int vreg = allocator_->GetVirtualRegister();
+  if (!allocator_->AllocationOk()) {
+    Abort(kOutOfVirtualRegistersWhileTryingToAllocateTempRegister);
+    vreg = 0;
+  }
+  operand->set_virtual_register(vreg);
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(Register reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LOperand* LChunkBuilder::FixedTemp(DoubleRegister reg) {
+  LUnallocated* operand = ToUnallocated(reg);
+  DCHECK(operand->HasFixedPolicy());
+  return operand;
+}
+
+
+LInstruction* LChunkBuilder::DoBlockEntry(HBlockEntry* instr) {
+  return new (zone()) LLabel(instr->block());
+}
+
+
+LInstruction* LChunkBuilder::DoDummyUse(HDummyUse* instr) {
+  return DefineAsRegister(new (zone()) LDummyUse(UseAny(instr->value())));
+}
+
+
+LInstruction* LChunkBuilder::DoEnvironmentMarker(HEnvironmentMarker* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoDeoptimize(HDeoptimize* instr) {
+  return AssignEnvironment(new (zone()) LDeoptimize);
+}
+
+
+LInstruction* LChunkBuilder::DoShift(Token::Value op,
+                                     HBitwiseBinaryOperation* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->left());
+
+    HValue* right_value = instr->right();
+    LOperand* right = NULL;
+    int constant_value = 0;
+    bool does_deopt = false;
+    if (right_value->IsConstant()) {
+      HConstant* constant = HConstant::cast(right_value);
+      right = chunk_->DefineConstantOperand(constant);
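+      // Masking with 0x1f mirrors JavaScript semantics: shift counts are
+      // taken modulo 32, so e.g. (x << 33) behaves like (x << 1).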
+      constant_value = constant->Integer32Value() & 0x1f;
+      // Left shifts can deoptimize if we shift by > 0 and the result cannot be
+      // truncated to smi.
+      if (instr->representation().IsSmi() && constant_value > 0) {
+        does_deopt = !instr->CheckUsesForFlag(HValue::kTruncatingToSmi);
+      }
+    } else {
+      right = UseRegisterAtStart(right_value);
+    }
+
+    // A logical right shift can also deoptimize: if the shift amount is 0
+    // (or statically unknown), the uint32 result may not be representable
+    // as an int32.
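+    // E.g., for x >>> 0 with the sign bit of x set, the result exceeds
+    // kMaxInt, so the operation must deopt unless all uses treat the value
+    // as uint32.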
+    if (op == Token::SHR && constant_value == 0) {
+      does_deopt = !instr->CheckFlag(HInstruction::kUint32);
+    }
+
+    LInstruction* result =
+        DefineAsRegister(new (zone()) LShiftI(op, left, right, does_deopt));
+    return does_deopt ? AssignEnvironment(result) : result;
+  } else {
+    return DoArithmeticT(op, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticD(Token::Value op,
+                                           HArithmeticBinaryOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->left()->representation().IsDouble());
+  DCHECK(instr->right()->representation().IsDouble());
+  if (op == Token::MOD) {
+    LOperand* left = UseFixedDouble(instr->left(), d1);
+    LOperand* right = UseFixedDouble(instr->right(), d2);
+    LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+    // We call a C function for double modulo.  It can't trigger a GC.  We
+    // need to use a fixed result register for the call.
+    // TODO(fschneider): Allow any register as input registers.
+    return MarkAsCall(DefineFixedDouble(result, d1), instr);
+  } else {
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    LArithmeticD* result = new (zone()) LArithmeticD(op, left, right);
+    return DefineAsRegister(result);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoArithmeticT(Token::Value op,
+                                           HBinaryOperation* instr) {
+  HValue* left = instr->left();
+  HValue* right = instr->right();
+  DCHECK(left->representation().IsTagged());
+  DCHECK(right->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left_operand = UseFixed(left, r4);
+  LOperand* right_operand = UseFixed(right, r3);
+  LArithmeticT* result =
+      new (zone()) LArithmeticT(op, context, left_operand, right_operand);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+void LChunkBuilder::DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block) {
+  DCHECK(is_building());
+  current_block_ = block;
+  next_block_ = next_block;
+  if (block->IsStartBlock()) {
+    block->UpdateEnvironment(graph_->start_environment());
+    argument_count_ = 0;
+  } else if (block->predecessors()->length() == 1) {
+    // We have a single predecessor => copy environment and outgoing
+    // argument count from the predecessor.
+    DCHECK(block->phis()->length() == 0);
+    HBasicBlock* pred = block->predecessors()->at(0);
+    HEnvironment* last_environment = pred->last_environment();
+    DCHECK(last_environment != NULL);
+    // Only copy the environment if it is used again later.
+    if (pred->end()->SecondSuccessor() == NULL) {
+      DCHECK(pred->end()->FirstSuccessor() == block);
+    } else {
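+      // Copy when some successor with a higher block id has yet to be
+      // visited and will read this environment again.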
+      if (pred->end()->FirstSuccessor()->block_id() > block->block_id() ||
+          pred->end()->SecondSuccessor()->block_id() > block->block_id()) {
+        last_environment = last_environment->Copy();
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    DCHECK(pred->argument_count() >= 0);
+    argument_count_ = pred->argument_count();
+  } else {
+    // We are at a state join => process phis.
+    HBasicBlock* pred = block->predecessors()->at(0);
+    // No need to copy the environment, it cannot be used later.
+    HEnvironment* last_environment = pred->last_environment();
+    for (int i = 0; i < block->phis()->length(); ++i) {
+      HPhi* phi = block->phis()->at(i);
+      if (phi->HasMergedIndex()) {
+        last_environment->SetValueAt(phi->merged_index(), phi);
+      }
+    }
+    for (int i = 0; i < block->deleted_phis()->length(); ++i) {
+      if (block->deleted_phis()->at(i) < last_environment->length()) {
+        last_environment->SetValueAt(block->deleted_phis()->at(i),
+                                     graph_->GetConstantUndefined());
+      }
+    }
+    block->UpdateEnvironment(last_environment);
+    // Pick up the outgoing argument count of one of the predecessors.
+    argument_count_ = pred->argument_count();
+  }
+  HInstruction* current = block->first();
+  int start = chunk_->instructions()->length();
+  while (current != NULL && !is_aborted()) {
+    // Code for constants in registers is generated lazily.
+    if (!current->EmitAtUses()) {
+      VisitInstruction(current);
+    }
+    current = current->next();
+  }
+  int end = chunk_->instructions()->length() - 1;
+  if (end >= start) {
+    block->set_first_instruction_index(start);
+    block->set_last_instruction_index(end);
+  }
+  block->set_argument_count(argument_count_);
+  next_block_ = NULL;
+  current_block_ = NULL;
+}
+
+
+void LChunkBuilder::VisitInstruction(HInstruction* current) {
+  HInstruction* old_current = current_instruction_;
+  current_instruction_ = current;
+
+  LInstruction* instr = NULL;
+  if (current->CanReplaceWithDummyUses()) {
+    if (current->OperandCount() == 0) {
+      instr = DefineAsRegister(new (zone()) LDummy());
+    } else {
+      DCHECK(!current->OperandAt(0)->IsControlInstruction());
+      instr = DefineAsRegister(new (zone())
+                               LDummyUse(UseAny(current->OperandAt(0))));
+    }
+    for (int i = 1; i < current->OperandCount(); ++i) {
+      if (current->OperandAt(i)->IsControlInstruction()) continue;
+      LInstruction* dummy =
+          new (zone()) LDummyUse(UseAny(current->OperandAt(i)));
+      dummy->set_hydrogen_value(current);
+      chunk_->AddInstruction(dummy, current_block_);
+    }
+  } else {
+    HBasicBlock* successor;
+    if (current->IsControlInstruction() &&
+        HControlInstruction::cast(current)->KnownSuccessorBlock(&successor) &&
+        successor != NULL) {
+      instr = new (zone()) LGoto(successor);
+    } else {
+      instr = current->CompileToLithium(this);
+    }
+  }
+
+  argument_count_ += current->argument_delta();
+  DCHECK(argument_count_ >= 0);
+
+  if (instr != NULL) {
+    AddInstruction(instr, current);
+  }
+
+  current_instruction_ = old_current;
+}
+
+
+void LChunkBuilder::AddInstruction(LInstruction* instr,
+                                   HInstruction* hydrogen_val) {
+  // Associate the hydrogen instruction first, since we may need it for
+  // the ClobbersRegisters() or ClobbersDoubleRegisters() calls below.
+  instr->set_hydrogen_value(hydrogen_val);
+
+#ifdef DEBUG
+  // Make sure that the lithium instruction has either no fixed register
+  // constraints in temps or the result OR no uses that are only used at
+  // start. If this invariant doesn't hold, the register allocator can decide
+  // to insert a split of a range immediately before the instruction due to an
+  // already allocated register needing to be used for the instruction's fixed
+  // register constraint. In this case, the register allocator won't see an
+  // interference between the split child and the use-at-start (it would if
+  // it were just a plain use), so it is free to move the split child into
+  // the same register that is used for the use-at-start.
+  // See https://code.google.com/p/chromium/issues/detail?id=201590
+  if (!(instr->ClobbersRegisters() &&
+        instr->ClobbersDoubleRegisters(isolate()))) {
+    int fixed = 0;
+    int used_at_start = 0;
+    for (UseIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->IsUsedAtStart()) ++used_at_start;
+    }
+    if (instr->Output() != NULL) {
+      if (LUnallocated::cast(instr->Output())->HasFixedPolicy()) ++fixed;
+    }
+    for (TempIterator it(instr); !it.Done(); it.Advance()) {
+      LUnallocated* operand = LUnallocated::cast(it.Current());
+      if (operand->HasFixedPolicy()) ++fixed;
+    }
+    DCHECK(fixed == 0 || used_at_start == 0);
+  }
+#endif
+
+  if (FLAG_stress_pointer_maps && !instr->HasPointerMap()) {
+    instr = AssignPointerMap(instr);
+  }
+  if (FLAG_stress_environments && !instr->HasEnvironment()) {
+    instr = AssignEnvironment(instr);
+  }
+  chunk_->AddInstruction(instr, current_block_);
+
+  if (instr->IsCall()) {
+    HValue* hydrogen_value_for_lazy_bailout = hydrogen_val;
+    LInstruction* instruction_needing_environment = NULL;
+    if (hydrogen_val->HasObservableSideEffects()) {
+      HSimulate* sim = HSimulate::cast(hydrogen_val->next());
+      instruction_needing_environment = instr;
+      sim->ReplayEnvironment(current_block_->last_environment());
+      hydrogen_value_for_lazy_bailout = sim;
+    }
+    LInstruction* bailout = AssignEnvironment(new (zone()) LLazyBailout());
+    bailout->set_hydrogen_value(hydrogen_value_for_lazy_bailout);
+    chunk_->AddInstruction(bailout, current_block_);
+    if (instruction_needing_environment != NULL) {
+      // Store the lazy deopt environment with the instruction if needed.
+      // Right now it is only used for LInstanceOfKnownGlobal.
+      instruction_needing_environment->SetDeferredLazyDeoptimizationEnvironment(
+          bailout->environment());
+    }
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoGoto(HGoto* instr) {
+  return new (zone()) LGoto(instr->FirstSuccessor());
+}
+
+
+LInstruction* LChunkBuilder::DoBranch(HBranch* instr) {
+  HValue* value = instr->value();
+  Representation r = value->representation();
+  HType type = value->type();
+  ToBooleanStub::Types expected = instr->expected_input_types();
+  if (expected.IsEmpty()) expected = ToBooleanStub::Types::Generic();
+
+  bool easy_case = !r.IsTagged() || type.IsBoolean() || type.IsSmi() ||
+                   type.IsJSArray() || type.IsHeapNumber() || type.IsString();
+  LInstruction* branch = new (zone()) LBranch(UseRegister(value));
+  if (!easy_case &&
+      ((!expected.Contains(ToBooleanStub::SMI) && expected.NeedsMap()) ||
+       !expected.IsGeneric())) {
+    branch = AssignEnvironment(branch);
+  }
+  return branch;
+}
+
+
+LInstruction* LChunkBuilder::DoDebugBreak(HDebugBreak* instr) {
+  return new (zone()) LDebugBreak();
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMap(HCompareMap* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new (zone()) LCmpMapAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsLength(HArgumentsLength* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* value = UseRegister(instr->value());
+  return DefineAsRegister(new (zone()) LArgumentsLength(value));
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsElements(HArgumentsElements* elems) {
+  info()->MarkAsRequiresFrame();
+  return DefineAsRegister(new (zone()) LArgumentsElements);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOf(HInstanceOf* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LInstanceOf* result = new (zone()) LInstanceOf(
+      context, UseFixed(instr->left(), r3), UseFixed(instr->right(), r4));
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoInstanceOfKnownGlobal(
+    HInstanceOfKnownGlobal* instr) {
+  LInstanceOfKnownGlobal* result = new (zone())
+      LInstanceOfKnownGlobal(UseFixed(instr->context(), cp),
+                             UseFixed(instr->left(), r3), FixedTemp(r7));
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoWrapReceiver(HWrapReceiver* instr) {
+  LOperand* receiver = UseRegisterAtStart(instr->receiver());
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LWrapReceiver* result = new (zone()) LWrapReceiver(receiver, function);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoApplyArguments(HApplyArguments* instr) {
+  LOperand* function = UseFixed(instr->function(), r4);
+  LOperand* receiver = UseFixed(instr->receiver(), r3);
+  LOperand* length = UseFixed(instr->length(), r5);
+  LOperand* elements = UseFixed(instr->elements(), r6);
+  LApplyArguments* result =
+      new (zone()) LApplyArguments(function, receiver, length, elements);
+  return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoPushArguments(HPushArguments* instr) {
+  int argc = instr->OperandCount();
+  for (int i = 0; i < argc; ++i) {
+    LOperand* argument = Use(instr->argument(i));
+    AddInstruction(new (zone()) LPushArgument(argument), instr);
+  }
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreCodeEntry(
+    HStoreCodeEntry* store_code_entry) {
+  LOperand* function = UseRegister(store_code_entry->function());
+  LOperand* code_object = UseTempRegister(store_code_entry->code_object());
+  return new (zone()) LStoreCodeEntry(function, code_object);
+}
+
+
+LInstruction* LChunkBuilder::DoInnerAllocatedObject(
+    HInnerAllocatedObject* instr) {
+  LOperand* base_object = UseRegisterAtStart(instr->base_object());
+  LOperand* offset = UseRegisterOrConstantAtStart(instr->offset());
+  return DefineAsRegister(new (zone())
+                          LInnerAllocatedObject(base_object, offset));
+}
+
+
+LInstruction* LChunkBuilder::DoThisFunction(HThisFunction* instr) {
+  return instr->HasNoUses() ? NULL
+                            : DefineAsRegister(new (zone()) LThisFunction);
+}
+
+
+LInstruction* LChunkBuilder::DoContext(HContext* instr) {
+  if (instr->HasNoUses()) return NULL;
+
+  if (info()->IsStub()) {
+    return DefineFixed(new (zone()) LContext, cp);
+  }
+
+  return DefineAsRegister(new (zone()) LContext);
+}
+
+
+LInstruction* LChunkBuilder::DoDeclareGlobals(HDeclareGlobals* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(new (zone()) LDeclareGlobals(context), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallJSFunction(HCallJSFunction* instr) {
+  LOperand* function = UseFixed(instr->function(), r4);
+
+  LCallJSFunction* result = new (zone()) LCallJSFunction(function);
+
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallWithDescriptor(HCallWithDescriptor* instr) {
+  CallInterfaceDescriptor descriptor = instr->descriptor();
+
+  LOperand* target = UseRegisterOrConstantAtStart(instr->target());
+  ZoneList<LOperand*> ops(instr->OperandCount(), zone());
+  ops.Add(target, zone());
+  for (int i = 1; i < instr->OperandCount(); i++) {
+    LOperand* op =
+        UseFixed(instr->OperandAt(i), descriptor.GetParameterRegister(i - 1));
+    ops.Add(op, zone());
+  }
+
+  LCallWithDescriptor* result =
+      new (zone()) LCallWithDescriptor(descriptor, ops, zone());
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
+    HTailCallThroughMegamorphicCache* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* receiver_register =
+      UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
+  LOperand* name_register =
+      UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  // Not marked as call. It can't deoptimize, and it never returns.
+  return new (zone()) LTailCallThroughMegamorphicCache(
+      context, receiver_register, name_register);
+}
+
+
+LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), r4);
+  LInvokeFunction* result = new (zone()) LInvokeFunction(context, function);
+  return MarkAsCall(DefineFixed(result, r3), instr, CANNOT_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
+  switch (instr->op()) {
+    case kMathFloor:
+      return DoMathFloor(instr);
+    case kMathRound:
+      return DoMathRound(instr);
+    case kMathFround:
+      return DoMathFround(instr);
+    case kMathAbs:
+      return DoMathAbs(instr);
+    case kMathLog:
+      return DoMathLog(instr);
+    case kMathExp:
+      return DoMathExp(instr);
+    case kMathSqrt:
+      return DoMathSqrt(instr);
+    case kMathPowHalf:
+      return DoMathPowHalf(instr);
+    case kMathClz32:
+      return DoMathClz32(instr);
+    default:
+      UNREACHABLE();
+      return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFloor* result = new (zone()) LMathFloor(input);
+  return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
+}
+
+
+LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp = TempDoubleRegister();
+  LMathRound* result = new (zone()) LMathRound(input, temp);
+  return AssignEnvironment(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFround(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegister(instr->value());
+  LMathFround* result = new (zone()) LMathFround(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
+  Representation r = instr->value()->representation();
+  LOperand* context = (r.IsDouble() || r.IsSmiOrInteger32())
+                          ? NULL
+                          : UseFixed(instr->context(), cp);
+  LOperand* input = UseRegister(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LMathAbs(context, input));
+  if (!r.IsDouble() && !r.IsSmiOrInteger32()) result = AssignPointerMap(result);
+  if (!r.IsDouble()) result = AssignEnvironment(result);
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseFixedDouble(instr->value(), d1);
+  return MarkAsCall(DefineFixedDouble(new (zone()) LMathLog(input), d1), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoMathClz32(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathClz32* result = new (zone()) LMathClz32(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
+  DCHECK(instr->representation().IsDouble());
+  DCHECK(instr->value()->representation().IsDouble());
+  LOperand* input = UseRegister(instr->value());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LOperand* double_temp = TempDoubleRegister();
+  LMathExp* result = new (zone()) LMathExp(input, double_temp, temp1, temp2);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathSqrt* result = new (zone()) LMathSqrt(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
+  LOperand* input = UseRegisterAtStart(instr->value());
+  LMathPowHalf* result = new (zone()) LMathPowHalf(input);
+  return DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNew(HCallNew* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* constructor = UseFixed(instr->constructor(), r4);
+  LCallNew* result = new (zone()) LCallNew(context, constructor);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallNewArray(HCallNewArray* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* constructor = UseFixed(instr->constructor(), r4);
+  LCallNewArray* result = new (zone()) LCallNewArray(context, constructor);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallFunction(HCallFunction* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseFixed(instr->function(), r4);
+  LCallFunction* call = new (zone()) LCallFunction(context, function);
+  return MarkAsCall(DefineFixed(call, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCallRuntime(HCallRuntime* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new (zone()) LCallRuntime(context), r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoRor(HRor* instr) {
+  return DoShift(Token::ROR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShr(HShr* instr) {
+  return DoShift(Token::SHR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoSar(HSar* instr) {
+  return DoShift(Token::SAR, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoShl(HShl* instr) {
+  return DoShift(Token::SHL, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoBitwise(HBitwise* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    DCHECK(instr->CheckFlag(HValue::kTruncatingToInt32));
+
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    return DefineAsRegister(new (zone()) LBitI(left, right));
+  } else {
+    return DoArithmeticT(instr->op(), instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDivByPowerOf2I(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LDivByPowerOf2I(dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kCanOverflow) && divisor == -1) ||
+      (!instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32) &&
+       divisor != 1 && divisor != -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivByConstI(HDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LDivByConstI(dividend, divisor));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      !instr->CheckFlag(HInstruction::kAllUsesTruncatingToInt32)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDivI(HDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LDivI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero) ||
+      (instr->CheckFlag(HValue::kCanOverflow) &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32)) ||
+      (!instr->IsMathFloorOfDiv() &&
+       !instr->CheckFlag(HValue::kAllUsesTruncatingToInt32))) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoDiv(HDiv* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoDivByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoDivByConstI(instr);
+    } else {
+      return DoDivI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::DIV, instr);
+  } else {
+    return DoArithmeticT(Token::DIV, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr) {
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LFlooringDivByPowerOf2I(dividend, divisor));
+  if ((instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0) ||
+      (instr->CheckFlag(HValue::kLeftCanBeMinInt) && divisor == -1)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivByConstI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LOperand* temp =
+      ((divisor > 0 && !instr->CheckFlag(HValue::kLeftCanBeNegative)) ||
+       (divisor < 0 && !instr->CheckFlag(HValue::kLeftCanBePositive)))
+          ? NULL
+          : TempRegister();
+  LInstruction* result = DefineAsRegister(
+      new (zone()) LFlooringDivByConstI(dividend, divisor, temp));
+  if (divisor == 0 ||
+      (instr->CheckFlag(HValue::kBailoutOnMinusZero) && divisor < 0)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoFlooringDivI(HMathFloorOfDiv* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LFlooringDivI* div = new (zone()) LFlooringDivI(dividend, divisor);
+  return AssignEnvironment(DefineAsRegister(div));
+}
+
+
+LInstruction* LChunkBuilder::DoMathFloorOfDiv(HMathFloorOfDiv* instr) {
+  if (instr->RightIsPowerOf2()) {
+    return DoFlooringDivByPowerOf2I(instr);
+  } else if (instr->right()->IsConstant()) {
+    return DoFlooringDivByConstI(instr);
+  } else {
+    return DoFlooringDivI(instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoModByPowerOf2I(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegisterAtStart(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineSameAsFirst(new (zone()) LModByPowerOf2I(dividend, divisor));
+  if (instr->CheckFlag(HValue::kLeftCanBeNegative) &&
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModByConstI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  int32_t divisor = instr->right()->GetInteger32Constant();
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LModByConstI(dividend, divisor));
+  if (divisor == 0 || instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoModI(HMod* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  LOperand* dividend = UseRegister(instr->left());
+  LOperand* divisor = UseRegister(instr->right());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LModI(dividend, divisor));
+  if (instr->CheckFlag(HValue::kCanBeDivByZero) ||
+      instr->CheckFlag(HValue::kBailoutOnMinusZero)) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoMod(HMod* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    if (instr->RightIsPowerOf2()) {
+      return DoModByPowerOf2I(instr);
+    } else if (instr->right()->IsConstant()) {
+      return DoModByConstI(instr);
+    } else {
+      return DoModI(instr);
+    }
+  } else if (instr->representation().IsDouble()) {
+    return DoArithmeticD(Token::MOD, instr);
+  } else {
+    return DoArithmeticT(Token::MOD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMul(HMul* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    HValue* left = instr->BetterLeftOperand();
+    HValue* right = instr->BetterRightOperand();
+    LOperand* left_op;
+    LOperand* right_op;
+    bool can_overflow = instr->CheckFlag(HValue::kCanOverflow);
+    bool bailout_on_minus_zero = instr->CheckFlag(HValue::kBailoutOnMinusZero);
+
+    if (right->IsConstant()) {
+      HConstant* constant = HConstant::cast(right);
+      int32_t constant_value = constant->Integer32Value();
+      // The constants -1, 0 and 1 can be used as constant operands even if
+      // the result can overflow; other constants qualify only when overflow
+      // is impossible.
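+      // Illustrative reading: x * 3 with kCanOverflow set keeps 3 in a
+      // register for the overflow-checked multiply, while x * 0, x * 1 and
+      // x * -1 admit cheap special-case sequences even with overflow checks.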
+      if (!can_overflow || ((constant_value >= -1) && (constant_value <= 1))) {
+        left_op = UseRegisterAtStart(left);
+        right_op = UseConstant(right);
+      } else {
+        if (bailout_on_minus_zero) {
+          left_op = UseRegister(left);
+        } else {
+          left_op = UseRegisterAtStart(left);
+        }
+        right_op = UseRegister(right);
+      }
+    } else {
+      if (bailout_on_minus_zero) {
+        left_op = UseRegister(left);
+      } else {
+        left_op = UseRegisterAtStart(left);
+      }
+      right_op = UseRegister(right);
+    }
+    LMulI* mul = new (zone()) LMulI(left_op, right_op);
+    if (can_overflow || bailout_on_minus_zero) {
+      AssignEnvironment(mul);
+    }
+    return DefineAsRegister(mul);
+
+  } else if (instr->representation().IsDouble()) {
+    if (instr->HasOneUse() &&
+        (instr->uses().value()->IsAdd() || instr->uses().value()->IsSub())) {
+      HBinaryOperation* use = HBinaryOperation::cast(instr->uses().value());
+
+      if (use->IsAdd() && instr == use->left()) {
+        // This mul is the lhs of an add. The add and mul will be folded into a
+        // multiply-add in DoAdd.
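+        // (The folded LMultiplyAddD form typically lowers to a single
+        // fused multiply-add; PPC provides fmadd for this.)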
+        return NULL;
+      }
+      if (instr == use->right() && use->IsAdd() &&
+          !(use->left()->IsMul() && use->left()->HasOneUse())) {
+        // This mul is the rhs of an add, where the lhs is not another mul.
+        // The add and mul will be folded into a multiply-add in DoAdd.
+        return NULL;
+      }
+      if (instr == use->left() && use->IsSub()) {
+        // This mul is the lhs of a sub. The mul and sub will be folded into a
+        // multiply-sub in DoSub.
+        return NULL;
+      }
+    }
+
+    return DoArithmeticD(Token::MUL, instr);
+  } else {
+    return DoArithmeticT(Token::MUL, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoSub(HSub* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+
+    if (instr->left()->IsConstant() &&
+        !instr->CheckFlag(HValue::kCanOverflow)) {
+      // If lhs is constant, do reverse subtraction instead.
+      return DoRSub(instr);
+    }
+
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LSubI* sub = new (zone()) LSubI(left, right);
+    LInstruction* result = DefineAsRegister(sub);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
+      return DoMultiplySub(instr->right(), HMul::cast(instr->left()));
+    }
+
+    return DoArithmeticD(Token::SUB, instr);
+  } else {
+    return DoArithmeticT(Token::SUB, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoRSub(HSub* instr) {
+  DCHECK(instr->representation().IsSmiOrInteger32());
+  DCHECK(instr->left()->representation().Equals(instr->representation()));
+  DCHECK(instr->right()->representation().Equals(instr->representation()));
+  DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+
+  // Note: The lhs of the subtraction becomes the rhs of the
+  // reverse-subtraction.
+  LOperand* left = UseRegisterAtStart(instr->right());
+  LOperand* right = UseOrConstantAtStart(instr->left());
+  LRSubI* rsb = new (zone()) LRSubI(left, right);
+  LInstruction* result = DefineAsRegister(rsb);
+  return result;
+}
+
+
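+// Folds a double (mul + addend) pair into a single LMultiplyAddD (and the
+// analogous (minuend - mul) pair into LMultiplySubD below). The result is
+// defined same-as-first, presumably to match a destructive fused
+// multiply-add register constraint on PPC.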
+LInstruction* LChunkBuilder::DoMultiplyAdd(HMul* mul, HValue* addend) {
+  LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+  LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+  LOperand* addend_op = UseRegisterAtStart(addend);
+  return DefineSameAsFirst(
+      new (zone()) LMultiplyAddD(addend_op, multiplier_op, multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoMultiplySub(HValue* minuend, HMul* mul) {
+  LOperand* minuend_op = UseRegisterAtStart(minuend);
+  LOperand* multiplier_op = UseRegisterAtStart(mul->left());
+  LOperand* multiplicand_op = UseRegisterAtStart(mul->right());
+
+  return DefineSameAsFirst(
+      new (zone()) LMultiplySubD(minuend_op, multiplier_op, multiplicand_op));
+}
+
+
+LInstruction* LChunkBuilder::DoAdd(HAdd* instr) {
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    LOperand* left = UseRegisterAtStart(instr->BetterLeftOperand());
+    LOperand* right = UseOrConstantAtStart(instr->BetterRightOperand());
+    LAddI* add = new (zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    if (instr->CheckFlag(HValue::kCanOverflow)) {
+      result = AssignEnvironment(result);
+    }
+    return result;
+  } else if (instr->representation().IsExternal()) {
+    DCHECK(instr->left()->representation().IsExternal());
+    DCHECK(instr->right()->representation().IsInteger32());
+    DCHECK(!instr->CheckFlag(HValue::kCanOverflow));
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseOrConstantAtStart(instr->right());
+    LAddI* add = new (zone()) LAddI(left, right);
+    LInstruction* result = DefineAsRegister(add);
+    return result;
+  } else if (instr->representation().IsDouble()) {
+    if (instr->left()->IsMul() && instr->left()->HasOneUse()) {
+      return DoMultiplyAdd(HMul::cast(instr->left()), instr->right());
+    }
+
+    if (instr->right()->IsMul() && instr->right()->HasOneUse()) {
+      DCHECK(!instr->left()->IsMul() || !instr->left()->HasOneUse());
+      return DoMultiplyAdd(HMul::cast(instr->right()), instr->left());
+    }
+
+    return DoArithmeticD(Token::ADD, instr);
+  } else {
+    return DoArithmeticT(Token::ADD, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoMathMinMax(HMathMinMax* instr) {
+  LOperand* left = NULL;
+  LOperand* right = NULL;
+  if (instr->representation().IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(instr->representation()));
+    DCHECK(instr->right()->representation().Equals(instr->representation()));
+    left = UseRegisterAtStart(instr->BetterLeftOperand());
+    right = UseOrConstantAtStart(instr->BetterRightOperand());
+  } else {
+    DCHECK(instr->representation().IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    left = UseRegisterAtStart(instr->left());
+    right = UseRegisterAtStart(instr->right());
+  }
+  return DefineAsRegister(new (zone()) LMathMinMax(left, right));
+}
+
+
+LInstruction* LChunkBuilder::DoPower(HPower* instr) {
+  DCHECK(instr->representation().IsDouble());
+  // We call a C function for double power. It can't trigger a GC.
+  // We need to use a fixed result register for the call.
+  Representation exponent_type = instr->right()->representation();
+  DCHECK(instr->left()->representation().IsDouble());
+  LOperand* left = UseFixedDouble(instr->left(), d1);
+  LOperand* right =
+      exponent_type.IsDouble()
+          ? UseFixedDouble(instr->right(), d2)
+          : UseFixed(instr->right(), MathPowTaggedDescriptor::exponent());
+  LPower* result = new (zone()) LPower(left, right);
+  return MarkAsCall(DefineFixedDouble(result, d3), instr,
+                    CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareGeneric(HCompareGeneric* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r4);
+  LOperand* right = UseFixed(instr->right(), r3);
+  LCmpT* result = new (zone()) LCmpT(context, left, right);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareNumericAndBranch(
+    HCompareNumericAndBranch* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmiOrInteger32()) {
+    DCHECK(instr->left()->representation().Equals(r));
+    DCHECK(instr->right()->representation().Equals(r));
+    LOperand* left = UseRegisterOrConstantAtStart(instr->left());
+    LOperand* right = UseRegisterOrConstantAtStart(instr->right());
+    return new (zone()) LCompareNumericAndBranch(left, right);
+  } else {
+    DCHECK(r.IsDouble());
+    DCHECK(instr->left()->representation().IsDouble());
+    DCHECK(instr->right()->representation().IsDouble());
+    LOperand* left = UseRegisterAtStart(instr->left());
+    LOperand* right = UseRegisterAtStart(instr->right());
+    return new (zone()) LCompareNumericAndBranch(left, right);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoCompareObjectEqAndBranch(
+    HCompareObjectEqAndBranch* instr) {
+  LOperand* left = UseRegisterAtStart(instr->left());
+  LOperand* right = UseRegisterAtStart(instr->right());
+  return new (zone()) LCmpObjectEqAndBranch(left, right);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareHoleAndBranch(
+    HCompareHoleAndBranch* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LCmpHoleAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoCompareMinusZeroAndBranch(
+    HCompareMinusZeroAndBranch* instr) {
+  LOperand* value = UseRegister(instr->value());
+  LOperand* scratch = TempRegister();
+  return new (zone()) LCompareMinusZeroAndBranch(value, scratch);
+}
+
+
+LInstruction* LChunkBuilder::DoIsObjectAndBranch(HIsObjectAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new (zone()) LIsObjectAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsStringAndBranch(HIsStringAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* temp = TempRegister();
+  return new (zone()) LIsStringAndBranch(value, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoIsSmiAndBranch(HIsSmiAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new (zone()) LIsSmiAndBranch(Use(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsUndetectableAndBranch(
+    HIsUndetectableAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LIsUndetectableAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoStringCompareAndBranch(
+    HStringCompareAndBranch* instr) {
+  DCHECK(instr->left()->representation().IsTagged());
+  DCHECK(instr->right()->representation().IsTagged());
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r4);
+  LOperand* right = UseFixed(instr->right(), r3);
+  LStringCompareAndBranch* result =
+      new (zone()) LStringCompareAndBranch(context, left, right);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoHasInstanceTypeAndBranch(
+    HHasInstanceTypeAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return new (zone()) LHasInstanceTypeAndBranch(value);
+}
+
+
+LInstruction* LChunkBuilder::DoGetCachedArrayIndex(
+    HGetCachedArrayIndex* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegisterAtStart(instr->value());
+
+  return DefineAsRegister(new (zone()) LGetCachedArrayIndex(value));
+}
+
+
+LInstruction* LChunkBuilder::DoHasCachedArrayIndexAndBranch(
+    HHasCachedArrayIndexAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  return new (zone())
+      LHasCachedArrayIndexAndBranch(UseRegisterAtStart(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoClassOfTestAndBranch(
+    HClassOfTestAndBranch* instr) {
+  DCHECK(instr->value()->representation().IsTagged());
+  LOperand* value = UseRegister(instr->value());
+  return new (zone()) LClassOfTestAndBranch(value, TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoMapEnumLength(HMapEnumLength* instr) {
+  LOperand* map = UseRegisterAtStart(instr->value());
+  return DefineAsRegister(new (zone()) LMapEnumLength(map));
+}
+
+
+LInstruction* LChunkBuilder::DoDateField(HDateField* instr) {
+  LOperand* object = UseFixed(instr->value(), r3);
+  LDateField* result =
+      new (zone()) LDateField(object, FixedTemp(r4), instr->index());
+  return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringGetChar(HSeqStringGetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new (zone()) LSeqStringGetChar(string, index));
+}
+
+
+LInstruction* LChunkBuilder::DoSeqStringSetChar(HSeqStringSetChar* instr) {
+  LOperand* string = UseRegisterAtStart(instr->string());
+  LOperand* index = FLAG_debug_code
+                        ? UseRegisterAtStart(instr->index())
+                        : UseRegisterOrConstantAtStart(instr->index());
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* context = FLAG_debug_code ? UseFixed(instr->context(), cp) : NULL;
+  return new (zone()) LSeqStringSetChar(context, string, index, value);
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheck(HBoundsCheck* instr) {
+  if (!FLAG_debug_code && instr->skip_check()) return NULL;
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  LOperand* length = !index->IsConstantOperand()
+                         ? UseRegisterOrConstantAtStart(instr->length())
+                         : UseRegisterAtStart(instr->length());
+  LInstruction* result = new (zone()) LBoundsCheck(index, length);
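+  // A bounds check that Hydrogen proved redundant is emitted only under
+  // --debug-code (as a verification aid) and then needs no deoptimization
+  // environment; every other check can deopt and is given one.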
+  if (!FLAG_debug_code || !instr->skip_check()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoBoundsCheckBaseIndexInformation(
+    HBoundsCheckBaseIndexInformation* instr) {
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAbnormalExit(HAbnormalExit* instr) {
+  // The control instruction marking the end of a block that completed
+  // abruptly (e.g., threw an exception).  There is nothing specific to do.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoUseConst(HUseConst* instr) { return NULL; }
+
+
+LInstruction* LChunkBuilder::DoForceRepresentation(HForceRepresentation* bad) {
+  // All HForceRepresentation instructions should be eliminated in the
+  // representation change phase of Hydrogen.
+  UNREACHABLE();
+  return NULL;
+}
+
+
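+// Lowers an HChange representation conversion to the matching tag, untag or
+// numeric-conversion instruction. Conversions that can fail (e.g. untagging
+// a value that may not be a smi, or a double-to-int32 that cannot truncate)
+// get a deoptimization environment; conversions that may allocate a heap
+// number get a pointer map so the GC can walk the frame.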
+LInstruction* LChunkBuilder::DoChange(HChange* instr) {
+  Representation from = instr->from();
+  Representation to = instr->to();
+  HValue* val = instr->value();
+  if (from.IsSmi()) {
+    if (to.IsTagged()) {
+      LOperand* value = UseRegister(val);
+      return DefineSameAsFirst(new (zone()) LDummyUse(value));
+    }
+    from = Representation::Tagged();
+  }
+  if (from.IsTagged()) {
+    if (to.IsDouble()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result =
+          DefineAsRegister(new (zone()) LNumberUntagD(value));
+      if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+      return result;
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      if (val->type().IsSmi()) {
+        return DefineSameAsFirst(new (zone()) LDummyUse(value));
+      }
+      return AssignEnvironment(
+          DefineSameAsFirst(new (zone()) LCheckSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      if (val->type().IsSmi() || val->representation().IsSmi()) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new (zone()) LSmiUntag(value, false));
+      } else {
+        LOperand* value = UseRegister(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempDoubleRegister();
+        LInstruction* result =
+            DefineSameAsFirst(new (zone()) LTaggedToI(value, temp1, temp2));
+        if (!val->representation().IsSmi()) result = AssignEnvironment(result);
+        return result;
+      }
+    }
+  } else if (from.IsDouble()) {
+    if (to.IsTagged()) {
+      info()->MarkAsDeferredCalling();
+      LOperand* value = UseRegister(val);
+      LOperand* temp1 = TempRegister();
+      LOperand* temp2 = TempRegister();
+      LUnallocated* result_temp = TempRegister();
+      LNumberTagD* result = new (zone()) LNumberTagD(value, temp1, temp2);
+      return AssignPointerMap(Define(result, result_temp));
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      return AssignEnvironment(
+          DefineAsRegister(new (zone()) LDoubleToSmi(value)));
+    } else {
+      DCHECK(to.IsInteger32());
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new (zone()) LDoubleToI(value));
+      if (!instr->CanTruncateToInt32()) result = AssignEnvironment(result);
+      return result;
+    }
+  } else if (from.IsInteger32()) {
+    info()->MarkAsDeferredCalling();
+    if (to.IsTagged()) {
+      if (!instr->CheckFlag(HValue::kCanOverflow)) {
+        LOperand* value = UseRegisterAtStart(val);
+        return DefineAsRegister(new (zone()) LSmiTag(value));
+      } else if (val->CheckFlag(HInstruction::kUint32)) {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagU* result = new (zone()) LNumberTagU(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      } else {
+        LOperand* value = UseRegisterAtStart(val);
+        LOperand* temp1 = TempRegister();
+        LOperand* temp2 = TempRegister();
+        LNumberTagI* result = new (zone()) LNumberTagI(value, temp1, temp2);
+        return AssignPointerMap(DefineAsRegister(result));
+      }
+    } else if (to.IsSmi()) {
+      LOperand* value = UseRegister(val);
+      LInstruction* result = DefineAsRegister(new (zone()) LSmiTag(value));
+      if (instr->CheckFlag(HValue::kCanOverflow)) {
+        result = AssignEnvironment(result);
+      }
+      return result;
+    } else {
+      DCHECK(to.IsDouble());
+      if (val->CheckFlag(HInstruction::kUint32)) {
+        return DefineAsRegister(new (zone()) LUint32ToDouble(UseRegister(val)));
+      } else {
+        return DefineAsRegister(new (zone()) LInteger32ToDouble(Use(val)));
+      }
+    }
+  }
+  UNREACHABLE();
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckHeapObject(HCheckHeapObject* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new (zone()) LCheckNonSmi(value);
+  if (!instr->value()->type().IsHeapObject()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoCheckSmi(HCheckSmi* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new (zone()) LCheckSmi(value));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckInstanceType(HCheckInstanceType* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = new (zone()) LCheckInstanceType(value);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoCheckValue(HCheckValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  return AssignEnvironment(new (zone()) LCheckValue(value));
+}
+
+
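+// A stability check needs no run-time comparison: LCheckMaps is created
+// without an operand and the maps are guarded by stability dependencies
+// instead. A check with a migration target may call out to migrate the
+// object, hence the deferred-calling mark and the pointer map.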
+LInstruction* LChunkBuilder::DoCheckMaps(HCheckMaps* instr) {
+  if (instr->IsStabilityCheck()) return new (zone()) LCheckMaps;
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LInstruction* result = AssignEnvironment(new (zone()) LCheckMaps(value));
+  if (instr->HasMigrationTarget()) {
+    info()->MarkAsDeferredCalling();
+    result = AssignPointerMap(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoClampToUint8(HClampToUint8* instr) {
+  HValue* value = instr->value();
+  Representation input_rep = value->representation();
+  LOperand* reg = UseRegister(value);
+  if (input_rep.IsDouble()) {
+    return DefineAsRegister(new (zone()) LClampDToUint8(reg));
+  } else if (input_rep.IsInteger32()) {
+    return DefineAsRegister(new (zone()) LClampIToUint8(reg));
+  } else {
+    DCHECK(input_rep.IsSmiOrTagged());
+    LClampTToUint8* result =
+        new (zone()) LClampTToUint8(reg, TempDoubleRegister());
+    return AssignEnvironment(DefineAsRegister(result));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoDoubleBits(HDoubleBits* instr) {
+  HValue* value = instr->value();
+  DCHECK(value->representation().IsDouble());
+  return DefineAsRegister(new (zone()) LDoubleBits(UseRegister(value)));
+}
+
+
+LInstruction* LChunkBuilder::DoConstructDouble(HConstructDouble* instr) {
+  LOperand* lo = UseRegister(instr->lo());
+  LOperand* hi = UseRegister(instr->hi());
+  return DefineAsRegister(new (zone()) LConstructDouble(hi, lo));
+}
+
+
+LInstruction* LChunkBuilder::DoReturn(HReturn* instr) {
+  LOperand* context = info()->IsStub() ? UseFixed(instr->context(), cp) : NULL;
+  LOperand* parameter_count = UseRegisterOrConstant(instr->parameter_count());
+  return new (zone())
+      LReturn(UseFixed(instr->value(), r3), context, parameter_count);
+}
+
+
+LInstruction* LChunkBuilder::DoConstant(HConstant* instr) {
+  Representation r = instr->representation();
+  if (r.IsSmi()) {
+    return DefineAsRegister(new (zone()) LConstantS);
+  } else if (r.IsInteger32()) {
+    return DefineAsRegister(new (zone()) LConstantI);
+  } else if (r.IsDouble()) {
+    return DefineAsRegister(new (zone()) LConstantD);
+  } else if (r.IsExternal()) {
+    return DefineAsRegister(new (zone()) LConstantE);
+  } else if (r.IsTagged()) {
+    return DefineAsRegister(new (zone()) LConstantT);
+  } else {
+    UNREACHABLE();
+    return NULL;
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalCell(HLoadGlobalCell* instr) {
+  LLoadGlobalCell* result = new (zone()) LLoadGlobalCell;
+  return instr->RequiresHoleCheck()
+             ? AssignEnvironment(DefineAsRegister(result))
+             : DefineAsRegister(result);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* global_object =
+      UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
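+  // With --vector-ics the IC receives its type feedback vector in a fixed
+  // register, which is reserved as a temp here (and likewise in the other
+  // generic loads below).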
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+  LLoadGlobalGeneric* result =
+      new (zone()) LLoadGlobalGeneric(context, global_object, vector);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreGlobalCell(HStoreGlobalCell* instr) {
+  LOperand* value = UseRegister(instr->value());
+  // Use a temp register to check the value in the cell when a hole check
+  // is required.
+  return instr->RequiresHoleCheck()
+             ? AssignEnvironment(new (zone())
+                                 LStoreGlobalCell(value, TempRegister()))
+             : new (zone()) LStoreGlobalCell(value, NULL);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadContextSlot(HLoadContextSlot* instr) {
+  LOperand* context = UseRegisterAtStart(instr->value());
+  LInstruction* result =
+      DefineAsRegister(new (zone()) LLoadContextSlot(context));
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoStoreContextSlot(HStoreContextSlot* instr) {
+  LOperand* context;
+  LOperand* value;
+  if (instr->NeedsWriteBarrier()) {
+    context = UseTempRegister(instr->context());
+    value = UseTempRegister(instr->value());
+  } else {
+    context = UseRegister(instr->context());
+    value = UseRegister(instr->value());
+  }
+  LInstruction* result = new (zone()) LStoreContextSlot(context, value);
+  if (instr->RequiresHoleCheck() && instr->DeoptimizesOnHole()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedField(HLoadNamedField* instr) {
+  LOperand* obj = UseRegisterAtStart(instr->object());
+  return DefineAsRegister(new (zone()) LLoadNamedField(obj));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
+  LInstruction* result =
+      DefineFixed(new (zone()) LLoadNamedGeneric(context, object, vector), r3);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFunctionPrototype(
+    HLoadFunctionPrototype* instr) {
+  return AssignEnvironment(DefineAsRegister(
+      new (zone()) LLoadFunctionPrototype(UseRegister(instr->function()))));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadRoot(HLoadRoot* instr) {
+  return DefineAsRegister(new (zone()) LLoadRoot);
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
+  DCHECK(instr->key()->representation().IsSmiOrInteger32());
+  ElementsKind elements_kind = instr->elements_kind();
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LInstruction* result = NULL;
+
+  if (!instr->is_typed_elements()) {
+    LOperand* obj = NULL;
+    if (instr->representation().IsDouble()) {
+      obj = UseRegister(instr->elements());
+    } else {
+      obj = UseRegisterAtStart(instr->elements());
+    }
+    result = DefineAsRegister(new (zone()) LLoadKeyed(obj, key));
+  } else {
+    DCHECK((instr->representation().IsInteger32() &&
+            !IsDoubleOrFloatElementsKind(elements_kind)) ||
+           (instr->representation().IsDouble() &&
+            IsDoubleOrFloatElementsKind(elements_kind)));
+    LOperand* backing_store = UseRegister(instr->elements());
+    result = DefineAsRegister(new (zone()) LLoadKeyed(backing_store, key));
+  }
+
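+  // The load deoptimizes in two cases: an external/typed uint32 load whose
+  // result is not known to fit into int32, and a fixed-array load that may
+  // read the hole.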
+  if ((instr->is_external() || instr->is_fixed_typed_array())
+          ?
+          // see LCodeGen::DoLoadKeyedExternalArray
+          ((elements_kind == EXTERNAL_UINT32_ELEMENTS ||
+            elements_kind == UINT32_ELEMENTS) &&
+           !instr->CheckFlag(HInstruction::kUint32))
+          :
+          // see LCodeGen::DoLoadKeyedFixedDoubleArray and
+          // LCodeGen::DoLoadKeyedFixedArray
+          instr->RequiresHoleCheck()) {
+    result = AssignEnvironment(result);
+  }
+  return result;
+}
+
+
+LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object =
+      UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
+  }
+
+  LInstruction* result = DefineFixed(
+      new (zone()) LLoadKeyedGeneric(context, object, key, vector), r3);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyed(HStoreKeyed* instr) {
+  if (!instr->is_typed_elements()) {
+    DCHECK(instr->elements()->representation().IsTagged());
+    bool needs_write_barrier = instr->NeedsWriteBarrier();
+    LOperand* object = NULL;
+    LOperand* key = NULL;
+    LOperand* val = NULL;
+
+    if (instr->value()->representation().IsDouble()) {
+      object = UseRegisterAtStart(instr->elements());
+      val = UseRegister(instr->value());
+      key = UseRegisterOrConstantAtStart(instr->key());
+    } else {
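+      // The write barrier can clobber the registers handed to it, so a
+      // store that needs one keeps all three operands in trashable temp
+      // registers.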
+      if (needs_write_barrier) {
+        object = UseTempRegister(instr->elements());
+        val = UseTempRegister(instr->value());
+        key = UseTempRegister(instr->key());
+      } else {
+        object = UseRegisterAtStart(instr->elements());
+        val = UseRegisterAtStart(instr->value());
+        key = UseRegisterOrConstantAtStart(instr->key());
+      }
+    }
+
+    return new (zone()) LStoreKeyed(object, key, val);
+  }
+
+  DCHECK((instr->value()->representation().IsInteger32() &&
+          !IsDoubleOrFloatElementsKind(instr->elements_kind())) ||
+         (instr->value()->representation().IsDouble() &&
+          IsDoubleOrFloatElementsKind(instr->elements_kind())));
+  DCHECK((instr->is_fixed_typed_array() &&
+          instr->elements()->representation().IsTagged()) ||
+         (instr->is_external() &&
+          instr->elements()->representation().IsExternal()));
+  LOperand* val = UseRegister(instr->value());
+  LOperand* key = UseRegisterOrConstantAtStart(instr->key());
+  LOperand* backing_store = UseRegister(instr->elements());
+  return new (zone()) LStoreKeyed(backing_store, key, val);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreKeyedGeneric(HStoreKeyedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* key = UseFixed(instr->key(), StoreDescriptor::NameRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  DCHECK(instr->object()->representation().IsTagged());
+  DCHECK(instr->key()->representation().IsTagged());
+  DCHECK(instr->value()->representation().IsTagged());
+
+  return MarkAsCall(new (zone()) LStoreKeyedGeneric(context, obj, key, val),
+                    instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTransitionElementsKind(
+    HTransitionElementsKind* instr) {
+  if (IsSimpleMapChangeTransition(instr->from_kind(), instr->to_kind())) {
+    LOperand* object = UseRegister(instr->object());
+    LOperand* new_map_reg = TempRegister();
+    LTransitionElementsKind* result =
+        new (zone()) LTransitionElementsKind(object, NULL, new_map_reg);
+    return result;
+  } else {
+    LOperand* object = UseFixed(instr->object(), r3);
+    LOperand* context = UseFixed(instr->context(), cp);
+    LTransitionElementsKind* result =
+        new (zone()) LTransitionElementsKind(object, context, NULL);
+    return MarkAsCall(result, instr);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoTrapAllocationMemento(
+    HTrapAllocationMemento* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* temp = TempRegister();
+  LTrapAllocationMemento* result =
+      new (zone()) LTrapAllocationMemento(object, temp);
+  return AssignEnvironment(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedField(HStoreNamedField* instr) {
+  bool is_in_object = instr->access().IsInobject();
+  bool needs_write_barrier = instr->NeedsWriteBarrier();
+  bool needs_write_barrier_for_map =
+      instr->has_transition() && instr->NeedsWriteBarrierForMap();
+
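+  // The policies below keep any operand the write barrier consumes live
+  // past the store (no at-start reuse) and, where needed, in a register
+  // the barrier may clobber (the temp uses).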
+  LOperand* obj;
+  if (needs_write_barrier) {
+    obj = is_in_object ? UseRegister(instr->object())
+                       : UseTempRegister(instr->object());
+  } else {
+    obj = needs_write_barrier_for_map ? UseRegister(instr->object())
+                                      : UseRegisterAtStart(instr->object());
+  }
+
+  LOperand* val;
+  if (needs_write_barrier) {
+    val = UseTempRegister(instr->value());
+  } else if (instr->field_representation().IsDouble()) {
+    val = UseRegisterAtStart(instr->value());
+  } else {
+    val = UseRegister(instr->value());
+  }
+
+  // We need a temporary register for the write barrier of the map field.
+  LOperand* temp = needs_write_barrier_for_map ? TempRegister() : NULL;
+
+  return new (zone()) LStoreNamedField(obj, val, temp);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreNamedGeneric(HStoreNamedGeneric* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* obj =
+      UseFixed(instr->object(), StoreDescriptor::ReceiverRegister());
+  LOperand* val = UseFixed(instr->value(), StoreDescriptor::ValueRegister());
+
+  LInstruction* result = new (zone()) LStoreNamedGeneric(context, obj, val);
+  return MarkAsCall(result, instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringAdd(HStringAdd* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* left = UseFixed(instr->left(), r4);
+  LOperand* right = UseFixed(instr->right(), r3);
+  return MarkAsCall(
+      DefineFixed(new (zone()) LStringAdd(context, left, right), r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharCodeAt(HStringCharCodeAt* instr) {
+  LOperand* string = UseTempRegister(instr->string());
+  LOperand* index = UseTempRegister(instr->index());
+  LOperand* context = UseAny(instr->context());
+  LStringCharCodeAt* result =
+      new (zone()) LStringCharCodeAt(context, string, index);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoStringCharFromCode(HStringCharFromCode* instr) {
+  LOperand* char_code = UseRegister(instr->value());
+  LOperand* context = UseAny(instr->context());
+  LStringCharFromCode* result =
+      new (zone()) LStringCharFromCode(context, char_code);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
+  info()->MarkAsDeferredCalling();
+  LOperand* context = UseAny(instr->context());
+  LOperand* size = UseRegisterOrConstant(instr->size());
+  LOperand* temp1 = TempRegister();
+  LOperand* temp2 = TempRegister();
+  LAllocate* result = new (zone()) LAllocate(context, size, temp1, temp2);
+  return AssignPointerMap(DefineAsRegister(result));
+}
+
+
+LInstruction* LChunkBuilder::DoRegExpLiteral(HRegExpLiteral* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new (zone()) LRegExpLiteral(context), r3),
+                    instr);
+}
+
+
+LInstruction* LChunkBuilder::DoFunctionLiteral(HFunctionLiteral* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new (zone()) LFunctionLiteral(context), r3),
+                    instr);
+}
+
+
+LInstruction* LChunkBuilder::DoOsrEntry(HOsrEntry* instr) {
+  DCHECK(argument_count_ == 0);
+  allocator_->MarkAsOsrEntry();
+  current_block_->last_environment()->set_ast_id(instr->ast_id());
+  return AssignEnvironment(new (zone()) LOsrEntry);
+}
+
+
+LInstruction* LChunkBuilder::DoParameter(HParameter* instr) {
+  LParameter* result = new (zone()) LParameter;
+  if (instr->kind() == HParameter::STACK_PARAMETER) {
+    int spill_index = chunk()->GetParameterStackSlot(instr->index());
+    return DefineAsSpilled(result, spill_index);
+  } else {
+    DCHECK(info()->IsStub());
+    CallInterfaceDescriptor descriptor =
+        info()->code_stub()->GetCallInterfaceDescriptor();
+    int index = static_cast<int>(instr->index());
+    Register reg = descriptor.GetEnvironmentParameterRegister(index);
+    return DefineFixed(result, reg);
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoUnknownOSRValue(HUnknownOSRValue* instr) {
+  // Use an index that corresponds to the location in the unoptimized frame,
+  // which the optimized frame will subsume.
+  int env_index = instr->index();
+  int spill_index = 0;
+  if (instr->environment()->is_parameter_index(env_index)) {
+    spill_index = chunk()->GetParameterStackSlot(env_index);
+  } else {
+    spill_index = env_index - instr->environment()->first_local_index();
+    if (spill_index > LUnallocated::kMaxFixedSlotIndex) {
+      Retry(kTooManySpillSlotsNeededForOSR);
+      spill_index = 0;
+    }
+  }
+  return DefineAsSpilled(new (zone()) LUnknownOSRValue, spill_index);
+}
+
+
+LInstruction* LChunkBuilder::DoCallStub(HCallStub* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  return MarkAsCall(DefineFixed(new (zone()) LCallStub(context), r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoArgumentsObject(HArgumentsObject* instr) {
+  // There are no real uses of the arguments object.
+  // arguments.length and element access are supported directly on
+  // stack arguments, and any real arguments object use causes a bailout.
+  // So this value is never used.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoCapturedObject(HCapturedObject* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+
+  // There are no real uses of a captured object.
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoAccessArgumentsAt(HAccessArgumentsAt* instr) {
+  info()->MarkAsRequiresFrame();
+  LOperand* args = UseRegister(instr->arguments());
+  LOperand* length = UseRegisterOrConstantAtStart(instr->length());
+  LOperand* index = UseRegisterOrConstantAtStart(instr->index());
+  return DefineAsRegister(new (zone()) LAccessArgumentsAt(args, length, index));
+}
+
+
+LInstruction* LChunkBuilder::DoToFastProperties(HToFastProperties* instr) {
+  LOperand* object = UseFixed(instr->value(), r3);
+  LToFastProperties* result = new (zone()) LToFastProperties(object);
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeof(HTypeof* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LTypeof* result = new (zone()) LTypeof(context, UseFixed(instr->value(), r3));
+  return MarkAsCall(DefineFixed(result, r3), instr);
+}
+
+
+LInstruction* LChunkBuilder::DoTypeofIsAndBranch(HTypeofIsAndBranch* instr) {
+  return new (zone()) LTypeofIsAndBranch(UseRegister(instr->value()));
+}
+
+
+LInstruction* LChunkBuilder::DoIsConstructCallAndBranch(
+    HIsConstructCallAndBranch* instr) {
+  return new (zone()) LIsConstructCallAndBranch(TempRegister());
+}
+
+
+LInstruction* LChunkBuilder::DoSimulate(HSimulate* instr) {
+  instr->ReplayEnvironment(current_block_->last_environment());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoStackCheck(HStackCheck* instr) {
+  if (instr->is_function_entry()) {
+    LOperand* context = UseFixed(instr->context(), cp);
+    return MarkAsCall(new (zone()) LStackCheck(context), instr);
+  } else {
+    DCHECK(instr->is_backwards_branch());
+    LOperand* context = UseAny(instr->context());
+    return AssignEnvironment(
+        AssignPointerMap(new (zone()) LStackCheck(context)));
+  }
+}
+
+
+LInstruction* LChunkBuilder::DoEnterInlined(HEnterInlined* instr) {
+  HEnvironment* outer = current_block_->last_environment();
+  outer->set_ast_id(instr->ReturnId());
+  HConstant* undefined = graph()->GetConstantUndefined();
+  HEnvironment* inner = outer->CopyForInlining(
+      instr->closure(), instr->arguments_count(), instr->function(), undefined,
+      instr->inlining_kind());
+  // Only replay the binding of the arguments object if it wasn't removed
+  // from the graph.
+  if (instr->arguments_var() != NULL && instr->arguments_object()->IsLinked()) {
+    inner->Bind(instr->arguments_var(), instr->arguments_object());
+  }
+  inner->BindContext(instr->closure_context());
+  inner->set_entry(instr);
+  current_block_->UpdateEnvironment(inner);
+  chunk_->AddInlinedClosure(instr->closure());
+  return NULL;
+}
+
+
+LInstruction* LChunkBuilder::DoLeaveInlined(HLeaveInlined* instr) {
+  LInstruction* pop = NULL;
+
+  HEnvironment* env = current_block_->last_environment();
+
+  if (env->entry()->arguments_pushed()) {
+    int argument_count = env->arguments_environment()->parameter_count();
+    pop = new (zone()) LDrop(argument_count);
+    DCHECK(instr->argument_delta() == -argument_count);
+  }
+
+  HEnvironment* outer =
+      current_block_->last_environment()->DiscardInlined(false);
+  current_block_->UpdateEnvironment(outer);
+
+  return pop;
+}
+
+
+LInstruction* LChunkBuilder::DoForInPrepareMap(HForInPrepareMap* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* object = UseFixed(instr->enumerable(), r3);
+  LForInPrepareMap* result = new (zone()) LForInPrepareMap(context, object);
+  return MarkAsCall(DefineFixed(result, r3), instr, CAN_DEOPTIMIZE_EAGERLY);
+}
+
+
+LInstruction* LChunkBuilder::DoForInCacheArray(HForInCacheArray* instr) {
+  LOperand* map = UseRegister(instr->map());
+  return AssignEnvironment(
+      DefineAsRegister(new (zone()) LForInCacheArray(map)));
+}
+
+
+LInstruction* LChunkBuilder::DoCheckMapValue(HCheckMapValue* instr) {
+  LOperand* value = UseRegisterAtStart(instr->value());
+  LOperand* map = UseRegisterAtStart(instr->map());
+  return AssignEnvironment(new (zone()) LCheckMapValue(value, map));
+}
+
+
+LInstruction* LChunkBuilder::DoLoadFieldByIndex(HLoadFieldByIndex* instr) {
+  LOperand* object = UseRegister(instr->object());
+  LOperand* index = UseTempRegister(instr->index());
+  LLoadFieldByIndex* load = new (zone()) LLoadFieldByIndex(object, index);
+  LInstruction* result = DefineSameAsFirst(load);
+  return AssignPointerMap(result);
+}
+
+
+LInstruction* LChunkBuilder::DoStoreFrameContext(HStoreFrameContext* instr) {
+  LOperand* context = UseRegisterAtStart(instr->context());
+  return new (zone()) LStoreFrameContext(context);
+}
+
+
+LInstruction* LChunkBuilder::DoAllocateBlockContext(
+    HAllocateBlockContext* instr) {
+  LOperand* context = UseFixed(instr->context(), cp);
+  LOperand* function = UseRegisterAtStart(instr->function());
+  LAllocateBlockContext* result =
+      new (zone()) LAllocateBlockContext(context, function);
+  return MarkAsCall(DefineFixed(result, cp), instr);
+}
+}
+}  // namespace v8::internal
diff --git a/deps/v8/src/ppc/lithium-ppc.h b/deps/v8/src/ppc/lithium-ppc.h
new file mode 100644
index 0000000..2176fa6
--- /dev/null
+++ b/deps/v8/src/ppc/lithium-ppc.h
@@ -0,0 +1,2746 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_LITHIUM_PPC_H_
+#define V8_PPC_LITHIUM_PPC_H_
+
+#include "src/hydrogen.h"
+#include "src/lithium.h"
+#include "src/lithium-allocator.h"
+#include "src/safepoint-table.h"
+#include "src/utils.h"
+
+namespace v8 {
+namespace internal {
+
+// Forward declarations.
+class LCodeGen;
+
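+// The master list of concrete PPC Lithium instructions. Each V(...) entry
+// feeds the opcode enum, the Is##type() predicates and the checked casts
+// declared further down.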
+#define LITHIUM_CONCRETE_INSTRUCTION_LIST(V) \
+  V(AccessArgumentsAt)                       \
+  V(AddI)                                    \
+  V(Allocate)                                \
+  V(AllocateBlockContext)                    \
+  V(ApplyArguments)                          \
+  V(ArgumentsElements)                       \
+  V(ArgumentsLength)                         \
+  V(ArithmeticD)                             \
+  V(ArithmeticT)                             \
+  V(BitI)                                    \
+  V(BoundsCheck)                             \
+  V(Branch)                                  \
+  V(CallJSFunction)                          \
+  V(CallWithDescriptor)                      \
+  V(CallFunction)                            \
+  V(CallNew)                                 \
+  V(CallNewArray)                            \
+  V(CallRuntime)                             \
+  V(CallStub)                                \
+  V(CheckInstanceType)                       \
+  V(CheckNonSmi)                             \
+  V(CheckMaps)                               \
+  V(CheckMapValue)                           \
+  V(CheckSmi)                                \
+  V(CheckValue)                              \
+  V(ClampDToUint8)                           \
+  V(ClampIToUint8)                           \
+  V(ClampTToUint8)                           \
+  V(ClassOfTestAndBranch)                    \
+  V(CompareMinusZeroAndBranch)               \
+  V(CompareNumericAndBranch)                 \
+  V(CmpObjectEqAndBranch)                    \
+  V(CmpHoleAndBranch)                        \
+  V(CmpMapAndBranch)                         \
+  V(CmpT)                                    \
+  V(ConstantD)                               \
+  V(ConstantE)                               \
+  V(ConstantI)                               \
+  V(ConstantS)                               \
+  V(ConstantT)                               \
+  V(ConstructDouble)                         \
+  V(Context)                                 \
+  V(DateField)                               \
+  V(DebugBreak)                              \
+  V(DeclareGlobals)                          \
+  V(Deoptimize)                              \
+  V(DivByConstI)                             \
+  V(DivByPowerOf2I)                          \
+  V(DivI)                                    \
+  V(DoubleBits)                              \
+  V(DoubleToI)                               \
+  V(DoubleToSmi)                             \
+  V(Drop)                                    \
+  V(Dummy)                                   \
+  V(DummyUse)                                \
+  V(FlooringDivByConstI)                     \
+  V(FlooringDivByPowerOf2I)                  \
+  V(FlooringDivI)                            \
+  V(ForInCacheArray)                         \
+  V(ForInPrepareMap)                         \
+  V(FunctionLiteral)                         \
+  V(GetCachedArrayIndex)                     \
+  V(Goto)                                    \
+  V(HasCachedArrayIndexAndBranch)            \
+  V(HasInstanceTypeAndBranch)                \
+  V(InnerAllocatedObject)                    \
+  V(InstanceOf)                              \
+  V(InstanceOfKnownGlobal)                   \
+  V(InstructionGap)                          \
+  V(Integer32ToDouble)                       \
+  V(InvokeFunction)                          \
+  V(IsConstructCallAndBranch)                \
+  V(IsObjectAndBranch)                       \
+  V(IsStringAndBranch)                       \
+  V(IsSmiAndBranch)                          \
+  V(IsUndetectableAndBranch)                 \
+  V(Label)                                   \
+  V(LazyBailout)                             \
+  V(LoadContextSlot)                         \
+  V(LoadRoot)                                \
+  V(LoadFieldByIndex)                        \
+  V(LoadFunctionPrototype)                   \
+  V(LoadGlobalCell)                          \
+  V(LoadGlobalGeneric)                       \
+  V(LoadKeyed)                               \
+  V(LoadKeyedGeneric)                        \
+  V(LoadNamedField)                          \
+  V(LoadNamedGeneric)                        \
+  V(MapEnumLength)                           \
+  V(MathAbs)                                 \
+  V(MathClz32)                               \
+  V(MathExp)                                 \
+  V(MathFloor)                               \
+  V(MathFround)                              \
+  V(MathLog)                                 \
+  V(MathMinMax)                              \
+  V(MathPowHalf)                             \
+  V(MathRound)                               \
+  V(MathSqrt)                                \
+  V(ModByConstI)                             \
+  V(ModByPowerOf2I)                          \
+  V(ModI)                                    \
+  V(MulI)                                    \
+  V(MultiplyAddD)                            \
+  V(MultiplySubD)                            \
+  V(NumberTagD)                              \
+  V(NumberTagI)                              \
+  V(NumberTagU)                              \
+  V(NumberUntagD)                            \
+  V(OsrEntry)                                \
+  V(Parameter)                               \
+  V(Power)                                   \
+  V(PushArgument)                            \
+  V(RegExpLiteral)                           \
+  V(Return)                                  \
+  V(SeqStringGetChar)                        \
+  V(SeqStringSetChar)                        \
+  V(ShiftI)                                  \
+  V(SmiTag)                                  \
+  V(SmiUntag)                                \
+  V(StackCheck)                              \
+  V(StoreCodeEntry)                          \
+  V(StoreContextSlot)                        \
+  V(StoreFrameContext)                       \
+  V(StoreGlobalCell)                         \
+  V(StoreKeyed)                              \
+  V(StoreKeyedGeneric)                       \
+  V(StoreNamedField)                         \
+  V(StoreNamedGeneric)                       \
+  V(StringAdd)                               \
+  V(StringCharCodeAt)                        \
+  V(StringCharFromCode)                      \
+  V(StringCompareAndBranch)                  \
+  V(SubI)                                    \
+  V(RSubI)                                   \
+  V(TaggedToI)                               \
+  V(TailCallThroughMegamorphicCache)         \
+  V(ThisFunction)                            \
+  V(ToFastProperties)                        \
+  V(TransitionElementsKind)                  \
+  V(TrapAllocationMemento)                   \
+  V(Typeof)                                  \
+  V(TypeofIsAndBranch)                       \
+  V(Uint32ToDouble)                          \
+  V(UnknownOSRValue)                         \
+  V(WrapReceiver)
+
+
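+// Declares the boilerplate every concrete instruction needs. For example,
+// DECLARE_CONCRETE_INSTRUCTION(Goto, "goto") gives LGoto a kGoto opcode, a
+// CompileToNative() declaration, the "goto" mnemonic and a DCHECK-guarded
+// LGoto::cast().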
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const FINAL { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) FINAL;              \
+  const char* Mnemonic() const FINAL { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
+  }
+
+
+#define DECLARE_HYDROGEN_ACCESSOR(type) \
+  H##type* hydrogen() const { return H##type::cast(hydrogen_value()); }
+
+
+class LInstruction : public ZoneObject {
+ public:
+  LInstruction()
+      : environment_(NULL),
+        hydrogen_value_(NULL),
+        bit_field_(IsCallBits::encode(false)) {}
+
+  virtual ~LInstruction() {}
+
+  virtual void CompileToNative(LCodeGen* generator) = 0;
+  virtual const char* Mnemonic() const = 0;
+  virtual void PrintTo(StringStream* stream);
+  virtual void PrintDataTo(StringStream* stream);
+  virtual void PrintOutputOperandTo(StringStream* stream);
+
+  enum Opcode {
+// Declare a unique enum value for each instruction.
+#define DECLARE_OPCODE(type) k##type,
+    LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE) kNumberOfInstructions
+#undef DECLARE_OPCODE
+  };
+
+  virtual Opcode opcode() const = 0;
+
+// Declare non-virtual type testers for all leaf IR classes.
+#define DECLARE_PREDICATE(type) \
+  bool Is##type() const { return opcode() == k##type; }
+  LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
+#undef DECLARE_PREDICATE
+
+  // Declare virtual predicates for instructions that don't have
+  // an opcode.
+  virtual bool IsGap() const { return false; }
+
+  virtual bool IsControl() const { return false; }
+
+  // Try deleting this instruction if possible.
+  virtual bool TryDelete() { return false; }
+
+  void set_environment(LEnvironment* env) { environment_ = env; }
+  LEnvironment* environment() const { return environment_; }
+  bool HasEnvironment() const { return environment_ != NULL; }
+
+  void set_pointer_map(LPointerMap* p) { pointer_map_.set(p); }
+  LPointerMap* pointer_map() const { return pointer_map_.get(); }
+  bool HasPointerMap() const { return pointer_map_.is_set(); }
+
+  void set_hydrogen_value(HValue* value) { hydrogen_value_ = value; }
+  HValue* hydrogen_value() const { return hydrogen_value_; }
+
+  virtual void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) {}
+
+  void MarkAsCall() { bit_field_ = IsCallBits::update(bit_field_, true); }
+  bool IsCall() const { return IsCallBits::decode(bit_field_); }
+
+  // Interface to the register allocator and iterators.
+  bool ClobbersTemps() const { return IsCall(); }
+  bool ClobbersRegisters() const { return IsCall(); }
+  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const {
+    return IsCall();
+  }
+
+  // Interface to the register allocator and iterators.
+  bool IsMarkedAsCall() const { return IsCall(); }
+
+  virtual bool HasResult() const = 0;
+  virtual LOperand* result() const = 0;
+
+  LOperand* FirstInput() { return InputAt(0); }
+  LOperand* Output() { return HasResult() ? result() : NULL; }
+
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return true; }
+
+#ifdef DEBUG
+  void VerifyCall();
+#endif
+
+  virtual int InputCount() = 0;
+  virtual LOperand* InputAt(int i) = 0;
+
+ private:
+  // Iterator support.
+  friend class InputIterator;
+
+  friend class TempIterator;
+  virtual int TempCount() = 0;
+  virtual LOperand* TempAt(int i) = 0;
+
+  class IsCallBits : public BitField<bool, 0, 1> {};
+
+  LEnvironment* environment_;
+  SetOncePointer<LPointerMap> pointer_map_;
+  HValue* hydrogen_value_;
+  int bit_field_;
+};
+
+
+// R = number of result operands (0 or 1).
+template <int R>
+class LTemplateResultInstruction : public LInstruction {
+ public:
+  // Allow 0 or 1 output operands.
+  STATIC_ASSERT(R == 0 || R == 1);
+  bool HasResult() const FINAL { return R != 0 && result() != NULL; }
+  void set_result(LOperand* operand) { results_[0] = operand; }
+  LOperand* result() const { return results_[0]; }
+
+ protected:
+  EmbeddedContainer<LOperand*, R> results_;
+};
+
+
+// R = number of result operands (0 or 1).
+// I = number of input operands.
+// T = number of temporary operands.
+template <int R, int I, int T>
+class LTemplateInstruction : public LTemplateResultInstruction<R> {
+ protected:
+  EmbeddedContainer<LOperand*, I> inputs_;
+  EmbeddedContainer<LOperand*, T> temps_;
+
+ private:
+  // Iterator support.
+  int InputCount() FINAL { return I; }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
+
+  int TempCount() FINAL { return T; }
+  LOperand* TempAt(int i) FINAL { return temps_[i]; }
+};
+
+
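+// A gap holds the parallel moves that the register allocator inserts around
+// an instruction; a move can occupy one of the four inner positions BEFORE,
+// START, END and AFTER.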
+class LGap : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGap(HBasicBlock* block) : block_(block) {
+    parallel_moves_[BEFORE] = NULL;
+    parallel_moves_[START] = NULL;
+    parallel_moves_[END] = NULL;
+    parallel_moves_[AFTER] = NULL;
+  }
+
+  // Can't use the DECLARE-macro here because of sub-classes.
+  bool IsGap() const OVERRIDE { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+  static LGap* cast(LInstruction* instr) {
+    DCHECK(instr->IsGap());
+    return reinterpret_cast<LGap*>(instr);
+  }
+
+  bool IsRedundant() const;
+
+  HBasicBlock* block() const { return block_; }
+
+  enum InnerPosition {
+    BEFORE,
+    START,
+    END,
+    AFTER,
+    FIRST_INNER_POSITION = BEFORE,
+    LAST_INNER_POSITION = AFTER
+  };
+
+  LParallelMove* GetOrCreateParallelMove(InnerPosition pos, Zone* zone) {
+    if (parallel_moves_[pos] == NULL) {
+      parallel_moves_[pos] = new (zone) LParallelMove(zone);
+    }
+    return parallel_moves_[pos];
+  }
+
+  LParallelMove* GetParallelMove(InnerPosition pos) {
+    return parallel_moves_[pos];
+  }
+
+ private:
+  LParallelMove* parallel_moves_[LAST_INNER_POSITION + 1];
+  HBasicBlock* block_;
+};
+
+
+class LInstructionGap FINAL : public LGap {
+ public:
+  explicit LInstructionGap(HBasicBlock* block) : LGap(block) {}
+
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+    return !IsRedundant();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
+};
+
+
+class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LGoto(HBasicBlock* block) : block_(block) {}
+
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+  DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsControl() const OVERRIDE { return true; }
+
+  int block_id() const { return block_->block_id(); }
+
+ private:
+  HBasicBlock* block_;
+};
+
+
+class LLazyBailout FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LLazyBailout() : gap_instructions_size_(0) {}
+
+  DECLARE_CONCRETE_INSTRUCTION(LazyBailout, "lazy-bailout")
+
+  void set_gap_instructions_size(int gap_instructions_size) {
+    gap_instructions_size_ = gap_instructions_size;
+  }
+  int gap_instructions_size() { return gap_instructions_size_; }
+
+ private:
+  int gap_instructions_size_;
+};
+
+
+class LDummy FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  LDummy() {}
+  DECLARE_CONCRETE_INSTRUCTION(Dummy, "dummy")
+};
+
+
+class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDummyUse(LOperand* value) { inputs_[0] = value; }
+  DECLARE_CONCRETE_INSTRUCTION(DummyUse, "dummy-use")
+};
+
+
+class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  bool IsControl() const OVERRIDE { return true; }
+  DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
+  DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
+};
+
+
+class LLabel FINAL : public LGap {
+ public:
+  explicit LLabel(HBasicBlock* block) : LGap(block), replacement_(NULL) {}
+
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Label, "label")
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int block_id() const { return block()->block_id(); }
+  bool is_loop_header() const { return block()->IsLoopHeader(); }
+  bool is_osr_entry() const { return block()->is_osr_entry(); }
+  Label* label() { return &label_; }
+  LLabel* replacement() const { return replacement_; }
+  void set_replacement(LLabel* label) { replacement_ = label; }
+  bool HasReplacement() const { return replacement_ != NULL; }
+
+ private:
+  Label label_;
+  LLabel* replacement_;
+};
+
+
+class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  virtual bool HasInterestingComment(LCodeGen* gen) const { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
+};
+
+
+class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallStub(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallStub, "call-stub")
+  DECLARE_HYDROGEN_ACCESSOR(CallStub)
+};
+
+
+class LTailCallThroughMegamorphicCache FINAL
+    : public LTemplateInstruction<0, 3, 0> {
+ public:
+  explicit LTailCallThroughMegamorphicCache(LOperand* context,
+                                            LOperand* receiver,
+                                            LOperand* name) {
+    inputs_[0] = context;
+    inputs_[1] = receiver;
+    inputs_[2] = name;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* name() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
+                               "tail-call-through-megamorphic-cache")
+  DECLARE_HYDROGEN_ACCESSOR(TailCallThroughMegamorphicCache)
+};
+
+class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
+};
+
+
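+// Base class for control-flow instructions with I inputs and T temps. It
+// maps the hydrogen successors to chunk block ids and lazily materializes
+// the assembly labels for the true and false destinations.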
+template <int I, int T>
+class LControlInstruction : public LTemplateInstruction<0, I, T> {
+ public:
+  LControlInstruction() : false_label_(NULL), true_label_(NULL) {}
+
+  bool IsControl() const FINAL { return true; }
+
+  int SuccessorCount() { return hydrogen()->SuccessorCount(); }
+  HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
+
+  int TrueDestination(LChunk* chunk) {
+    return chunk->LookupDestination(true_block_id());
+  }
+  int FalseDestination(LChunk* chunk) {
+    return chunk->LookupDestination(false_block_id());
+  }
+
+  Label* TrueLabel(LChunk* chunk) {
+    if (true_label_ == NULL) {
+      true_label_ = chunk->GetAssemblyLabel(TrueDestination(chunk));
+    }
+    return true_label_;
+  }
+  Label* FalseLabel(LChunk* chunk) {
+    if (false_label_ == NULL) {
+      false_label_ = chunk->GetAssemblyLabel(FalseDestination(chunk));
+    }
+    return false_label_;
+  }
+
+ protected:
+  int true_block_id() { return SuccessorAt(0)->block_id(); }
+  int false_block_id() { return SuccessorAt(1)->block_id(); }
+
+ private:
+  HControlInstruction* hydrogen() {
+    return HControlInstruction::cast(this->hydrogen_value());
+  }
+
+  Label* false_label_;
+  Label* true_label_;
+};
+
+
+class LWrapReceiver FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LWrapReceiver(LOperand* receiver, LOperand* function) {
+    inputs_[0] = receiver;
+    inputs_[1] = function;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(WrapReceiver, "wrap-receiver")
+  DECLARE_HYDROGEN_ACCESSOR(WrapReceiver)
+
+  LOperand* receiver() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+};
+
+
+class LApplyArguments FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LApplyArguments(LOperand* function, LOperand* receiver, LOperand* length,
+                  LOperand* elements) {
+    inputs_[0] = function;
+    inputs_[1] = receiver;
+    inputs_[2] = length;
+    inputs_[3] = elements;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(ApplyArguments, "apply-arguments")
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* receiver() { return inputs_[1]; }
+  LOperand* length() { return inputs_[2]; }
+  LOperand* elements() { return inputs_[3]; }
+};
+
+
+class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LAccessArgumentsAt(LOperand* arguments, LOperand* length, LOperand* index) {
+    inputs_[0] = arguments;
+    inputs_[1] = length;
+    inputs_[2] = index;
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
+
+  LOperand* arguments() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LArgumentsLength FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LArgumentsLength(LOperand* elements) { inputs_[0] = elements; }
+
+  LOperand* elements() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsLength, "arguments-length")
+};
+
+
+class LArgumentsElements FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ArgumentsElements, "arguments-elements")
+  DECLARE_HYDROGEN_ACCESSOR(ArgumentsElements)
+};
+
+
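+// The next six instructions cover three strength-reduction tiers for mod and
+// div: power-of-two shifts, magic-number multiplication for other constants
+// (see HasMagicNumberForDivisor below), and the generic two-operand forms.
+// The same three tiers repeat further down for flooring (Math.floor) division.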
+class LModByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByPowerOf2I, "mod-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LModByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModByConstI, "mod-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LModI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LModI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mod)
+};
+
+
+class LDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByPowerOf2I, "div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivByConstI FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LDivByConstI(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivByConstI, "div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(Div)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LDivI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LDivI(LOperand* dividend, LOperand* divisor) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DivI, "div-i")
+  DECLARE_HYDROGEN_ACCESSOR(BinaryOperation)
+};
+
+
+class LFlooringDivByPowerOf2I FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LFlooringDivByPowerOf2I(LOperand* dividend, int32_t divisor) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByPowerOf2I,
+                               "flooring-div-by-power-of-2-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivByConstI FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LFlooringDivByConstI(LOperand* dividend, int32_t divisor, LOperand* temp) {
+    inputs_[0] = dividend;
+    divisor_ = divisor;
+    temps_[0] = temp;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  int32_t divisor() const { return divisor_; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivByConstI, "flooring-div-by-const-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+
+ private:
+  int32_t divisor_;
+};
+
+
+class LFlooringDivI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LFlooringDivI(LOperand* dividend, LOperand* divisor) {
+    inputs_[0] = dividend;
+    inputs_[1] = divisor;
+  }
+
+  LOperand* dividend() { return inputs_[0]; }
+  LOperand* divisor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FlooringDivI, "flooring-div-i")
+  DECLARE_HYDROGEN_ACCESSOR(MathFloorOfDiv)
+};
+
+
+class LMulI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMulI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MulI, "mul-i")
+  DECLARE_HYDROGEN_ACCESSOR(Mul)
+};
+
+
+// Instruction for computing multiplier * multiplicand + addend.
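+// On ARM this is presumably selected so it can lower to a fused vmla.f64
+// (see LChunkBuilder::DoMultiplyAdd below).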
+class LMultiplyAddD FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplyAddD(LOperand* addend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = addend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* addend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplyAddD, "multiply-add-d")
+};
+
+
+// Instruction for computing minuend - multiplier * multiplicand.
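+// Likely the vmls.f64 counterpart; see LChunkBuilder::DoMultiplySub below.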
+class LMultiplySubD FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LMultiplySubD(LOperand* minuend, LOperand* multiplier,
+                LOperand* multiplicand) {
+    inputs_[0] = minuend;
+    inputs_[1] = multiplier;
+    inputs_[2] = multiplicand;
+  }
+
+  LOperand* minuend() { return inputs_[0]; }
+  LOperand* multiplier() { return inputs_[1]; }
+  LOperand* multiplicand() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MultiplySubD, "multiply-sub-d")
+};
+
+
+class LDebugBreak FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(DebugBreak, "break")
+};
+
+
+class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
+ public:
+  LCompareNumericAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareNumericAndBranch,
+                               "compare-numeric-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareNumericAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+  bool is_double() const { return hydrogen()->representation().IsDouble(); }
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LMathFloor FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFloor(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathRound FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LMathRound(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathFround FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathFround(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathFround, "math-fround")
+};
+
+
+class LMathAbs FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathAbs(LOperand* context, LOperand* value) {
+    inputs_[1] = context;
+    inputs_[0] = value;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
+};
+
+
+class LMathLog FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathLog(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
+};
+
+
+class LMathClz32 FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathClz32(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathClz32, "math-clz32")
+};
+
+
+class LMathExp FINAL : public LTemplateInstruction<1, 1, 3> {
+ public:
+  LMathExp(LOperand* value, LOperand* double_temp, LOperand* temp1,
+           LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+    temps_[2] = double_temp;
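+    // Side effect in the constructor: eagerly sets up the shared lookup
+    // tables that the generated exp sequence presumably reads.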
+    ExternalReference::InitializeMathExpData();
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+  LOperand* double_temp() { return temps_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
+};
+
+
+class LMathSqrt FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathSqrt(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
+};
+
+
+class LMathPowHalf FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMathPowHalf(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
+};
+
+
+class LCmpObjectEqAndBranch FINAL : public LControlInstruction<2, 0> {
+ public:
+  LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch, "cmp-object-eq-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareObjectEqAndBranch)
+};
+
+
+class LCmpHoleAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LCmpHoleAndBranch(LOperand* object) { inputs_[0] = object; }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpHoleAndBranch, "cmp-hole-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareHoleAndBranch)
+};
+
+
+class LCompareMinusZeroAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LCompareMinusZeroAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CompareMinusZeroAndBranch,
+                               "cmp-minus-zero-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMinusZeroAndBranch)
+};
+
+
+class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LIsObjectAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LIsStringAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LIsSmiAndBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
+                               "is-undetectable-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
+ public:
+  LStringCompareAndBranch(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCompareAndBranch,
+                               "string-compare-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
+
+  Token::Value op() const { return hydrogen()->token(); }
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasInstanceTypeAndBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasInstanceTypeAndBranch,
+                               "has-instance-type-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LGetCachedArrayIndex FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LGetCachedArrayIndex(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
+  DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
+};
+
+
+class LHasCachedArrayIndexAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LHasCachedArrayIndexAndBranch(LOperand* value) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
+                               "has-cached-array-index-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LClassOfTestAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClassOfTestAndBranch, "class-of-test-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LCmpT FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LCmpT(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpT, "cmp-t")
+  DECLARE_HYDROGEN_ACCESSOR(CompareGeneric)
+
+  Token::Value op() const { return hydrogen()->token(); }
+};
+
+
+class LInstanceOf FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LInstanceOf(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOf, "instance-of")
+};
+
+
+class LInstanceOfKnownGlobal FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LInstanceOfKnownGlobal(LOperand* context, LOperand* value, LOperand* temp) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InstanceOfKnownGlobal,
+                               "instance-of-known-global")
+  DECLARE_HYDROGEN_ACCESSOR(InstanceOfKnownGlobal)
+
+  Handle<JSFunction> function() const { return hydrogen()->function(); }
+  LEnvironment* GetDeferredLazyDeoptimizationEnvironment() {
+    return lazy_deopt_env_;
+  }
+  void SetDeferredLazyDeoptimizationEnvironment(LEnvironment* env) OVERRIDE {
+    lazy_deopt_env_ = env;
+  }
+
+ private:
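+  // Assumption: populated via SetDeferredLazyDeoptimizationEnvironment before
+  // code generation; note the constructor leaves it uninitialized.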
+  LEnvironment* lazy_deopt_env_;
+};
+
+
+class LBoundsCheck FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LBoundsCheck(LOperand* index, LOperand* length) {
+    inputs_[0] = index;
+    inputs_[1] = length;
+  }
+
+  LOperand* index() { return inputs_[0]; }
+  LOperand* length() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(BoundsCheck, "bounds-check")
+  DECLARE_HYDROGEN_ACCESSOR(BoundsCheck)
+};
+
+
+class LBitI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LBitI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Token::Value op() const { return hydrogen()->op(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(BitI, "bit-i")
+  DECLARE_HYDROGEN_ACCESSOR(Bitwise)
+};
+
+
+class LShiftI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LShiftI(Token::Value op, LOperand* left, LOperand* right, bool can_deopt)
+      : op_(op), can_deopt_(can_deopt) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+  bool can_deopt() const { return can_deopt_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ShiftI, "shift-i")
+
+ private:
+  Token::Value op_;
+  bool can_deopt_;
+};
+
+
+class LSubI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SubI, "sub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LRSubI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LRSubI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(RSubI, "rsub-i")
+  DECLARE_HYDROGEN_ACCESSOR(Sub)
+};
+
+
+class LConstantI FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantI, "constant-i")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  int32_t value() const { return hydrogen()->Integer32Value(); }
+};
+
+
+class LConstantS FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantS, "constant-s")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Smi* value() const { return Smi::FromInt(hydrogen()->Integer32Value()); }
+};
+
+
+class LConstantD FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantD, "constant-d")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  double value() const { return hydrogen()->DoubleValue(); }
+};
+
+
+class LConstantE FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantE, "constant-e")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  ExternalReference value() const {
+    return hydrogen()->ExternalReferenceValue();
+  }
+};
+
+
+class LConstantT FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ConstantT, "constant-t")
+  DECLARE_HYDROGEN_ACCESSOR(Constant)
+
+  Handle<Object> value(Isolate* isolate) const {
+    return hydrogen()->handle(isolate);
+  }
+};
+
+
+class LBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
+  DECLARE_HYDROGEN_ACCESSOR(Branch)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LCmpMapAndBranch FINAL : public LControlInstruction<1, 1> {
+ public:
+  LCmpMapAndBranch(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CmpMapAndBranch, "cmp-map-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(CompareMap)
+
+  Handle<Map> map() const { return hydrogen()->map().handle(); }
+};
+
+
+class LMapEnumLength FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LMapEnumLength(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MapEnumLength, "map-enum-length")
+};
+
+
+class LDateField FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LDateField(LOperand* date, LOperand* temp, Smi* index) : index_(index) {
+    inputs_[0] = date;
+    temps_[0] = temp;
+  }
+
+  LOperand* date() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  Smi* index() const { return index_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DateField, "date-field")
+  DECLARE_HYDROGEN_ACCESSOR(DateField)
+
+ private:
+  Smi* index_;
+};
+
+
+class LSeqStringGetChar FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LSeqStringGetChar(LOperand* string, LOperand* index) {
+    inputs_[0] = string;
+    inputs_[1] = index;
+  }
+
+  LOperand* string() const { return inputs_[0]; }
+  LOperand* index() const { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringGetChar, "seq-string-get-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringGetChar)
+};
+
+
+class LSeqStringSetChar FINAL : public LTemplateInstruction<1, 4, 0> {
+ public:
+  LSeqStringSetChar(LOperand* context, LOperand* string, LOperand* index,
+                    LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+    inputs_[3] = value;
+  }
+
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SeqStringSetChar, "seq-string-set-char")
+  DECLARE_HYDROGEN_ACCESSOR(SeqStringSetChar)
+};
+
+
+class LAddI FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAddI(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(AddI, "add-i")
+  DECLARE_HYDROGEN_ACCESSOR(Add)
+};
+
+
+class LMathMinMax FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LMathMinMax(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
+  DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
+};
+
+
+class LPower FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LPower(LOperand* left, LOperand* right) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Power, "power")
+  DECLARE_HYDROGEN_ACCESSOR(Power)
+};
+
+
+class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LArithmeticD(Token::Value op, LOperand* left, LOperand* right) : op_(op) {
+    inputs_[0] = left;
+    inputs_[1] = right;
+  }
+
+  Token::Value op() const { return op_; }
+  LOperand* left() { return inputs_[0]; }
+  LOperand* right() { return inputs_[1]; }
+
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LArithmeticT(Token::Value op, LOperand* context, LOperand* left,
+               LOperand* right)
+      : op_(op) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+  Token::Value op() const { return op_; }
+
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
+
+ private:
+  Token::Value op_;
+};
+
+
+class LReturn FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LReturn(LOperand* value, LOperand* context, LOperand* parameter_count) {
+    inputs_[0] = value;
+    inputs_[1] = context;
+    inputs_[2] = parameter_count;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+
+  bool has_constant_parameter_count() {
+    return parameter_count()->IsConstantOperand();
+  }
+  LConstantOperand* constant_parameter_count() {
+    DCHECK(has_constant_parameter_count());
+    return LConstantOperand::cast(parameter_count());
+  }
+  LOperand* parameter_count() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Return, "return")
+};
+
+
+class LLoadNamedField FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadNamedField(LOperand* object) { inputs_[0] = object; }
+
+  LOperand* object() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedField)
+};
+
+
+class LLoadNamedGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadNamedGeneric(LOperand* context, LOperand* object, LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadNamedGeneric, "load-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadNamedGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+};
+
+
+class LLoadFunctionPrototype FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadFunctionPrototype(LOperand* function) { inputs_[0] = function; }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFunctionPrototype, "load-function-prototype")
+  DECLARE_HYDROGEN_ACCESSOR(LoadFunctionPrototype)
+};
+
+
+class LLoadRoot FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadRoot, "load-root")
+  DECLARE_HYDROGEN_ACCESSOR(LoadRoot)
+
+  Heap::RootListIndex index() const { return hydrogen()->index(); }
+};
+
+
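+// Covers loads from FAST elements as well as external arrays and fixed typed
+// arrays; base_offset() carries the constant part of the element address.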
+class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadKeyed(LOperand* elements, LOperand* key) {
+    inputs_[0] = elements;
+    inputs_[1] = key;
+  }
+
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+  bool is_external() const { return hydrogen()->is_external(); }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  bool is_typed_elements() const {
+    return is_external() || is_fixed_typed_array();
+  }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LLoadKeyedGeneric FINAL : public LTemplateInstruction<1, 3, 1> {
+ public:
+  LLoadKeyedGeneric(LOperand* context, LOperand* object, LOperand* key,
+                    LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = key;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadKeyedGeneric, "load-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadKeyedGeneric)
+};
+
+
+class LLoadGlobalCell FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
+};
+
+
+class LLoadGlobalGeneric FINAL : public LTemplateInstruction<1, 2, 1> {
+ public:
+  LLoadGlobalGeneric(LOperand* context, LOperand* global_object,
+                     LOperand* vector) {
+    inputs_[0] = context;
+    inputs_[1] = global_object;
+    temps_[0] = vector;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* global_object() { return inputs_[1]; }
+  LOperand* temp_vector() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
+  DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  bool for_typeof() const { return hydrogen()->for_typeof(); }
+};
+
+
+class LStoreGlobalCell FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LStoreGlobalCell(LOperand* value, LOperand* temp) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
+  DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
+};
+
+
+class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LLoadContextSlot(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadContextSlot, "load-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(LoadContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreContextSlot(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreContextSlot, "store-context-slot")
+  DECLARE_HYDROGEN_ACCESSOR(StoreContextSlot)
+
+  int slot_index() { return hydrogen()->slot_index(); }
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LPushArgument FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LPushArgument(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(PushArgument, "push-argument")
+};
+
+
+class LDrop FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  explicit LDrop(int count) : count_(count) {}
+
+  int count() const { return count_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Drop, "drop")
+
+ private:
+  int count_;
+};
+
+
+class LStoreCodeEntry FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LStoreCodeEntry(LOperand* function, LOperand* code_object) {
+    inputs_[0] = function;
+    inputs_[1] = code_object;
+  }
+
+  LOperand* function() { return inputs_[0]; }
+  LOperand* code_object() { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
+  DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
+};
+
+
+class LInnerAllocatedObject FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInnerAllocatedObject(LOperand* base_object, LOperand* offset) {
+    inputs_[0] = base_object;
+    inputs_[1] = offset;
+  }
+
+  LOperand* base_object() const { return inputs_[0]; }
+  LOperand* offset() const { return inputs_[1]; }
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
+};
+
+
+class LThisFunction FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
+  DECLARE_HYDROGEN_ACCESSOR(ThisFunction)
+};
+
+
+class LContext FINAL : public LTemplateInstruction<1, 0, 0> {
+ public:
+  DECLARE_CONCRETE_INSTRUCTION(Context, "context")
+  DECLARE_HYDROGEN_ACCESSOR(Context)
+};
+
+
+class LDeclareGlobals FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LDeclareGlobals(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DeclareGlobals, "declare-globals")
+  DECLARE_HYDROGEN_ACCESSOR(DeclareGlobals)
+};
+
+
+class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallJSFunction(LOperand* function) { inputs_[0] = function; }
+
+  LOperand* function() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
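+// Unlike the fixed-arity templates above, this call stores its operands in a
+// ZoneList sized from the descriptor, so it overrides the input/temp
+// iteration hooks itself (see "Iterator support" below).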
+class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
+ public:
+  LCallWithDescriptor(CallInterfaceDescriptor descriptor,
+                      const ZoneList<LOperand*>& operands, Zone* zone)
+      : descriptor_(descriptor),
+        inputs_(descriptor.GetRegisterParameterCount() + 1, zone) {
+    DCHECK(descriptor.GetRegisterParameterCount() + 1 == operands.length());
+    inputs_.AddAll(operands, zone);
+  }
+
+  LOperand* target() const { return inputs_[0]; }
+
+  CallInterfaceDescriptor descriptor() const { return descriptor_; }
+
+ private:
+  DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
+  DECLARE_HYDROGEN_ACCESSOR(CallWithDescriptor)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+
+  CallInterfaceDescriptor descriptor_;
+  ZoneList<LOperand*> inputs_;
+
+  // Iterator support.
+  int InputCount() FINAL { return inputs_.length(); }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
+
+  int TempCount() FINAL { return 0; }
+  LOperand* TempAt(int i) FINAL { return NULL; }
+};
+
+
+class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LInvokeFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
+  DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallFunction FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallFunction(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallFunction, "call-function")
+  DECLARE_HYDROGEN_ACCESSOR(CallFunction)
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNew(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
+  DECLARE_HYDROGEN_ACCESSOR(CallNew)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LCallNewArray(LOperand* context, LOperand* constructor) {
+    inputs_[0] = context;
+    inputs_[1] = constructor;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* constructor() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
+  DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  int arity() const { return hydrogen()->argument_count() - 1; }
+};
+
+
+class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCallRuntime(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
+  DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
+
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+    return save_doubles() == kDontSaveFPRegs;
+  }
+
+  const Runtime::Function* function() const { return hydrogen()->function(); }
+  int arity() const { return hydrogen()->argument_count(); }
+  SaveFPRegsMode save_doubles() const { return hydrogen()->save_doubles(); }
+};
+
+
+class LInteger32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LInteger32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Integer32ToDouble, "int32-to-double")
+};
+
+
+class LUint32ToDouble FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LUint32ToDouble(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Uint32ToDouble, "uint32-to-double")
+};
+
+
+class LNumberTagI FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagI(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagI, "number-tag-i")
+};
+
+
+class LNumberTagU FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagU(LOperand* value, LOperand* temp1, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagU, "number-tag-u")
+};
+
+
+class LNumberTagD FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LNumberTagD(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberTagD, "number-tag-d")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LDoubleToSmi FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToSmi(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToSmi, "double-to-smi")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Sometimes truncating conversion from a double to an int32.
+class LDoubleToI FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleToI(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+// Truncating conversion from a tagged value to an int32.
+class LTaggedToI FINAL : public LTemplateInstruction<1, 1, 2> {
+ public:
+  LTaggedToI(LOperand* value, LOperand* temp, LOperand* temp2) {
+    inputs_[0] = value;
+    temps_[0] = temp;
+    temps_[1] = temp2;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+
+  bool truncating() { return hydrogen()->CanTruncateToInt32(); }
+};
+
+
+class LSmiTag FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LSmiTag(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiTag, "smi-tag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LNumberUntagD FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LNumberUntagD(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
+  DECLARE_HYDROGEN_ACCESSOR(Change)
+};
+
+
+class LSmiUntag FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  LSmiUntag(LOperand* value, bool needs_check) : needs_check_(needs_check) {
+    inputs_[0] = value;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  bool needs_check() const { return needs_check_; }
+
+  DECLARE_CONCRETE_INSTRUCTION(SmiUntag, "smi-untag")
+
+ private:
+  bool needs_check_;
+};
+
+
+class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LStoreNamedField(LOperand* object, LOperand* value, LOperand* temp) {
+    inputs_[0] = object;
+    inputs_[1] = value;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  Representation representation() const {
+    return hydrogen()->field_representation();
+  }
+};
+
+
+class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreNamedGeneric(LOperand* context, LOperand* object, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+    inputs_[2] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  Handle<Object> name() const { return hydrogen()->name(); }
+  StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
+ public:
+  LStoreKeyed(LOperand* object, LOperand* key, LOperand* value) {
+    inputs_[0] = object;
+    inputs_[1] = key;
+    inputs_[2] = value;
+  }
+
+  bool is_external() const { return hydrogen()->is_external(); }
+  bool is_fixed_typed_array() const {
+    return hydrogen()->is_fixed_typed_array();
+  }
+  bool is_typed_elements() const {
+    return is_external() || is_fixed_typed_array();
+  }
+  LOperand* elements() { return inputs_[0]; }
+  LOperand* key() { return inputs_[1]; }
+  LOperand* value() { return inputs_[2]; }
+  ElementsKind elements_kind() const { return hydrogen()->elements_kind(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
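+  // Assumption: double add/sub/mul/div can only produce quiet NaNs, which
+  // never alias the hole NaN encoding, so their results skip canonicalization.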
+  bool NeedsCanonicalization() {
+    if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
+        hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
+      return false;
+    }
+    return hydrogen()->NeedsCanonicalization();
+  }
+  uint32_t base_offset() const { return hydrogen()->base_offset(); }
+};
+
+
+class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
+ public:
+  LStoreKeyedGeneric(LOperand* context, LOperand* obj, LOperand* key,
+                     LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = obj;
+    inputs_[2] = key;
+    inputs_[3] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+  LOperand* key() { return inputs_[2]; }
+  LOperand* value() { return inputs_[3]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
+  DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  StrictMode strict_mode() { return hydrogen()->strict_mode(); }
+};
+
+
+class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 1> {
+ public:
+  LTransitionElementsKind(LOperand* object, LOperand* context,
+                          LOperand* new_map_temp) {
+    inputs_[0] = object;
+    inputs_[1] = context;
+    temps_[0] = new_map_temp;
+  }
+
+  LOperand* context() { return inputs_[1]; }
+  LOperand* object() { return inputs_[0]; }
+  LOperand* new_map_temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind,
+                               "transition-elements-kind")
+  DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+
+  Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
+  Handle<Map> transitioned_map() {
+    return hydrogen()->transitioned_map().handle();
+  }
+  ElementsKind from_kind() { return hydrogen()->from_kind(); }
+  ElementsKind to_kind() { return hydrogen()->to_kind(); }
+};
+
+
+class LTrapAllocationMemento FINAL : public LTemplateInstruction<0, 1, 1> {
+ public:
+  LTrapAllocationMemento(LOperand* object, LOperand* temp) {
+    inputs_[0] = object;
+    temps_[0] = temp;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TrapAllocationMemento, "trap-allocation-memento")
+};
+
+
+class LStringAdd FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringAdd(LOperand* context, LOperand* left, LOperand* right) {
+    inputs_[0] = context;
+    inputs_[1] = left;
+    inputs_[2] = right;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* left() { return inputs_[1]; }
+  LOperand* right() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
+  DECLARE_HYDROGEN_ACCESSOR(StringAdd)
+};
+
+
+class LStringCharCodeAt FINAL : public LTemplateInstruction<1, 3, 0> {
+ public:
+  LStringCharCodeAt(LOperand* context, LOperand* string, LOperand* index) {
+    inputs_[0] = context;
+    inputs_[1] = string;
+    inputs_[2] = index;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* string() { return inputs_[1]; }
+  LOperand* index() { return inputs_[2]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharCodeAt, "string-char-code-at")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharCodeAt)
+};
+
+
+class LStringCharFromCode FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LStringCharFromCode(LOperand* context, LOperand* char_code) {
+    inputs_[0] = context;
+    inputs_[1] = char_code;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* char_code() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
+  DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
+};
+
+
+class LCheckValue FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckValue(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckValue, "check-value")
+  DECLARE_HYDROGEN_ACCESSOR(CheckValue)
+};
+
+
+class LCheckInstanceType FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckInstanceType(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckInstanceType, "check-instance-type")
+  DECLARE_HYDROGEN_ACCESSOR(CheckInstanceType)
+};
+
+
+class LCheckMaps FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckMaps(LOperand* value = NULL) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMaps, "check-maps")
+  DECLARE_HYDROGEN_ACCESSOR(CheckMaps)
+};
+
+
+class LCheckSmi FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LCheckSmi(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
+};
+
+
+class LCheckNonSmi FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LCheckNonSmi(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
+  DECLARE_HYDROGEN_ACCESSOR(CheckHeapObject)
+};
+
+
+class LClampDToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampDToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
+};
+
+
+class LClampIToUint8 FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LClampIToUint8(LOperand* unclamped) { inputs_[0] = unclamped; }
+
+  LOperand* unclamped() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
+};
+
+
+class LClampTToUint8 FINAL : public LTemplateInstruction<1, 1, 1> {
+ public:
+  LClampTToUint8(LOperand* unclamped, LOperand* temp) {
+    inputs_[0] = unclamped;
+    temps_[0] = temp;
+  }
+
+  LOperand* unclamped() { return inputs_[0]; }
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
+};
+
+
+class LDoubleBits FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LDoubleBits(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(DoubleBits, "double-bits")
+  DECLARE_HYDROGEN_ACCESSOR(DoubleBits)
+};
+
+
+class LConstructDouble FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LConstructDouble(LOperand* hi, LOperand* lo) {
+    inputs_[0] = hi;
+    inputs_[1] = lo;
+  }
+
+  LOperand* hi() { return inputs_[0]; }
+  LOperand* lo() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ConstructDouble, "construct-double")
+};
+
+
+class LAllocate FINAL : public LTemplateInstruction<1, 2, 2> {
+ public:
+  LAllocate(LOperand* context, LOperand* size, LOperand* temp1,
+            LOperand* temp2) {
+    inputs_[0] = context;
+    inputs_[1] = size;
+    temps_[0] = temp1;
+    temps_[1] = temp2;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* size() { return inputs_[1]; }
+  LOperand* temp1() { return temps_[0]; }
+  LOperand* temp2() { return temps_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Allocate, "allocate")
+  DECLARE_HYDROGEN_ACCESSOR(Allocate)
+};
+
+
+class LRegExpLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LRegExpLiteral(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(RegExpLiteral, "regexp-literal")
+  DECLARE_HYDROGEN_ACCESSOR(RegExpLiteral)
+};
+
+
+class LFunctionLiteral FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LFunctionLiteral(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(FunctionLiteral, "function-literal")
+  DECLARE_HYDROGEN_ACCESSOR(FunctionLiteral)
+};
+
+
+class LToFastProperties FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LToFastProperties(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
+  DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
+};
+
+
+class LTypeof FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LTypeof(LOperand* context, LOperand* value) {
+    inputs_[0] = context;
+    inputs_[1] = value;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* value() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(Typeof, "typeof")
+};
+
+
+class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
+ public:
+  explicit LTypeofIsAndBranch(LOperand* value) { inputs_[0] = value; }
+
+  LOperand* value() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(TypeofIsAndBranch, "typeof-is-and-branch")
+  DECLARE_HYDROGEN_ACCESSOR(TypeofIsAndBranch)
+
+  Handle<String> type_literal() { return hydrogen()->type_literal(); }
+
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+};
+
+
+class LIsConstructCallAndBranch FINAL : public LControlInstruction<0, 1> {
+ public:
+  explicit LIsConstructCallAndBranch(LOperand* temp) { temps_[0] = temp; }
+
+  LOperand* temp() { return temps_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(IsConstructCallAndBranch,
+                               "is-construct-call-and-branch")
+};
+
+
+class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
+ public:
+  LOsrEntry() {}
+
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
+  DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
+};
+
+
+class LStackCheck FINAL : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStackCheck(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
+  DECLARE_HYDROGEN_ACCESSOR(StackCheck)
+
+  Label* done_label() { return &done_label_; }
+
+ private:
+  Label done_label_;
+};
+
+
+class LForInPrepareMap FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LForInPrepareMap(LOperand* context, LOperand* object) {
+    inputs_[0] = context;
+    inputs_[1] = object;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* object() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInPrepareMap, "for-in-prepare-map")
+};
+
+
+class LForInCacheArray FINAL : public LTemplateInstruction<1, 1, 0> {
+ public:
+  explicit LForInCacheArray(LOperand* map) { inputs_[0] = map; }
+
+  LOperand* map() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(ForInCacheArray, "for-in-cache-array")
+
+  int idx() { return HForInCacheArray::cast(this->hydrogen_value())->idx(); }
+};
+
+
+class LCheckMapValue FINAL : public LTemplateInstruction<0, 2, 0> {
+ public:
+  LCheckMapValue(LOperand* value, LOperand* map) {
+    inputs_[0] = value;
+    inputs_[1] = map;
+  }
+
+  LOperand* value() { return inputs_[0]; }
+  LOperand* map() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(CheckMapValue, "check-map-value")
+};
+
+
+class LLoadFieldByIndex FINAL : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LLoadFieldByIndex(LOperand* object, LOperand* index) {
+    inputs_[0] = object;
+    inputs_[1] = index;
+  }
+
+  LOperand* object() { return inputs_[0]; }
+  LOperand* index() { return inputs_[1]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(LoadFieldByIndex, "load-field-by-index")
+};
+
+
+class LStoreFrameContext : public LTemplateInstruction<0, 1, 0> {
+ public:
+  explicit LStoreFrameContext(LOperand* context) { inputs_[0] = context; }
+
+  LOperand* context() { return inputs_[0]; }
+
+  DECLARE_CONCRETE_INSTRUCTION(StoreFrameContext, "store-frame-context")
+};
+
+
+class LAllocateBlockContext : public LTemplateInstruction<1, 2, 0> {
+ public:
+  LAllocateBlockContext(LOperand* context, LOperand* function) {
+    inputs_[0] = context;
+    inputs_[1] = function;
+  }
+
+  LOperand* context() { return inputs_[0]; }
+  LOperand* function() { return inputs_[1]; }
+
+  Handle<ScopeInfo> scope_info() { return hydrogen()->scope_info(); }
+
+  DECLARE_CONCRETE_INSTRUCTION(AllocateBlockContext, "allocate-block-context")
+  DECLARE_HYDROGEN_ACCESSOR(AllocateBlockContext)
+};
+
+
+class LChunkBuilder;
+class LPlatformChunk FINAL : public LChunk {
+ public:
+  LPlatformChunk(CompilationInfo* info, HGraph* graph) : LChunk(info, graph) {}
+
+  int GetNextSpillIndex(RegisterKind kind);
+  LOperand* GetNextSpillSlot(RegisterKind kind);
+};
+
+
+class LChunkBuilder FINAL : public LChunkBuilderBase {
+ public:
+  LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
+      : LChunkBuilderBase(info, graph),
+        current_instruction_(NULL),
+        current_block_(NULL),
+        next_block_(NULL),
+        allocator_(allocator) {}
+
+  // Build the sequence for the graph.
+  LPlatformChunk* Build();
+
+// Declare methods that deal with the individual node types.
+#define DECLARE_DO(type) LInstruction* Do##type(H##type* node);
+  HYDROGEN_CONCRETE_INSTRUCTION_LIST(DECLARE_DO)
+#undef DECLARE_DO
+
+  LInstruction* DoMultiplyAdd(HMul* mul, HValue* addend);
+  LInstruction* DoMultiplySub(HValue* minuend, HMul* mul);
+  LInstruction* DoRSub(HSub* instr);
+
+  static bool HasMagicNumberForDivisor(int32_t divisor);
+
+  LInstruction* DoMathFloor(HUnaryMathOperation* instr);
+  LInstruction* DoMathRound(HUnaryMathOperation* instr);
+  LInstruction* DoMathFround(HUnaryMathOperation* instr);
+  LInstruction* DoMathAbs(HUnaryMathOperation* instr);
+  LInstruction* DoMathLog(HUnaryMathOperation* instr);
+  LInstruction* DoMathExp(HUnaryMathOperation* instr);
+  LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
+  LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
+  LInstruction* DoMathClz32(HUnaryMathOperation* instr);
+  LInstruction* DoDivByPowerOf2I(HDiv* instr);
+  LInstruction* DoDivByConstI(HDiv* instr);
+  LInstruction* DoDivI(HDiv* instr);
+  LInstruction* DoModByPowerOf2I(HMod* instr);
+  LInstruction* DoModByConstI(HMod* instr);
+  LInstruction* DoModI(HMod* instr);
+  LInstruction* DoFlooringDivByPowerOf2I(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivByConstI(HMathFloorOfDiv* instr);
+  LInstruction* DoFlooringDivI(HMathFloorOfDiv* instr);
+
+ private:
+  // Methods for getting operands for Use / Define / Temp.
+  LUnallocated* ToUnallocated(Register reg);
+  LUnallocated* ToUnallocated(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  MUST_USE_RESULT LOperand* Use(HValue* value, LUnallocated* operand);
+  MUST_USE_RESULT LOperand* UseFixed(HValue* value, Register fixed_register);
+  MUST_USE_RESULT LOperand* UseFixedDouble(HValue* value,
+                                           DoubleRegister fixed_register);
+
+  // A value that is guaranteed to be allocated to a register.
+  // An operand created by UseRegister is guaranteed to be live until the end
+  // of the instruction. This means that the register allocator will not reuse
+  // its register for any other operand inside the instruction.
+  // An operand created by UseRegisterAtStart is guaranteed to be live only at
+  // the instruction start. The register allocator is free to assign the same
+  // register to some other operand used inside the instruction (i.e. a
+  // temporary or an output).
+  MUST_USE_RESULT LOperand* UseRegister(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterAtStart(HValue* value);
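+  // Illustrative sketch (not part of the original port): inside a Do* method
+  // one might write
+  //   LOperand* left = UseRegister(instr->left());           // live across
+  //   LOperand* right = UseRegisterAtStart(instr->right());  // start only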
+
+  // An input operand in a register that may be trashed.
+  MUST_USE_RESULT LOperand* UseTempRegister(HValue* value);
+
+  // An input operand in a register or stack slot.
+  MUST_USE_RESULT LOperand* Use(HValue* value);
+  MUST_USE_RESULT LOperand* UseAtStart(HValue* value);
+
+  // An input operand in a register, stack slot or a constant operand.
+  MUST_USE_RESULT LOperand* UseOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseOrConstantAtStart(HValue* value);
+
+  // An input operand in a register or a constant operand.
+  MUST_USE_RESULT LOperand* UseRegisterOrConstant(HValue* value);
+  MUST_USE_RESULT LOperand* UseRegisterOrConstantAtStart(HValue* value);
+
+  // An input operand in a constant operand.
+  MUST_USE_RESULT LOperand* UseConstant(HValue* value);
+
+  // An input operand in register, stack slot or a constant operand.
+  // Will not be moved to a register even if one is freely available.
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
+
+  // Temporary operand that must be in a register.
+  MUST_USE_RESULT LUnallocated* TempRegister();
+  MUST_USE_RESULT LUnallocated* TempDoubleRegister();
+  MUST_USE_RESULT LOperand* FixedTemp(Register reg);
+  MUST_USE_RESULT LOperand* FixedTemp(DoubleRegister reg);
+
+  // Methods for setting up define-use relationships.
+  // Return the same instruction that they are passed.
+  LInstruction* Define(LTemplateResultInstruction<1>* instr,
+                       LUnallocated* result);
+  LInstruction* DefineAsRegister(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineAsSpilled(LTemplateResultInstruction<1>* instr,
+                                int index);
+  LInstruction* DefineSameAsFirst(LTemplateResultInstruction<1>* instr);
+  LInstruction* DefineFixed(LTemplateResultInstruction<1>* instr, Register reg);
+  LInstruction* DefineFixedDouble(LTemplateResultInstruction<1>* instr,
+                                  DoubleRegister reg);
+  LInstruction* AssignEnvironment(LInstruction* instr);
+  LInstruction* AssignPointerMap(LInstruction* instr);
+
+  enum CanDeoptimize { CAN_DEOPTIMIZE_EAGERLY, CANNOT_DEOPTIMIZE_EAGERLY };
+
+  // By default we assume that instruction sequences generated for calls
+  // cannot deoptimize eagerly, and we do not attach an environment to such
+  // an instruction.
+  LInstruction* MarkAsCall(
+      LInstruction* instr, HInstruction* hinstr,
+      CanDeoptimize can_deoptimize = CANNOT_DEOPTIMIZE_EAGERLY);
+
+  void VisitInstruction(HInstruction* current);
+  void AddInstruction(LInstruction* instr, HInstruction* current);
+
+  void DoBasicBlock(HBasicBlock* block, HBasicBlock* next_block);
+  LInstruction* DoShift(Token::Value op, HBitwiseBinaryOperation* instr);
+  LInstruction* DoArithmeticD(Token::Value op,
+                              HArithmeticBinaryOperation* instr);
+  LInstruction* DoArithmeticT(Token::Value op, HBinaryOperation* instr);
+
+  HInstruction* current_instruction_;
+  HBasicBlock* current_block_;
+  HBasicBlock* next_block_;
+  LAllocator* allocator_;
+
+  DISALLOW_COPY_AND_ASSIGN(LChunkBuilder);
+};
+
+#undef DECLARE_HYDROGEN_ACCESSOR
+#undef DECLARE_CONCRETE_INSTRUCTION
+}
+}  // namespace v8::internal
+
+#endif  // V8_PPC_LITHIUM_PPC_H_
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.cc b/deps/v8/src/ppc/macro-assembler-ppc.cc
new file mode 100644 (file)
index 0000000..0b3d729
--- /dev/null
@@ -0,0 +1,4819 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <assert.h>  // For assert
+#include <limits.h>  // For LONG_MIN, LONG_MAX.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/base/bits.h"
+#include "src/base/division-by-constant.h"
+#include "src/bootstrapper.h"
+#include "src/codegen.h"
+#include "src/cpu-profiler.h"
+#include "src/debug.h"
+#include "src/isolate-inl.h"
+#include "src/runtime/runtime.h"
+
+namespace v8 {
+namespace internal {
+
+MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
+    : Assembler(arg_isolate, buffer, size),
+      generating_stub_(false),
+      has_frame_(false) {
+  if (isolate() != NULL) {
+    code_object_ =
+        Handle<Object>(isolate()->heap()->undefined_value(), isolate());
+  }
+}
+
+
+void MacroAssembler::Jump(Register target) {
+  mtctr(target);
+  bctr();
+}
+
+
+void MacroAssembler::JumpToJSEntry(Register target) {
+  Move(ip, target);
+  Jump(ip);
+}
+
+
+void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
+                          Condition cond, CRegister cr) {
+  Label skip;
+
+  if (cond != al) b(NegateCondition(cond), &skip, cr);
+
+  DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
+
+  mov(ip, Operand(target, rmode));
+  mtctr(ip);
+  bctr();
+
+  bind(&skip);
+}
+
+
+void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
+                          CRegister cr) {
+  DCHECK(!RelocInfo::IsCodeTarget(rmode));
+  Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
+}
+
+
+void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
+                          Condition cond) {
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
+  // 'code' is always generated PPC code, never THUMB code.
+  AllowDeferredHandleDereference embedding_raw_address;
+  Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
+}
+
+
+int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; }
+
+
+void MacroAssembler::Call(Register target) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  Label start;
+  bind(&start);
+
+  // Statement positions are expected to be recorded when the target
+  // address is loaded.
+  positions_recorder()->WriteRecordedPositions();
+
+  // branch via link register and set LK bit for return point
+  mtctr(target);
+  bctrl();
+
+  DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
+}
+
+
+void MacroAssembler::CallJSEntry(Register target) {
+  DCHECK(target.is(ip));
+  Call(target);
+}
+
+
+int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
+                             Condition cond) {
+  Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
+  return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize;
+}
+
+
+int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
+                                                   RelocInfo::Mode rmode,
+                                                   Condition cond) {
+  return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
+}
+
+
+void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
+                          Condition cond) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  DCHECK(cond == al);
+
+#ifdef DEBUG
+  // Check the expected size before generating code to ensure we assume the
+  // same constant pool availability (e.g., whether the constant pool is full
+  // or not).
+  int expected_size = CallSize(target, rmode, cond);
+  Label start;
+  bind(&start);
+#endif
+
+  // Statement positions are expected to be recorded when the target
+  // address is loaded.
+  positions_recorder()->WriteRecordedPositions();
+
+  // This can likely be optimized to make use of bc() with a 24-bit relative
+  // offset, e.g.:
+  //   RecordRelocInfo(x.rmode_, x.imm_);
+  //   bc(BA, ..., offset, LKset);
+
+  mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
+  mtctr(ip);
+  bctrl();
+
+  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+}
+
+
+int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
+                             TypeFeedbackId ast_id, Condition cond) {
+  AllowDeferredHandleDereference using_raw_address;
+  return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
+}
+
+
+void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
+                          TypeFeedbackId ast_id, Condition cond) {
+  BlockTrampolinePoolScope block_trampoline_pool(this);
+  DCHECK(RelocInfo::IsCodeTarget(rmode));
+
+#ifdef DEBUG
+  // Check the expected size before generating code to ensure we assume the
+  // same constant pool availability (e.g., whether the constant pool is full
+  // or not).
+  int expected_size = CallSize(code, rmode, ast_id, cond);
+  Label start;
+  bind(&start);
+#endif
+
+  if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
+    SetRecordedAstId(ast_id);
+    rmode = RelocInfo::CODE_TARGET_WITH_ID;
+  }
+  AllowDeferredHandleDereference using_raw_address;
+  Call(reinterpret_cast<Address>(code.location()), rmode, cond);
+  DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
+}
+
+
+void MacroAssembler::Ret(Condition cond) {
+  DCHECK(cond == al);
+  blr();
+}
+
+
+void MacroAssembler::Drop(int count, Condition cond) {
+  DCHECK(cond == al);
+  if (count > 0) {
+    Add(sp, sp, count * kPointerSize, r0);
+  }
+}
+
+
+void MacroAssembler::Ret(int drop, Condition cond) {
+  Drop(drop, cond);
+  Ret(cond);
+}
+
+
+void MacroAssembler::Call(Label* target) { b(target, SetLK); }
+
+
+void MacroAssembler::Push(Handle<Object> handle) {
+  mov(r0, Operand(handle));
+  push(r0);
+}
+
+
+void MacroAssembler::Move(Register dst, Handle<Object> value) {
+  AllowDeferredHandleDereference smi_check;
+  if (value->IsSmi()) {
+    LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
+  } else {
+    DCHECK(value->IsHeapObject());
+    if (isolate()->heap()->InNewSpace(*value)) {
+      Handle<Cell> cell = isolate()->factory()->NewCell(value);
+      mov(dst, Operand(cell));
+      LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
+    } else {
+      mov(dst, Operand(value));
+    }
+  }
+}
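+
+// Note (a sketch of the rationale above): values in new space may move during
+// GC, so they are referenced indirectly through a Cell that the GC keeps up
+// to date; old-space values can be embedded in the code directly.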
+
+
+void MacroAssembler::Move(Register dst, Register src, Condition cond) {
+  DCHECK(cond == al);
+  if (!dst.is(src)) {
+    mr(dst, src);
+  }
+}
+
+
+void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
+  if (!dst.is(src)) {
+    fmr(dst, src);
+  }
+}
+
+
+void MacroAssembler::MultiPush(RegList regs) {
+  int16_t num_to_push = NumberOfBitsSet(regs);
+  int16_t stack_offset = num_to_push * kPointerSize;
+
+  subi(sp, sp, Operand(stack_offset));
+  for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
+    if ((regs & (1 << i)) != 0) {
+      stack_offset -= kPointerSize;
+      StoreP(ToRegister(i), MemOperand(sp, stack_offset));
+    }
+  }
+}
+
+
+void MacroAssembler::MultiPop(RegList regs) {
+  int16_t stack_offset = 0;
+
+  for (int16_t i = 0; i < kNumRegisters; i++) {
+    if ((regs & (1 << i)) != 0) {
+      LoadP(ToRegister(i), MemOperand(sp, stack_offset));
+      stack_offset += kPointerSize;
+    }
+  }
+  addi(sp, sp, Operand(stack_offset));
+}
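+
+// Illustrative usage (a sketch, not part of the original port): callers pass
+// a RegList bitmask, e.g.
+//   RegList regs = r14.bit() | r15.bit() | fp.bit();
+//   masm->MultiPush(regs);  // sp -= 3 * kPointerSize; highest encoding first
+//   ...
+//   masm->MultiPop(regs);   // reloads in ascending order; sp += 3 * kPointerSize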
+
+
+void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
+                              Condition cond) {
+  DCHECK(cond == al);
+  LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
+}
+
+
+void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
+                               Condition cond) {
+  DCHECK(cond == al);
+  StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
+}
+
+
+void MacroAssembler::InNewSpace(Register object, Register scratch,
+                                Condition cond, Label* branch) {
+  // N.B. scratch may be the same register as object.
+  DCHECK(cond == eq || cond == ne);
+  mov(r0, Operand(ExternalReference::new_space_mask(isolate())));
+  and_(scratch, object, r0);
+  mov(r0, Operand(ExternalReference::new_space_start(isolate())));
+  cmp(scratch, r0);
+  b(cond, branch);
+}
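+
+// In effect (a sketch): the check above branches when
+//   ((object & new_space_mask) == new_space_start)
+// compares according to 'cond' (eq means in new space, ne means not).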
+
+
+void MacroAssembler::RecordWriteField(
+    Register object, int offset, Register value, Register dst,
+    LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+    RememberedSetAction remembered_set_action, SmiCheck smi_check,
+    PointersToHereCheck pointers_to_here_check_for_value) {
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of Smis.
+  Label done;
+
+  // Skip barrier if writing a smi.
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  // Although the object register is tagged, the offset is relative to the
+  // start of the object, so the offset must be a multiple of kPointerSize.
+  DCHECK(IsAligned(offset, kPointerSize));
+
+  Add(dst, object, offset - kHeapObjectTag, r0);
+  if (emit_debug_code()) {
+    Label ok;
+    andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+    beq(&ok, cr0);
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
+              OMIT_SMI_CHECK, pointers_to_here_check_for_value);
+
+  bind(&done);
+
+  // Clobber clobbered input registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
+    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
+  }
+}
+
+
+// Will clobber 4 registers: object, map, dst, ip.  The
+// register 'object' contains a heap object pointer.
+void MacroAssembler::RecordWriteForMap(Register object, Register map,
+                                       Register dst,
+                                       LinkRegisterStatus lr_status,
+                                       SaveFPRegsMode fp_mode) {
+  if (emit_debug_code()) {
+    LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
+    Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  if (!FLAG_incremental_marking) {
+    return;
+  }
+
+  if (emit_debug_code()) {
+    LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
+    cmp(ip, map);
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  Label done;
+
+  // A single check of the map's page's interesting flag suffices, since it is
+  // only set during incremental collection, and then it's also guaranteed that
+  // the from object's page's interesting flag is also set.  This optimization
+  // relies on the fact that maps can never be in new space.
+  CheckPageFlag(map,
+                map,  // Used as scratch.
+                MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+
+  addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
+  if (emit_debug_code()) {
+    Label ok;
+    andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
+    beq(&ok, cr0);
+    stop("Unaligned cell in write barrier");
+    bind(&ok);
+  }
+
+  // Record the actual write.
+  if (lr_status == kLRHasNotBeenSaved) {
+    mflr(r0);
+    push(r0);
+  }
+  RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
+                       fp_mode);
+  CallStub(&stub);
+  if (lr_status == kLRHasNotBeenSaved) {
+    pop(r0);
+    mtlr(r0);
+  }
+
+  bind(&done);
+
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
+
+  // Clobber clobbered registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
+    mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
+  }
+}
+
+
+// Will clobber 4 registers: object, address, scratch, ip.  The
+// register 'object' contains a heap object pointer.  The heap object
+// tag is shifted away.
+void MacroAssembler::RecordWrite(
+    Register object, Register address, Register value,
+    LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
+    RememberedSetAction remembered_set_action, SmiCheck smi_check,
+    PointersToHereCheck pointers_to_here_check_for_value) {
+  DCHECK(!object.is(value));
+  if (emit_debug_code()) {
+    LoadP(r0, MemOperand(address));
+    cmp(r0, value);
+    Check(eq, kWrongAddressOrValuePassedToRecordWrite);
+  }
+
+  if (remembered_set_action == OMIT_REMEMBERED_SET &&
+      !FLAG_incremental_marking) {
+    return;
+  }
+
+  // First, check if a write barrier is even needed. The tests below
+  // catch stores of smis and stores into the young generation.
+  Label done;
+
+  if (smi_check == INLINE_SMI_CHECK) {
+    JumpIfSmi(value, &done);
+  }
+
+  if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
+    CheckPageFlag(value,
+                  value,  // Used as scratch.
+                  MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
+  }
+  CheckPageFlag(object,
+                value,  // Used as scratch.
+                MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
+
+  // Record the actual write.
+  if (lr_status == kLRHasNotBeenSaved) {
+    mflr(r0);
+    push(r0);
+  }
+  RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
+                       fp_mode);
+  CallStub(&stub);
+  if (lr_status == kLRHasNotBeenSaved) {
+    pop(r0);
+    mtlr(r0);
+  }
+
+  bind(&done);
+
+  // Count number of write barriers in generated code.
+  isolate()->counters()->write_barriers_static()->Increment();
+  IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
+                   value);
+
+  // Clobber clobbered registers when running with the debug-code flag
+  // turned on to provoke errors.
+  if (emit_debug_code()) {
+    mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
+    mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
+  }
+}
+
+
+void MacroAssembler::RememberedSetHelper(Register object,  // For debug tests.
+                                         Register address, Register scratch,
+                                         SaveFPRegsMode fp_mode,
+                                         RememberedSetFinalAction and_then) {
+  Label done;
+  if (emit_debug_code()) {
+    Label ok;
+    JumpIfNotInNewSpace(object, scratch, &ok);
+    stop("Remembered set pointer is in new space");
+    bind(&ok);
+  }
+  // Load store buffer top.
+  ExternalReference store_buffer =
+      ExternalReference::store_buffer_top(isolate());
+  mov(ip, Operand(store_buffer));
+  LoadP(scratch, MemOperand(ip));
+  // Store pointer to buffer and increment buffer top.
+  StoreP(address, MemOperand(scratch));
+  addi(scratch, scratch, Operand(kPointerSize));
+  // Write back new top of buffer.
+  StoreP(scratch, MemOperand(ip));
+  // Check for the end of the buffer and call the overflow stub if we hit it.
+  mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit));
+  and_(r0, scratch, r0, SetRC);
+
+  if (and_then == kFallThroughAtEnd) {
+    beq(&done, cr0);
+  } else {
+    DCHECK(and_then == kReturnAtEnd);
+    beq(&done, cr0);
+  }
+  mflr(r0);
+  push(r0);
+  StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
+  CallStub(&store_buffer_overflow);
+  pop(r0);
+  mtlr(r0);
+  bind(&done);
+  if (and_then == kReturnAtEnd) {
+    Ret();
+  }
+}
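+
+// In effect (a sketch of the sequence above):
+//   *store_buffer_top++ = address;
+//   if (store_buffer_top & kStoreBufferOverflowBit) StoreBufferOverflowStub(...);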
+
+
+void MacroAssembler::PushFixedFrame(Register marker_reg) {
+  mflr(r0);
+#if V8_OOL_CONSTANT_POOL
+  if (marker_reg.is_valid()) {
+    Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
+  } else {
+    Push(r0, fp, kConstantPoolRegister, cp);
+  }
+#else
+  if (marker_reg.is_valid()) {
+    Push(r0, fp, cp, marker_reg);
+  } else {
+    Push(r0, fp, cp);
+  }
+#endif
+}
+
+
+void MacroAssembler::PopFixedFrame(Register marker_reg) {
+#if V8_OOL_CONSTANT_POOL
+  if (marker_reg.is_valid()) {
+    Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
+  } else {
+    Pop(r0, fp, kConstantPoolRegister, cp);
+  }
+#else
+  if (marker_reg.is_valid()) {
+    Pop(r0, fp, cp, marker_reg);
+  } else {
+    Pop(r0, fp, cp);
+  }
+#endif
+  mtlr(r0);
+}
+
+
+// Push and pop all registers that can hold pointers.
+void MacroAssembler::PushSafepointRegisters() {
+  // Safepoints expect a block of kNumSafepointRegisters values on the
+  // stack, so adjust the stack for unsaved registers.
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  DCHECK(num_unsaved >= 0);
+  if (num_unsaved > 0) {
+    subi(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
+  MultiPush(kSafepointSavedRegisters);
+}
+
+
+void MacroAssembler::PopSafepointRegisters() {
+  const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
+  MultiPop(kSafepointSavedRegisters);
+  if (num_unsaved > 0) {
+    addi(sp, sp, Operand(num_unsaved * kPointerSize));
+  }
+}
+
+
+void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
+  StoreP(src, SafepointRegisterSlot(dst));
+}
+
+
+void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
+  LoadP(dst, SafepointRegisterSlot(src));
+}
+
+
+int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
+  // The registers are pushed starting with the highest encoding,
+  // which means that lowest encodings are closest to the stack pointer.
+  RegList regs = kSafepointSavedRegisters;
+  int index = 0;
+
+  DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
+
+  for (int16_t i = 0; i < reg_code; i++) {
+    if ((regs & (1 << i)) != 0) {
+      index++;
+    }
+  }
+
+  return index;
+}
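+
+// Equivalently (a sketch): the index is the number of saved registers with a
+// lower encoding, i.e.
+//   base::bits::CountPopulation32(regs & ((1 << reg_code) - 1)).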
+
+
+MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
+  return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
+}
+
+
+MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
+  // General purpose registers are pushed last on the stack.
+  int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize;
+  int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
+  return MemOperand(sp, doubles_size + register_offset);
+}
+
+
+void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
+                                     const DoubleRegister src) {
+  Label done;
+
+  // Test for NaN
+  fcmpu(src, src);
+
+  if (dst.is(src)) {
+    bordered(&done);
+  } else {
+    Label is_nan;
+    bunordered(&is_nan);
+    fmr(dst, src);
+    b(&done);
+    bind(&is_nan);
+  }
+
+  // Replace with canonical NaN.
+  double nan_value = FixedDoubleArray::canonical_not_the_hole_nan_as_double();
+  LoadDoubleLiteral(dst, nan_value, r0);
+
+  bind(&done);
+}
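+
+// In effect (a sketch): dst = std::isnan(src) ? canonical_NaN : src.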
+
+
+void MacroAssembler::ConvertIntToDouble(Register src,
+                                        DoubleRegister double_dst) {
+  MovIntToDouble(double_dst, src, r0);
+  fcfid(double_dst, double_dst);
+}
+
+
+void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
+                                                DoubleRegister double_dst) {
+  MovUnsignedIntToDouble(double_dst, src, r0);
+  fcfid(double_dst, double_dst);
+}
+
+
+void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
+                                       const Register src,
+                                       const Register int_scratch) {
+  MovIntToDouble(dst, src, int_scratch);
+  fcfid(dst, dst);
+  frsp(dst, dst);
+}
+
+
+void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_PPC64
+                                          const Register dst_hi,
+#endif
+                                          const Register dst,
+                                          const DoubleRegister double_dst,
+                                          FPRoundingMode rounding_mode) {
+  if (rounding_mode == kRoundToZero) {
+    fctidz(double_dst, double_input);
+  } else {
+    SetRoundingMode(rounding_mode);
+    fctid(double_dst, double_input);
+    ResetRoundingMode();
+  }
+
+  MovDoubleToInt64(
+#if !V8_TARGET_ARCH_PPC64
+      dst_hi,
+#endif
+      dst, double_dst);
+}
+
+
+#if V8_OOL_CONSTANT_POOL
+void MacroAssembler::LoadConstantPoolPointerRegister(
+    CodeObjectAccessMethod access_method, int ip_code_entry_delta) {
+  Register base;
+  int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize;
+  if (access_method == CAN_USE_IP) {
+    base = ip;
+    constant_pool_offset += ip_code_entry_delta;
+  } else {
+    DCHECK(access_method == CONSTRUCT_INTERNAL_REFERENCE);
+    base = kConstantPoolRegister;
+    ConstantPoolUnavailableScope constant_pool_unavailable(this);
+
+    // CheckBuffer() is called too frequently. This will pre-grow the buffer
+    // if needed to avoid splitting the relocation info and the instructions.
+    EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
+
+    uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset();
+    mov(base, Operand(code_start, RelocInfo::INTERNAL_REFERENCE));
+  }
+  LoadP(kConstantPoolRegister, MemOperand(base, constant_pool_offset));
+}
+#endif
+
+
+void MacroAssembler::StubPrologue(int prologue_offset) {
+  LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
+  PushFixedFrame(r11);
+  // Adjust FP to point to saved FP.
+  addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+#if V8_OOL_CONSTANT_POOL
+  // ip contains prologue address
+  LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
+  set_ool_constant_pool_available(true);
+#endif
+}
+
+
+void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
+  {
+    PredictableCodeSizeScope predictable_code_size_scope(
+        this, kNoCodeAgeSequenceLength);
+    Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
+    // The following instructions must remain together and unmodified
+    // for code aging to work properly.
+    if (code_pre_aging) {
+      // Pre-age the code.
+      // This matches the code found in PatchPlatformCodeAge()
+      Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
+      intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
+      // Don't use Call -- we need to preserve ip and lr
+      nop();  // marker to detect sequence (see IsOld)
+      mov(r3, Operand(target));
+      Jump(r3);
+      for (int i = 0; i < kCodeAgingSequenceNops; i++) {
+        nop();
+      }
+    } else {
+      // This matches the code found in GetNoCodeAgeSequence()
+      PushFixedFrame(r4);
+      // Adjust fp to point to saved fp.
+      addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+      for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
+        nop();
+      }
+    }
+  }
+#if V8_OOL_CONSTANT_POOL
+  // ip contains prologue address
+  LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
+  set_ool_constant_pool_available(true);
+#endif
+}
+
+
+void MacroAssembler::EnterFrame(StackFrame::Type type,
+                                bool load_constant_pool_pointer_reg) {
+  if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
+    PushFixedFrame();
+#if V8_OOL_CONSTANT_POOL
+    // This path should not rely on ip containing code entry.
+    LoadConstantPoolPointerRegister(CONSTRUCT_INTERNAL_REFERENCE);
+#endif
+    LoadSmiLiteral(ip, Smi::FromInt(type));
+    push(ip);
+  } else {
+    LoadSmiLiteral(ip, Smi::FromInt(type));
+    PushFixedFrame(ip);
+  }
+  // Adjust FP to point to saved FP.
+  addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
+
+  mov(r0, Operand(CodeObject()));
+  push(r0);
+}
+
+
+int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
+#if V8_OOL_CONSTANT_POOL
+  ConstantPoolUnavailableScope constant_pool_unavailable(this);
+#endif
+  // r3: preserved
+  // r4: preserved
+  // r5: preserved
+
+  // Drop the execution stack down to the frame pointer and restore
+  // the caller frame pointer, return address and constant pool pointer.
+  int frame_ends;
+  LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
+  LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
+#if V8_OOL_CONSTANT_POOL
+  const int exitOffset = ExitFrameConstants::kConstantPoolOffset;
+  const int standardOffset = StandardFrameConstants::kConstantPoolOffset;
+  const int offset = ((type == StackFrame::EXIT) ? exitOffset : standardOffset);
+  LoadP(kConstantPoolRegister, MemOperand(fp, offset));
+#endif
+  mtlr(r0);
+  frame_ends = pc_offset();
+  Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
+  mr(fp, ip);
+  return frame_ends;
+}
+
+
+// ExitFrame layout (approximate; needs updating)
+//
+//  SP -> previousSP
+//        LK reserved
+//        code
+//        sp_on_exit (for debug?)
+// oldSP->prev SP
+//        LK
+//        <parameters on stack>
+
+// Prior to calling EnterExitFrame, we have a number of parameters on the
+// stack that we need to wrap a real frame around. First we reserve a slot
+// for LK and push the previous SP, which is captured in the fp register
+// (r31); then we allocate the new frame.
+
+void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
+  // Set up the frame structure on the stack.
+  DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
+  DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
+  DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
+  DCHECK(stack_space > 0);
+
+  // This is an opportunity to build a frame to wrap all of the pushes that
+  // have happened inside of V8 since we were called from C code.
+
+  // Replicate the ARM frame layout. TODO: make this more closely follow the
+  // PPC ABI.
+  mflr(r0);
+  Push(r0, fp);
+  mr(fp, sp);
+  // Reserve room for saved entry sp and code object.
+  subi(sp, sp, Operand(ExitFrameConstants::kFrameSize));
+
+  if (emit_debug_code()) {
+    li(r8, Operand::Zero());
+    StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
+  }
+#if V8_OOL_CONSTANT_POOL
+  StoreP(kConstantPoolRegister,
+         MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
+#endif
+  mov(r8, Operand(CodeObject()));
+  StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
+
+  // Save the frame pointer and the context in top.
+  mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  StoreP(fp, MemOperand(r8));
+  mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+  StoreP(cp, MemOperand(r8));
+
+  // Optionally save all volatile double registers.
+  if (save_doubles) {
+    SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
+    // Note that d0 will be accessible at
+    //   fp - ExitFrameConstants::kFrameSize -
+    //   kNumVolatileRegisters * kDoubleSize,
+    // since the sp slot and code slot were pushed after the fp.
+  }
+
+  addi(sp, sp, Operand(-stack_space * kPointerSize));
+
+  // Allocate and align the frame preparing for calling the runtime
+  // function.
+  const int frame_alignment = ActivationFrameAlignment();
+  if (frame_alignment > kPointerSize) {
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+    ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+  }
+  li(r0, Operand::Zero());
+  StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
+
+  // Set the exit frame sp value to point just before the return address
+  // location.
+  addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
+  StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
+}
+
+
+void MacroAssembler::InitializeNewString(Register string, Register length,
+                                         Heap::RootListIndex map_index,
+                                         Register scratch1, Register scratch2) {
+  SmiTag(scratch1, length);
+  LoadRoot(scratch2, map_index);
+  StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
+  li(scratch1, Operand(String::kEmptyHashField));
+  StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
+  StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
+}
+
+
+int MacroAssembler::ActivationFrameAlignment() {
+#if !defined(USE_SIMULATOR)
+  // Running on the real platform. Use the alignment as mandated by the local
+  // environment.
+  // Note: This will break if we ever start generating snapshots on one PPC
+  // platform for another PPC platform with a different alignment.
+  return base::OS::ActivationFrameAlignment();
+#else  // Simulated
+  // If we are using the simulator then we should always align to the expected
+  // alignment. As the simulator is used to generate snapshots we do not know
+  // if the target platform will need alignment, so this is controlled from a
+  // flag.
+  return FLAG_sim_stack_alignment;
+#endif
+}
+
+
+void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
+                                    bool restore_context) {
+#if V8_OOL_CONSTANT_POOL
+  ConstantPoolUnavailableScope constant_pool_unavailable(this);
+#endif
+  // Optionally restore all double registers.
+  if (save_doubles) {
+    // Calculate the stack location of the saved doubles and restore them.
+    const int kNumRegs = DoubleRegister::kNumVolatileRegisters;
+    const int offset =
+        (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
+    addi(r6, fp, Operand(-offset));
+    RestoreFPRegs(r6, 0, kNumRegs);
+  }
+
+  // Clear top frame.
+  li(r6, Operand::Zero());
+  mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
+  StoreP(r6, MemOperand(ip));
+
+  // Restore current context from top and clear it in debug mode.
+  if (restore_context) {
+    mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+    LoadP(cp, MemOperand(ip));
+  }
+#ifdef DEBUG
+  mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
+  StoreP(r6, MemOperand(ip));
+#endif
+
+  // Tear down the exit frame, pop the arguments, and return.
+  LeaveFrame(StackFrame::EXIT);
+
+  if (argument_count.is_valid()) {
+    ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
+    add(sp, sp, argument_count);
+  }
+}
+
+
+void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
+  Move(dst, d1);
+}
+
+
+void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
+  Move(dst, d1);
+}
+
+
+void MacroAssembler::InvokePrologue(const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    Handle<Code> code_constant,
+                                    Register code_reg, Label* done,
+                                    bool* definitely_mismatches,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  bool definitely_matches = false;
+  *definitely_mismatches = false;
+  Label regular_invoke;
+
+  // Check whether the expected and actual arguments count match. If not,
+  // setup registers according to contract with ArgumentsAdaptorTrampoline:
+  //  r3: actual arguments count
+  //  r4: function (passed through to callee)
+  //  r5: expected arguments count
+
+  // The code below is made a lot easier because the calling code already sets
+  // up actual and expected registers according to the contract if values are
+  // passed in registers.
+
+  // ARM has some sanity checks as per below; consider adding them for PPC:
+  //  DCHECK(actual.is_immediate() || actual.reg().is(r3));
+  //  DCHECK(expected.is_immediate() || expected.reg().is(r5));
+  //  DCHECK((!code_constant.is_null() && code_reg.is(no_reg))
+  //          || code_reg.is(r6));
+
+  if (expected.is_immediate()) {
+    DCHECK(actual.is_immediate());
+    if (expected.immediate() == actual.immediate()) {
+      definitely_matches = true;
+    } else {
+      mov(r3, Operand(actual.immediate()));
+      const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
+      if (expected.immediate() == sentinel) {
+        // Don't worry about adapting arguments for builtins that
+        // don't want that done. Skip adaptation code by making it look
+        // like we have a match between the expected and actual number of
+        // arguments.
+        definitely_matches = true;
+      } else {
+        *definitely_mismatches = true;
+        mov(r5, Operand(expected.immediate()));
+      }
+    }
+  } else {
+    if (actual.is_immediate()) {
+      cmpi(expected.reg(), Operand(actual.immediate()));
+      beq(&regular_invoke);
+      mov(r3, Operand(actual.immediate()));
+    } else {
+      cmp(expected.reg(), actual.reg());
+      beq(&regular_invoke);
+    }
+  }
+
+  if (!definitely_matches) {
+    if (!code_constant.is_null()) {
+      mov(r6, Operand(code_constant));
+      addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
+    }
+
+    Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(adaptor));
+      Call(adaptor);
+      call_wrapper.AfterCall();
+      if (!*definitely_mismatches) {
+        b(done);
+      }
+    } else {
+      Jump(adaptor, RelocInfo::CODE_TARGET);
+    }
+    bind(&regular_invoke);
+  }
+}
+
+
+void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
+                                const ParameterCount& actual, InvokeFlag flag,
+                                const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  Label done;
+  bool definitely_mismatches = false;
+  InvokePrologue(expected, actual, Handle<Code>::null(), code, &done,
+                 &definitely_mismatches, flag, call_wrapper);
+  if (!definitely_mismatches) {
+    if (flag == CALL_FUNCTION) {
+      call_wrapper.BeforeCall(CallSize(code));
+      CallJSEntry(code);
+      call_wrapper.AfterCall();
+    } else {
+      DCHECK(flag == JUMP_FUNCTION);
+      JumpToJSEntry(code);
+    }
+
+    // Continue here if InvokePrologue has handled the invocation due to
+    // mismatched parameter counts.
+    bind(&done);
+  }
+}
+
+
+void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  // Contract with called JS functions requires that function is passed in r4.
+  DCHECK(fun.is(r4));
+
+  Register expected_reg = r5;
+  Register code_reg = ip;
+
+  LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
+  LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+  LoadWordArith(expected_reg,
+                FieldMemOperand(
+                    code_reg, SharedFunctionInfo::kFormalParameterCountOffset));
+#if !defined(V8_TARGET_ARCH_PPC64)
+  SmiUntag(expected_reg);
+#endif
+  LoadP(code_reg, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+
+  ParameterCount expected(expected_reg);
+  InvokeCode(code_reg, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Register function,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  // You can't call a function without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  // Contract with called JS functions requires that function is passed in r4.
+  DCHECK(function.is(r4));
+
+  // Get the function and setup the context.
+  LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
+
+  // We call indirectly through the code field in the function to
+  // allow recompilation to take effect without changing any of the
+  // call sites.
+  LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+  InvokeCode(ip, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
+                                    const ParameterCount& expected,
+                                    const ParameterCount& actual,
+                                    InvokeFlag flag,
+                                    const CallWrapper& call_wrapper) {
+  Move(r4, function);
+  InvokeFunction(r4, expected, actual, flag, call_wrapper);
+}
+
+
+void MacroAssembler::IsObjectJSObjectType(Register heap_object, Register map,
+                                          Register scratch, Label* fail) {
+  LoadP(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
+  IsInstanceJSObjectType(map, scratch, fail);
+}
+
+
+void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch,
+                                            Label* fail) {
+  lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  cmpi(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  blt(fail);
+  cmpi(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
+  bgt(fail);
+}
+
+
+void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
+                                          Label* fail) {
+  DCHECK(kNotStringTag != 0);
+
+  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  andi(r0, scratch, Operand(kIsNotStringMask));
+  bne(fail, cr0);
+}
+
+
+void MacroAssembler::IsObjectNameType(Register object, Register scratch,
+                                      Label* fail) {
+  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  cmpi(scratch, Operand(LAST_NAME_TYPE));
+  bgt(fail);
+}
+
+
+void MacroAssembler::DebugBreak() {
+  li(r3, Operand::Zero());
+  mov(r4, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
+  CEntryStub ces(isolate(), 1);
+  DCHECK(AllowThisStubCall(&ces));
+  Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
+}
+
+
+void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
+                                    int handler_index) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // For the JSEntry handler, we must preserve r1-r7; r0 and r8-r15 are
+  // available.
+  // We want the stack to look like
+  // sp -> NextOffset
+  //       CodeObject
+  //       state
+  //       context
+  //       frame pointer
+
+  // Link the current handler as the next handler.
+  mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  LoadP(r0, MemOperand(r8));
+  StorePU(r0, MemOperand(sp, -StackHandlerConstants::kSize));
+  // Set this new handler as the current one.
+  StoreP(sp, MemOperand(r8));
+
+  if (kind == StackHandler::JS_ENTRY) {
+    li(r8, Operand::Zero());  // NULL frame pointer.
+    StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset));
+    LoadSmiLiteral(r8, Smi::FromInt(0));  // Indicates no context.
+    StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset));
+  } else {
+    // TODO: verify that fp is the right value to save here.
+    StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
+    StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
+  }
+  unsigned state = StackHandler::IndexField::encode(handler_index) |
+                   StackHandler::KindField::encode(kind);
+  LoadIntLiteral(r8, state);
+  StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset));
+  mov(r8, Operand(CodeObject()));
+  StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset));
+}
+
+
+void MacroAssembler::PopTryHandler() {
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  pop(r4);
+  mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  addi(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
+  StoreP(r4, MemOperand(ip));
+}
+
+
+// PPC - make use of ip as a temporary register
+void MacroAssembler::JumpToHandlerEntry() {
+// Compute the handler entry address and jump to it.  The handler table is
+// a fixed array of (smi-tagged) code offsets.
+// r3 = exception, r4 = code object, r5 = state.
+#if V8_OOL_CONSTANT_POOL
+  ConstantPoolUnavailableScope constant_pool_unavailable(this);
+  LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset));
+#endif
+  LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset));  // Handler table.
+  addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
+  srwi(r5, r5, Operand(StackHandler::kKindWidth));  // Handler index.
+  slwi(ip, r5, Operand(kPointerSizeLog2));
+  add(ip, r6, ip);
+  LoadP(r5, MemOperand(ip));  // Smi-tagged offset.
+  addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start.
+  SmiUntag(ip, r5);
+  add(r0, r4, ip);
+  mtctr(r0);
+  bctr();
+}
+
+
+void MacroAssembler::Throw(Register value) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+  Label skip;
+
+  // The exception is expected in r3.
+  if (!value.is(r3)) {
+    mr(r3, value);
+  }
+  // Drop the stack pointer to the top of the top handler.
+  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  LoadP(sp, MemOperand(r6));
+  // Restore the next handler.
+  pop(r5);
+  StoreP(r5, MemOperand(r6));
+
+  // Get the code object (r4) and state (r5).  Restore the context and frame
+  // pointer.
+  pop(r4);
+  pop(r5);
+  pop(cp);
+  pop(fp);
+
+  // If the handler is a JS frame, restore the context to the frame.
+  // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
+  // or cp.
+  cmpi(cp, Operand::Zero());
+  beq(&skip);
+  StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
+  bind(&skip);
+
+  JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::ThrowUncatchable(Register value) {
+  // Adjust this code if not the case.
+  STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
+  STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
+
+  // The exception is expected in r3.
+  if (!value.is(r3)) {
+    mr(r3, value);
+  }
+  // Drop the stack pointer to the top of the top stack handler.
+  mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
+  LoadP(sp, MemOperand(r6));
+
+  // Unwind the handlers until the ENTRY handler is found.
+  Label fetch_next, check_kind;
+  b(&check_kind);
+  bind(&fetch_next);
+  LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
+
+  bind(&check_kind);
+  STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
+  LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset));
+  andi(r0, r5, Operand(StackHandler::KindField::kMask));
+  bne(&fetch_next, cr0);
+
+  // Set the top handler address to next handler past the top ENTRY handler.
+  pop(r5);
+  StoreP(r5, MemOperand(r6));
+  // Get the code object (r4) and state (r5).  Clear the context and frame
+  // pointer (0 was saved in the handler).
+  pop(r4);
+  pop(r5);
+  pop(cp);
+  pop(fp);
+
+  JumpToHandlerEntry();
+}
+
+
+void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
+                                            Register scratch, Label* miss) {
+  Label same_contexts;
+
+  DCHECK(!holder_reg.is(scratch));
+  DCHECK(!holder_reg.is(ip));
+  DCHECK(!scratch.is(ip));
+
+  // Load current lexical context from the stack frame.
+  LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
+// In debug mode, make sure the lexical context is set.
+#ifdef DEBUG
+  cmpi(scratch, Operand::Zero());
+  Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
+#endif
+
+  // Load the native context of the current context.
+  int offset =
+      Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
+  LoadP(scratch, FieldMemOperand(scratch, offset));
+  LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+  // Check the context is a native context.
+  if (emit_debug_code()) {
+    // Cannot use ip as a temporary in this verification code, because ip is
+    // clobbered as part of cmp with an object Operand.
+    push(holder_reg);  // Temporarily save holder on the stack.
+    // Read the first word and compare to the native_context_map.
+    LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
+    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
+    cmp(holder_reg, ip);
+    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
+    pop(holder_reg);  // Restore holder.
+  }
+
+  // Check if both contexts are the same.
+  LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+  cmp(scratch, ip);
+  beq(&same_contexts);
+
+  // Check the context is a native context.
+  if (emit_debug_code()) {
+    // Cannot use ip as a temporary in this verification code, because ip is
+    // clobbered as part of cmp with an object Operand.
+    push(holder_reg);    // Temporarily save holder on the stack.
+    mr(holder_reg, ip);  // Move ip to its holding place.
+    LoadRoot(ip, Heap::kNullValueRootIndex);
+    cmp(holder_reg, ip);
+    Check(ne, kJSGlobalProxyContextShouldNotBeNull);
+
+    LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
+    LoadRoot(ip, Heap::kNativeContextMapRootIndex);
+    cmp(holder_reg, ip);
+    Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
+    // Restoring ip is not needed; it is reloaded below.
+    pop(holder_reg);  // Restore holder.
+    // Restore ip to holder's context.
+    LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
+  }
+
+  // Check that the security token in the calling global object is
+  // compatible with the security token in the receiving global
+  // object.
+  int token_offset =
+      Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
+
+  LoadP(scratch, FieldMemOperand(scratch, token_offset));
+  LoadP(ip, FieldMemOperand(ip, token_offset));
+  cmp(scratch, ip);
+  bne(miss);
+
+  bind(&same_contexts);
+}
+
+
+// Compute the hash code from the untagged key.  This must be kept in sync
+// with ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
+// code-stubs-hydrogen.cc.
+void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
+  // First of all we assign the hash seed to scratch.
+  LoadRoot(scratch, Heap::kHashSeedRootIndex);
+  SmiUntag(scratch);
+
+  // Xor original key with a seed.
+  xor_(t0, t0, scratch);
+
+  // Compute the hash code from the untagged key.  This must be kept in sync
+  // with ComputeIntegerHash in utils.h.
+  //
+  // hash = ~hash + (hash << 15);
+  notx(scratch, t0);
+  slwi(t0, t0, Operand(15));
+  add(t0, scratch, t0);
+  // hash = hash ^ (hash >> 12);
+  srwi(scratch, t0, Operand(12));
+  xor_(t0, t0, scratch);
+  // hash = hash + (hash << 2);
+  slwi(scratch, t0, Operand(2));
+  add(t0, t0, scratch);
+  // hash = hash ^ (hash >> 4);
+  srwi(scratch, t0, Operand(4));
+  xor_(t0, t0, scratch);
+  // hash = hash * 2057;
+  mr(r0, t0);
+  slwi(scratch, t0, Operand(3));
+  add(t0, t0, scratch);
+  slwi(scratch, r0, Operand(11));
+  add(t0, t0, scratch);
+  // hash = hash ^ (hash >> 16);
+  srwi(scratch, t0, Operand(16));
+  xor_(t0, t0, scratch);
+}
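+
+
+// For reference, the sequence above is equivalent to this plain C++ sketch.
+// IntegerHashSketch is an illustrative name, not the actual ComputeIntegerHash
+// declaration from utils.h.
+static inline uint32_t IntegerHashSketch(uint32_t key, uint32_t seed) {
+  uint32_t hash = key ^ seed;
+  hash = ~hash + (hash << 15);
+  hash = hash ^ (hash >> 12);
+  hash = hash + (hash << 2);
+  hash = hash ^ (hash >> 4);
+  hash = hash * 2057;  // Emitted above as hash + (hash << 3) + (hash << 11).
+  hash = hash ^ (hash >> 16);
+  return hash;
+}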
+
+
+void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
+                                              Register key, Register result,
+                                              Register t0, Register t1,
+                                              Register t2) {
+  // Register use:
+  //
+  // elements - holds the slow-case elements of the receiver on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // key      - holds the smi key on entry.
+  //            Unchanged unless 'result' is the same register.
+  //
+  // result   - holds the result on exit if the load succeeded.
+  //            Allowed to be the same as 'elements' or 'key'.
+  //            Unchanged on bailout, so 'elements' and 'key' remain usable
+  //            in further computation.
+  //
+  // Scratch registers:
+  //
+  // t0 - holds the untagged key on entry and holds the hash once computed.
+  //
+  // t1 - used to hold the capacity mask of the dictionary
+  //
+  // t2 - used for the index into the dictionary.
+  Label done;
+
+  GetNumberHash(t0, t1);
+
+  // Compute the capacity mask.
+  LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
+  SmiUntag(t1);
+  subi(t1, t1, Operand(1));
+
+  // Generate an unrolled loop that performs a few probes before giving up.
+  for (int i = 0; i < kNumberDictionaryProbes; i++) {
+    // Use t2 for index calculations and keep the hash intact in t0.
+    mr(t2, t0);
+    // Compute the masked index: (hash + i + i * i) & mask.
+    if (i > 0) {
+      addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
+    }
+    and_(t2, t2, t1);
+
+    // Scale the index by multiplying by the element size.
+    DCHECK(SeededNumberDictionary::kEntrySize == 3);
+    slwi(ip, t2, Operand(1));
+    add(t2, t2, ip);  // t2 = t2 * 3
+
+    // Check if the key is identical to the name.
+    slwi(t2, t2, Operand(kPointerSizeLog2));
+    add(t2, elements, t2);
+    LoadP(ip,
+          FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
+    cmp(key, ip);
+    if (i != kNumberDictionaryProbes - 1) {
+      beq(&done);
+    } else {
+      bne(miss);
+    }
+  }
+
+  bind(&done);
+  // Check that the value is a field property.
+  // t2: elements + (index * kPointerSize)
+  const int kDetailsOffset =
+      SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
+  LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
+  LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
+  DCHECK_EQ(FIELD, 0);
+  and_(r0, t1, ip, SetRC);
+  bne(miss, cr0);
+
+  // Get the value at the masked, scaled index and return.
+  const int kValueOffset =
+      SeededNumberDictionary::kElementsStartOffset + kPointerSize;
+  LoadP(result, FieldMemOperand(t2, kValueOffset));
+}
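+
+
+// For reference only: each unrolled probe above computes a masked entry index
+// from the hash plus SeededNumberDictionary::GetProbeOffset(i), as in this
+// sketch (ProbeEntrySketch is a hypothetical name):
+static inline uint32_t ProbeEntrySketch(uint32_t hash, uint32_t mask,
+                                        uint32_t probe_offset) {
+  // The key for the resulting entry is then read at
+  // kElementsStartOffset + entry * kEntrySize * kPointerSize.
+  return (hash + probe_offset) & mask;
+}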
+
+
+void MacroAssembler::Allocate(int object_size, Register result,
+                              Register scratch1, Register scratch2,
+                              Label* gc_required, AllocationFlags flags) {
+  DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
+  if (!FLAG_inline_new) {
+    if (emit_debug_code()) {
+      // Trash the registers to simulate an allocation failure.
+      li(result, Operand(0x7091));
+      li(scratch1, Operand(0x7191));
+      li(scratch2, Operand(0x7291));
+    }
+    b(gc_required);
+    return;
+  }
+
+  DCHECK(!result.is(scratch1));
+  DCHECK(!result.is(scratch2));
+  DCHECK(!scratch1.is(scratch2));
+  DCHECK(!scratch1.is(ip));
+  DCHECK(!scratch2.is(ip));
+
+  // Make object size into bytes.
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    object_size *= kPointerSize;
+  }
+  DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
+
+  // Check relative positions of allocation top and limit addresses.
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+
+  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
+  DCHECK((limit - top) == kPointerSize);
+
+  // Set up allocation top address register.
+  Register topaddr = scratch1;
+  mov(topaddr, Operand(allocation_top));
+
+  // This code stores a temporary value in ip. This is OK, as the code below
+  // does not need ip for implicit literal generation.
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and allocation limit into ip.
+    LoadP(result, MemOperand(topaddr));
+    LoadP(ip, MemOperand(topaddr, kPointerSize));
+  } else {
+    if (emit_debug_code()) {
+      // Assert that result actually contains top on entry. ip is used
+      // immediately below, so this use of ip does not change register
+      // contents between debug and release modes.
+      LoadP(ip, MemOperand(topaddr));
+      cmp(result, ip);
+      Check(eq, kUnexpectedAllocationTop);
+    }
+    // Load allocation limit into ip. Result already contains allocation top.
+    LoadP(ip, MemOperand(topaddr, limit - top), r0);
+  }
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // safe in new-space because the limit of the heap is aligned there.
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+#if V8_TARGET_ARCH_PPC64
+    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    andi(scratch2, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    beq(&aligned, cr0);
+    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+      cmpl(result, ip);
+      bge(gc_required);
+    }
+    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    stw(scratch2, MemOperand(result));
+    addi(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+#endif
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top.
+  sub(r0, ip, result);
+  if (is_int16(object_size)) {
+    cmpi(r0, Operand(object_size));
+    blt(gc_required);
+    addi(scratch2, result, Operand(object_size));
+  } else {
+    Cmpi(r0, Operand(object_size), scratch2);
+    blt(gc_required);
+    add(scratch2, result, scratch2);
+  }
+  StoreP(scratch2, MemOperand(topaddr));
+
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    addi(result, result, Operand(kHeapObjectTag));
+  }
+}
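+
+
+// For reference only: a minimal C++ sketch of the bump-pointer allocation
+// implemented above, omitting alignment and tagging (hypothetical name):
+static inline bool BumpAllocateSketch(uintptr_t* top, uintptr_t limit,
+                                      uintptr_t size, uintptr_t* result) {
+  if (limit - *top < size) return false;  // Branch to gc_required.
+  *result = *top;  // The old top is the new object's address.
+  *top += size;    // StoreP of the bumped top.
+  return true;
+}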
+
+
+void MacroAssembler::Allocate(Register object_size, Register result,
+                              Register scratch1, Register scratch2,
+                              Label* gc_required, AllocationFlags flags) {
+  if (!FLAG_inline_new) {
+    if (emit_debug_code()) {
+      // Trash the registers to simulate an allocation failure.
+      li(result, Operand(0x7091));
+      li(scratch1, Operand(0x7191));
+      li(scratch2, Operand(0x7291));
+    }
+    b(gc_required);
+    return;
+  }
+
+  // Assert that the register arguments are different and that none of
+  // them are ip. ip is used explicitly in the code generated below.
+  DCHECK(!result.is(scratch1));
+  DCHECK(!result.is(scratch2));
+  DCHECK(!scratch1.is(scratch2));
+  DCHECK(!object_size.is(ip));
+  DCHECK(!result.is(ip));
+  DCHECK(!scratch1.is(ip));
+  DCHECK(!scratch2.is(ip));
+
+  // Check relative positions of allocation top and limit addresses.
+  ExternalReference allocation_top =
+      AllocationUtils::GetAllocationTopReference(isolate(), flags);
+  ExternalReference allocation_limit =
+      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
+  intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
+  intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
+  DCHECK((limit - top) == kPointerSize);
+
+  // Set up allocation top address.
+  Register topaddr = scratch1;
+  mov(topaddr, Operand(allocation_top));
+
+  // This code stores a temporary value in ip. This is OK, as the code below
+  // does not need ip for implicit literal generation.
+  if ((flags & RESULT_CONTAINS_TOP) == 0) {
+    // Load allocation top into result and allocation limit into ip.
+    LoadP(result, MemOperand(topaddr));
+    LoadP(ip, MemOperand(topaddr, kPointerSize));
+  } else {
+    if (emit_debug_code()) {
+      // Assert that result actually contains top on entry. ip is used
+      // immediately below, so this use of ip does not change register
+      // contents between debug and release modes.
+      LoadP(ip, MemOperand(topaddr));
+      cmp(result, ip);
+      Check(eq, kUnexpectedAllocationTop);
+    }
+    // Load allocation limit into ip. Result already contains allocation top.
+    LoadP(ip, MemOperand(topaddr, limit - top));
+  }
+
+  if ((flags & DOUBLE_ALIGNMENT) != 0) {
+    // Align the next allocation. Storing the filler map without checking top is
+    // safe in new-space because the limit of the heap is aligned there.
+    DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
+#if V8_TARGET_ARCH_PPC64
+    STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
+#else
+    STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
+    andi(scratch2, result, Operand(kDoubleAlignmentMask));
+    Label aligned;
+    beq(&aligned, cr0);
+    if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
+      cmpl(result, ip);
+      bge(gc_required);
+    }
+    mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
+    stw(scratch2, MemOperand(result));
+    addi(result, result, Operand(kDoubleSize / 2));
+    bind(&aligned);
+#endif
+  }
+
+  // Calculate new top and bail out if new space is exhausted. Use result
+  // to calculate the new top. Object size may be in words so a shift is
+  // required to get the number of bytes.
+  sub(r0, ip, result);
+  if ((flags & SIZE_IN_WORDS) != 0) {
+    ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2));
+    cmp(r0, scratch2);
+    blt(gc_required);
+    add(scratch2, result, scratch2);
+  } else {
+    cmp(r0, object_size);
+    blt(gc_required);
+    add(scratch2, result, object_size);
+  }
+
+  // Update allocation top. scratch2 temporarily holds the new top.
+  if (emit_debug_code()) {
+    andi(r0, scratch2, Operand(kObjectAlignmentMask));
+    Check(eq, kUnalignedAllocationInNewSpace, cr0);
+  }
+  StoreP(scratch2, MemOperand(topaddr));
+
+  // Tag object if requested.
+  if ((flags & TAG_OBJECT) != 0) {
+    addi(result, result, Operand(kHeapObjectTag));
+  }
+}
+
+
+void MacroAssembler::UndoAllocationInNewSpace(Register object,
+                                              Register scratch) {
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+
+  // Make sure the object has no tag before resetting top.
+  mov(r0, Operand(~kHeapObjectTagMask));
+  and_(object, object, r0);
+#ifdef DEBUG
+  // Check that the object un-allocated is below the current top.
+  mov(scratch, Operand(new_space_allocation_top));
+  LoadP(scratch, MemOperand(scratch));
+  cmp(object, scratch);
+  Check(lt, kUndoAllocationOfNonAllocatedMemory);
+#endif
+  // Write the address of the object to un-allocate as the current top.
+  mov(scratch, Operand(new_space_allocation_top));
+  StoreP(object, MemOperand(scratch));
+}
+
+
+void MacroAssembler::AllocateTwoByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string while
+  // observing object alignment.
+  DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  slwi(scratch1, length, Operand(1));  // Length in bytes, not chars.
+  addi(scratch1, scratch1,
+       Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
+  mov(r0, Operand(~kObjectAlignmentMask));
+  and_(scratch1, scratch1, r0);
+
+  // Allocate two-byte string in new space.
+  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateOneByteString(Register result, Register length,
+                                           Register scratch1, Register scratch2,
+                                           Register scratch3,
+                                           Label* gc_required) {
+  // Calculate the number of bytes needed for the characters in the string while
+  // observing object alignment.
+  DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
+  DCHECK(kCharSize == 1);
+  addi(scratch1, length,
+       Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
+  li(r0, Operand(~kObjectAlignmentMask));
+  and_(scratch1, scratch1, r0);
+
+  // Allocate one-byte string in new space.
+  Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
+
+  // Set the map, length and hash field.
+  InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
+                      scratch1, scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
+                                               Register scratch1,
+                                               Register scratch2,
+                                               Label* gc_required) {
+  Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
+                      scratch1, scratch2);
+}
+
+
+void MacroAssembler::AllocateTwoByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
+                      scratch2);
+}
+
+
+void MacroAssembler::AllocateOneByteSlicedString(Register result,
+                                                 Register length,
+                                                 Register scratch1,
+                                                 Register scratch2,
+                                                 Label* gc_required) {
+  Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
+           TAG_OBJECT);
+
+  InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
+                      scratch1, scratch2);
+}
+
+
+void MacroAssembler::CompareObjectType(Register object, Register map,
+                                       Register type_reg, InstanceType type) {
+  const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
+
+  LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+  CompareInstanceType(map, temp, type);
+}
+
+
+void MacroAssembler::CheckObjectTypeRange(Register object, Register map,
+                                          InstanceType min_type,
+                                          InstanceType max_type,
+                                          Label* false_label) {
+  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+  STATIC_ASSERT(LAST_TYPE < 256);
+  LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
+  lbz(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  subi(ip, ip, Operand(min_type));
+  cmpli(ip, Operand(max_type - min_type));
+  bgt(false_label);
+}
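+
+
+// For reference only: the subi/cmpli/bgt above is the usual single-compare
+// unsigned range check, sketched here (hypothetical name):
+static inline bool InstanceTypeInRangeSketch(unsigned type, unsigned min_type,
+                                             unsigned max_type) {
+  // type >= min_type && type <= max_type holds exactly when the unsigned
+  // difference type - min_type is at most max_type - min_type.
+  return (type - min_type) <= (max_type - min_type);
+}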
+
+
+void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
+                                         InstanceType type) {
+  STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
+  STATIC_ASSERT(LAST_TYPE < 256);
+  lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  cmpi(type_reg, Operand(type));
+}
+
+
+void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
+  DCHECK(!obj.is(r0));
+  LoadRoot(r0, index);
+  cmp(obj, r0);
+}
+
+
+void MacroAssembler::CheckFastElements(Register map, Register scratch,
+                                       Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_ELEMENTS == 2);
+  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
+  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
+  bgt(fail);
+}
+
+
+void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
+                                             Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  STATIC_ASSERT(FAST_ELEMENTS == 2);
+  STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
+  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+  ble(fail);
+  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
+  bgt(fail);
+}
+
+
+void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
+                                          Label* fail) {
+  STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
+  STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
+  lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
+  cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
+  bgt(fail);
+}
+
+
+void MacroAssembler::StoreNumberToDoubleElements(
+    Register value_reg, Register key_reg, Register elements_reg,
+    Register scratch1, DoubleRegister double_scratch, Label* fail,
+    int elements_offset) {
+  Label smi_value, store;
+
+  // Handle smi values specially.
+  JumpIfSmi(value_reg, &smi_value);
+
+  // Ensure that the object is a heap number.
+  CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
+           DONT_DO_SMI_CHECK);
+
+  lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
+  // Force a canonical NaN.
+  CanonicalizeNaN(double_scratch);
+  b(&store);
+
+  bind(&smi_value);
+  SmiToDouble(double_scratch, value_reg);
+
+  bind(&store);
+  SmiToDoubleArrayOffset(scratch1, key_reg);
+  add(scratch1, elements_reg, scratch1);
+  stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
+                                                     elements_offset));
+}
+
+
+void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
+                                            Register right,
+                                            Register overflow_dst,
+                                            Register scratch) {
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!dst.is(scratch));
+  DCHECK(!overflow_dst.is(scratch));
+  DCHECK(!overflow_dst.is(left));
+  DCHECK(!overflow_dst.is(right));
+
+  // C = A + B; C overflows if A and B have the same sign and C has a
+  // different sign than A.
+  if (dst.is(left)) {
+    mr(scratch, left);            // Preserve left.
+    add(dst, left, right);        // Left is overwritten.
+    xor_(scratch, dst, scratch);  // Original left.
+    xor_(overflow_dst, dst, right);
+  } else if (dst.is(right)) {
+    mr(scratch, right);           // Preserve right.
+    add(dst, left, right);        // Right is overwritten.
+    xor_(scratch, dst, scratch);  // Original right.
+    xor_(overflow_dst, dst, left);
+  } else {
+    add(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, dst, right);
+  }
+  and_(overflow_dst, scratch, overflow_dst, SetRC);
+}
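+
+
+// For reference only: the xor/and sequence above evaluates the standard
+// signed-addition overflow predicate, sketched here (hypothetical name):
+static inline bool AddOverflowsSketch(int32_t a, int32_t b, int32_t c) {
+  // With c == a + b, overflow occurred iff a and b share a sign that c does
+  // not, i.e. the sign bit of (c ^ a) & (c ^ b) is set; SetRC tests that bit.
+  return ((c ^ a) & (c ^ b)) < 0;
+}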
+
+
+void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
+                                            intptr_t right,
+                                            Register overflow_dst,
+                                            Register scratch) {
+  Register original_left = left;
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!dst.is(scratch));
+  DCHECK(!overflow_dst.is(scratch));
+  DCHECK(!overflow_dst.is(left));
+
+  // C = A + B; C overflows if A and B have the same sign and C has a
+  // different sign than A.
+  if (dst.is(left)) {
+    // Preserve left.
+    original_left = overflow_dst;
+    mr(original_left, left);
+  }
+  Add(dst, left, right, scratch);
+  xor_(overflow_dst, dst, original_left);
+  if (right >= 0) {
+    and_(overflow_dst, overflow_dst, dst, SetRC);
+  } else {
+    andc(overflow_dst, overflow_dst, dst, SetRC);
+  }
+}
+
+
+void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
+                                            Register right,
+                                            Register overflow_dst,
+                                            Register scratch) {
+  DCHECK(!dst.is(overflow_dst));
+  DCHECK(!dst.is(scratch));
+  DCHECK(!overflow_dst.is(scratch));
+  DCHECK(!overflow_dst.is(left));
+  DCHECK(!overflow_dst.is(right));
+
+  // C = A - B; C overflows if A and B have different signs and C has a
+  // different sign than A.
+  if (dst.is(left)) {
+    mr(scratch, left);      // Preserve left.
+    sub(dst, left, right);  // Left is overwritten.
+    xor_(overflow_dst, dst, scratch);
+    xor_(scratch, scratch, right);
+    and_(overflow_dst, overflow_dst, scratch, SetRC);
+  } else if (dst.is(right)) {
+    mr(scratch, right);     // Preserve right.
+    sub(dst, left, right);  // Right is overwritten.
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, left, scratch);
+    and_(overflow_dst, overflow_dst, scratch, SetRC);
+  } else {
+    sub(dst, left, right);
+    xor_(overflow_dst, dst, left);
+    xor_(scratch, left, right);
+    and_(overflow_dst, scratch, overflow_dst, SetRC);
+  }
+}
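+
+
+// For reference only: the matching subtraction predicate, mirroring the
+// xor/and sequence above (hypothetical name):
+static inline bool SubOverflowsSketch(int32_t a, int32_t b, int32_t c) {
+  // With c == a - b, overflow occurred iff a and b have different signs and
+  // c's sign differs from a's: the sign bit of (c ^ a) & (a ^ b).
+  return ((c ^ a) & (a ^ b)) < 0;
+}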
+
+
+void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
+                                Label* early_success) {
+  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  CompareMap(scratch, map, early_success);
+}
+
+
+void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
+                                Label* early_success) {
+  mov(r0, Operand(map));
+  cmp(obj_map, r0);
+}
+
+
+void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
+                              Label* fail, SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, fail);
+  }
+
+  Label success;
+  CompareMap(obj, scratch, map, &success);
+  bne(fail);
+  bind(&success);
+}
+
+
+void MacroAssembler::CheckMap(Register obj, Register scratch,
+                              Heap::RootListIndex index, Label* fail,
+                              SmiCheckType smi_check_type) {
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, fail);
+  }
+  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  LoadRoot(r0, index);
+  cmp(scratch, r0);
+  bne(fail);
+}
+
+
+void MacroAssembler::DispatchMap(Register obj, Register scratch,
+                                 Handle<Map> map, Handle<Code> success,
+                                 SmiCheckType smi_check_type) {
+  Label fail;
+  if (smi_check_type == DO_SMI_CHECK) {
+    JumpIfSmi(obj, &fail);
+  }
+  LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
+  mov(r0, Operand(map));
+  cmp(scratch, r0);
+  bne(&fail);
+  Jump(success, RelocInfo::CODE_TARGET, al);
+  bind(&fail);
+}
+
+
+void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
+                                             Register scratch, Label* miss,
+                                             bool miss_on_bound_function) {
+  Label non_instance;
+  if (miss_on_bound_function) {
+    // Check that the receiver isn't a smi.
+    JumpIfSmi(function, miss);
+
+    // Check that the function really is a function.  Load map into result reg.
+    CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
+    bne(miss);
+
+    LoadP(scratch,
+          FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
+    lwz(scratch,
+        FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
+    TestBit(scratch,
+#if V8_TARGET_ARCH_PPC64
+            SharedFunctionInfo::kBoundFunction,
+#else
+            SharedFunctionInfo::kBoundFunction + kSmiTagSize,
+#endif
+            r0);
+    bne(miss, cr0);
+
+    // Make sure that the function has an instance prototype.
+    lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
+    andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype));
+    bne(&non_instance, cr0);
+  }
+
+  // Get the prototype or initial map from the function.
+  LoadP(result,
+        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+
+  // If the prototype or initial map is the hole, don't return it and
+  // simply miss the cache instead. This will allow us to allocate a
+  // prototype object on-demand in the runtime system.
+  LoadRoot(r0, Heap::kTheHoleValueRootIndex);
+  cmp(result, r0);
+  beq(miss);
+
+  // If the function does not have an initial map, we're done.
+  Label done;
+  CompareObjectType(result, scratch, scratch, MAP_TYPE);
+  bne(&done);
+
+  // Get the prototype from the initial map.
+  LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
+
+  if (miss_on_bound_function) {
+    b(&done);
+
+    // Non-instance prototype: Fetch prototype from constructor field
+    // in initial map.
+    bind(&non_instance);
+    LoadP(result, FieldMemOperand(result, Map::kConstructorOffset));
+  }
+
+  // All done.
+  bind(&done);
+}
+
+
+void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
+                              Condition cond) {
+  DCHECK(AllowThisStubCall(stub));  // Stub calls are not allowed in some stubs.
+  Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
+}
+
+
+void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
+  Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
+}
+
+
+static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
+  return ref0.address() - ref1.address();
+}
+
+
+void MacroAssembler::CallApiFunctionAndReturn(
+    Register function_address, ExternalReference thunk_ref, int stack_space,
+    MemOperand return_value_operand, MemOperand* context_restore_operand) {
+  ExternalReference next_address =
+      ExternalReference::handle_scope_next_address(isolate());
+  const int kNextOffset = 0;
+  const int kLimitOffset = AddressOffset(
+      ExternalReference::handle_scope_limit_address(isolate()), next_address);
+  const int kLevelOffset = AddressOffset(
+      ExternalReference::handle_scope_level_address(isolate()), next_address);
+
+  DCHECK(function_address.is(r4) || function_address.is(r5));
+  Register scratch = r6;
+
+  Label profiler_disabled;
+  Label end_profiler_check;
+  mov(scratch, Operand(ExternalReference::is_profiling_address(isolate())));
+  lbz(scratch, MemOperand(scratch, 0));
+  cmpi(scratch, Operand::Zero());
+  beq(&profiler_disabled);
+
+  // Additional parameter is the address of the actual callback.
+  mov(scratch, Operand(thunk_ref));
+  jmp(&end_profiler_check);
+
+  bind(&profiler_disabled);
+  mr(scratch, function_address);
+  bind(&end_profiler_check);
+
+  // Allocate HandleScope in callee-save registers.
+  // r17 - next_address
+  // r14 - next_address->kNextOffset
+  // r15 - next_address->kLimitOffset
+  // r16 - next_address->kLevelOffset
+  mov(r17, Operand(next_address));
+  LoadP(r14, MemOperand(r17, kNextOffset));
+  LoadP(r15, MemOperand(r17, kLimitOffset));
+  lwz(r16, MemOperand(r17, kLevelOffset));
+  addi(r16, r16, Operand(1));
+  stw(r16, MemOperand(r17, kLevelOffset));
+
+  if (FLAG_log_timer_events) {
+    FrameScope frame(this, StackFrame::MANUAL);
+    PushSafepointRegisters();
+    PrepareCallCFunction(1, r3);
+    mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+    CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
+    PopSafepointRegisters();
+  }
+
+  // The native call returns to the DirectCEntry stub, which redirects to the
+  // return address pushed on the stack (it could have moved after a GC).
+  // The DirectCEntry stub itself is generated early and never moves.
+  DirectCEntryStub stub(isolate());
+  stub.GenerateCall(this, scratch);
+
+  if (FLAG_log_timer_events) {
+    FrameScope frame(this, StackFrame::MANUAL);
+    PushSafepointRegisters();
+    PrepareCallCFunction(1, r3);
+    mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+    CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
+    PopSafepointRegisters();
+  }
+
+  Label promote_scheduled_exception;
+  Label exception_handled;
+  Label delete_allocated_handles;
+  Label leave_exit_frame;
+  Label return_value_loaded;
+
+  // Load the value from ReturnValue.
+  LoadP(r3, return_value_operand);
+  bind(&return_value_loaded);
+  // No more valid handles (the result handle was the last one). Restore
+  // previous handle scope.
+  StoreP(r14, MemOperand(r17, kNextOffset));
+  if (emit_debug_code()) {
+    lwz(r4, MemOperand(r17, kLevelOffset));
+    cmp(r4, r16);
+    Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
+  }
+  subi(r16, r16, Operand(1));
+  stw(r16, MemOperand(r17, kLevelOffset));
+  LoadP(r0, MemOperand(r17, kLimitOffset));
+  cmp(r15, r0);
+  bne(&delete_allocated_handles);
+
+  // Check if the function scheduled an exception.
+  bind(&leave_exit_frame);
+  LoadRoot(r14, Heap::kTheHoleValueRootIndex);
+  mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate())));
+  LoadP(r15, MemOperand(r15));
+  cmp(r14, r15);
+  bne(&promote_scheduled_exception);
+  bind(&exception_handled);
+
+  bool restore_context = context_restore_operand != NULL;
+  if (restore_context) {
+    LoadP(cp, *context_restore_operand);
+  }
+  // LeaveExitFrame expects unwind space to be in a register.
+  mov(r14, Operand(stack_space));
+  LeaveExitFrame(false, r14, !restore_context);
+  blr();
+
+  bind(&promote_scheduled_exception);
+  {
+    FrameScope frame(this, StackFrame::INTERNAL);
+    CallExternalReference(
+        ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
+  }
+  jmp(&exception_handled);
+
+  // HandleScope limit has changed. Delete allocated extensions.
+  bind(&delete_allocated_handles);
+  StoreP(r15, MemOperand(r17, kLimitOffset));
+  mr(r14, r3);
+  PrepareCallCFunction(1, r15);
+  mov(r3, Operand(ExternalReference::isolate_address(isolate())));
+  CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
+                1);
+  mr(r3, r14);
+  b(&leave_exit_frame);
+}
+
+
+bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
+  return has_frame_ || !stub->SometimesSetsUpAFrame();
+}
+
+
+void MacroAssembler::IndexFromHash(Register hash, Register index) {
+  // If the hash field contains an array index, pick it out. The assert checks
+  // that the constants for the maximum number of digits for an array index
+  // cached in the hash field and the number of bits reserved for it do not
+  // conflict.
+  DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
+         (1 << String::kArrayIndexValueBits));
+  DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
+}
+
+
+void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
+  SmiUntag(ip, smi);
+  ConvertIntToDouble(ip, value);
+}
+
+
+void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
+                                       Register scratch1, Register scratch2,
+                                       DoubleRegister double_scratch) {
+  TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
+}
+
+
+void MacroAssembler::TryDoubleToInt32Exact(Register result,
+                                           DoubleRegister double_input,
+                                           Register scratch,
+                                           DoubleRegister double_scratch) {
+  Label done;
+  DCHECK(!double_input.is(double_scratch));
+
+  ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_PPC64
+                       scratch,
+#endif
+                       result, double_scratch);
+
+#if V8_TARGET_ARCH_PPC64
+  TestIfInt32(result, scratch, r0);
+#else
+  TestIfInt32(scratch, result, r0);
+#endif
+  bne(&done);
+
+  // Convert back and compare.
+  fcfid(double_scratch, double_scratch);
+  fcmpu(double_scratch, double_input);
+  bind(&done);
+}
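+
+
+// For reference only: a sketch of the exactness test above (hypothetical
+// name; assumes the input is NaN-free and within int64 range, a precondition
+// the emitted fcfid/fcmpu round-trip does not need):
+static inline bool DoubleIsInt32Sketch(double input) {
+  int64_t as_int64 = static_cast<int64_t>(input);  // ConvertDoubleToInt64.
+  if (as_int64 != static_cast<int32_t>(as_int64)) return false;  // TestIfInt32.
+  return static_cast<double>(as_int64) == input;  // Convert back and compare.
+}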
+
+
+void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
+                                   Register input_high, Register scratch,
+                                   DoubleRegister double_scratch, Label* done,
+                                   Label* exact) {
+  DCHECK(!result.is(input_high));
+  DCHECK(!double_input.is(double_scratch));
+  Label exception;
+
+  MovDoubleHighToInt(input_high, double_input);
+
+  // Test for NaN/Inf
+  ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
+  cmpli(result, Operand(0x7ff));
+  beq(&exception);
+
+  // Convert (rounding to -Inf)
+  ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_PPC64
+                       scratch,
+#endif
+                       result, double_scratch, kRoundToMinusInf);
+
+// Test for overflow
+#if V8_TARGET_ARCH_PPC64
+  TestIfInt32(result, scratch, r0);
+#else
+  TestIfInt32(scratch, result, r0);
+#endif
+  bne(&exception);
+
+  // Test for exactness
+  fcfid(double_scratch, double_scratch);
+  fcmpu(double_scratch, double_input);
+  beq(exact);
+  b(done);
+
+  bind(&exception);
+}
+
+
+void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
+                                                DoubleRegister double_input,
+                                                Label* done) {
+  DoubleRegister double_scratch = kScratchDoubleReg;
+  Register scratch = ip;
+
+  ConvertDoubleToInt64(double_input,
+#if !V8_TARGET_ARCH_PPC64
+                       scratch,
+#endif
+                       result, double_scratch);
+
+// Test for overflow
+#if V8_TARGET_ARCH_PPC64
+  TestIfInt32(result, scratch, r0);
+#else
+  TestIfInt32(scratch, result, r0);
+#endif
+  beq(done);
+}
+
+
+void MacroAssembler::TruncateDoubleToI(Register result,
+                                       DoubleRegister double_input) {
+  Label done;
+
+  TryInlineTruncateDoubleToI(result, double_input, &done);
+
+  // If we fell through, the inline version didn't succeed; call the stub
+  // instead.
+  mflr(r0);
+  push(r0);
+  // Put input on stack.
+  stfdu(double_input, MemOperand(sp, -kDoubleSize));
+
+  DoubleToIStub stub(isolate(), sp, result, 0, true, true);
+  CallStub(&stub);
+
+  addi(sp, sp, Operand(kDoubleSize));
+  pop(r0);
+  mtlr(r0);
+
+  bind(&done);
+}
+
+
+void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
+  Label done;
+  DoubleRegister double_scratch = kScratchDoubleReg;
+  DCHECK(!result.is(object));
+
+  lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
+  TryInlineTruncateDoubleToI(result, double_scratch, &done);
+
+  // If we fell through, the inline version didn't succeed; call the stub
+  // instead.
+  mflr(r0);
+  push(r0);
+  DoubleToIStub stub(isolate(), object, result,
+                     HeapNumber::kValueOffset - kHeapObjectTag, true, true);
+  CallStub(&stub);
+  pop(r0);
+  mtlr(r0);
+
+  bind(&done);
+}
+
+
+void MacroAssembler::TruncateNumberToI(Register object, Register result,
+                                       Register heap_number_map,
+                                       Register scratch1, Label* not_number) {
+  Label done;
+  DCHECK(!result.is(object));
+
+  UntagAndJumpIfSmi(result, object, &done);
+  JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
+  TruncateHeapNumberToI(result, object);
+
+  bind(&done);
+}
+
+
+void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
+                                         int num_least_bits) {
+#if V8_TARGET_ARCH_PPC64
+  rldicl(dst, src, kBitsPerPointer - kSmiShift,
+         kBitsPerPointer - num_least_bits);
+#else
+  rlwinm(dst, src, kBitsPerPointer - kSmiShift,
+         kBitsPerPointer - num_least_bits, 31);
+#endif
+}
+
+
+void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
+                                           int num_least_bits) {
+  rlwinm(dst, src, 0, 32 - num_least_bits, 31);
+}
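+
+
+// For reference only: both helpers above reduce to masking off the low bits
+// (the smi variant first rotates the tag away), as in this sketch
+// (hypothetical name):
+static inline uint32_t LeastBitsSketch(uint32_t src, int num_least_bits) {
+  return src & ((1u << num_least_bits) - 1);  // Keep only the low bits.
+}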
+
+
+void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
+                                 SaveFPRegsMode save_doubles) {
+  // All parameters are on the stack.  r3 has the return value after call.
+
+  // If the expected number of arguments of the runtime function is
+  // constant, we check that the actual number of arguments matches the
+  // expectation.
+  CHECK(f->nargs < 0 || f->nargs == num_arguments);
+
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  mov(r3, Operand(num_arguments));
+  mov(r4, Operand(ExternalReference(f, isolate())));
+  CEntryStub stub(isolate(),
+#if V8_TARGET_ARCH_PPC64
+                  f->result_size,
+#else
+                  1,
+#endif
+                  save_doubles);
+  CallStub(&stub);
+}
+
+
+void MacroAssembler::CallExternalReference(const ExternalReference& ext,
+                                           int num_arguments) {
+  mov(r3, Operand(num_arguments));
+  mov(r4, Operand(ext));
+
+  CEntryStub stub(isolate(), 1);
+  CallStub(&stub);
+}
+
+
+void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
+                                               int num_arguments,
+                                               int result_size) {
+  // TODO(1236192): Most runtime routines don't need the number of
+  // arguments passed in because it is constant. At some point we
+  // should remove this need and make the runtime routine entry code
+  // smarter.
+  mov(r3, Operand(num_arguments));
+  JumpToExternalReference(ext);
+}
+
+
+void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
+                                     int result_size) {
+  TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments,
+                            result_size);
+}
+
+
+void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
+  mov(r4, Operand(builtin));
+  CEntryStub stub(isolate(), 1);
+  Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
+}
+
+
+void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
+                                   const CallWrapper& call_wrapper) {
+  // You can't call a builtin without a valid frame.
+  DCHECK(flag == JUMP_FUNCTION || has_frame());
+
+  GetBuiltinEntry(ip, id);
+  if (flag == CALL_FUNCTION) {
+    call_wrapper.BeforeCall(CallSize(ip));
+    CallJSEntry(ip);
+    call_wrapper.AfterCall();
+  } else {
+    DCHECK(flag == JUMP_FUNCTION);
+    JumpToJSEntry(ip);
+  }
+}
+
+
+void MacroAssembler::GetBuiltinFunction(Register target,
+                                        Builtins::JavaScript id) {
+  // Load the builtins object into target register.
+  LoadP(target,
+        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  LoadP(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
+  // Load the JavaScript builtin function from the builtins object.
+  LoadP(target,
+        FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)),
+        r0);
+}
+
+
+void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
+  DCHECK(!target.is(r4));
+  GetBuiltinFunction(r4, id);
+  // Load the code entry point from the builtins object.
+  LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
+}
+
+
+void MacroAssembler::SetCounter(StatsCounter* counter, int value,
+                                Register scratch1, Register scratch2) {
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(scratch1, Operand(value));
+    mov(scratch2, Operand(ExternalReference(counter)));
+    stw(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  DCHECK(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(scratch2, Operand(ExternalReference(counter)));
+    lwz(scratch1, MemOperand(scratch2));
+    addi(scratch1, scratch1, Operand(value));
+    stw(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
+                                      Register scratch1, Register scratch2) {
+  DCHECK(value > 0);
+  if (FLAG_native_code_counters && counter->Enabled()) {
+    mov(scratch2, Operand(ExternalReference(counter)));
+    lwz(scratch1, MemOperand(scratch2));
+    subi(scratch1, scratch1, Operand(value));
+    stw(scratch1, MemOperand(scratch2));
+  }
+}
+
+
+void MacroAssembler::Assert(Condition cond, BailoutReason reason,
+                            CRegister cr) {
+  if (emit_debug_code()) Check(cond, reason, cr);
+}
+
+
+void MacroAssembler::AssertFastElements(Register elements) {
+  if (emit_debug_code()) {
+    DCHECK(!elements.is(r0));
+    Label ok;
+    push(elements);
+    LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
+    LoadRoot(r0, Heap::kFixedArrayMapRootIndex);
+    cmp(elements, r0);
+    beq(&ok);
+    LoadRoot(r0, Heap::kFixedDoubleArrayMapRootIndex);
+    cmp(elements, r0);
+    beq(&ok);
+    LoadRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
+    cmp(elements, r0);
+    beq(&ok);
+    Abort(kJSObjectWithFastElementsMapHasSlowElements);
+    bind(&ok);
+    pop(elements);
+  }
+}
+
+
+void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
+  Label L;
+  b(cond, &L, cr);
+  Abort(reason);
+  // Will not return here.
+  bind(&L);
+}
+
+
+void MacroAssembler::Abort(BailoutReason reason) {
+  Label abort_start;
+  bind(&abort_start);
+#ifdef DEBUG
+  const char* msg = GetBailoutReason(reason);
+  if (msg != NULL) {
+    RecordComment("Abort message: ");
+    RecordComment(msg);
+  }
+
+  if (FLAG_trap_on_abort) {
+    stop(msg);
+    return;
+  }
+#endif
+
+  LoadSmiLiteral(r0, Smi::FromInt(reason));
+  push(r0);
+  // Disable stub call restrictions to always allow calls to abort.
+  if (!has_frame_) {
+    // We don't actually want to generate a pile of code for this, so just
+    // claim there is a stack frame, without generating one.
+    FrameScope scope(this, StackFrame::NONE);
+    CallRuntime(Runtime::kAbort, 1);
+  } else {
+    CallRuntime(Runtime::kAbort, 1);
+  }
+  // Will not return here.
+}
+
+
+void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
+  if (context_chain_length > 0) {
+    // Move up the chain of contexts to the context containing the slot.
+    LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+    for (int i = 1; i < context_chain_length; i++) {
+      LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
+    }
+  } else {
+    // Slot is in the current function context.  Move it into the
+    // destination register in case we store into it (the write barrier
+    // cannot be allowed to destroy the context in cp).
+    mr(dst, cp);
+  }
+}
+
+
+void MacroAssembler::LoadTransitionedArrayMapConditional(
+    ElementsKind expected_kind, ElementsKind transitioned_kind,
+    Register map_in_out, Register scratch, Label* no_map_match) {
+  // Load the global or builtins object from the current context.
+  LoadP(scratch,
+        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
+
+  // Check that the function's map is the same as the expected cached map.
+  LoadP(scratch,
+        MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
+  size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
+  LoadP(scratch, FieldMemOperand(scratch, offset));
+  cmp(map_in_out, scratch);
+  bne(no_map_match);
+
+  // Use the transitioned cached map.
+  offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize;
+  LoadP(map_in_out, FieldMemOperand(scratch, offset));
+}
+
+
+void MacroAssembler::LoadGlobalFunction(int index, Register function) {
+  // Load the global or builtins object from the current context.
+  LoadP(function,
+        MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
+  // Load the native context from the global or builtins object.
+  LoadP(function,
+        FieldMemOperand(function, GlobalObject::kNativeContextOffset));
+  // Load the function from the native context.
+  LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
+}
+
+
+void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
+                                                  Register map,
+                                                  Register scratch) {
+  // Load the initial map. The global functions all have initial maps.
+  LoadP(map,
+        FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
+  if (emit_debug_code()) {
+    Label ok, fail;
+    CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
+    b(&ok);
+    bind(&fail);
+    Abort(kGlobalFunctionsMustHaveInitialMap);
+    bind(&ok);
+  }
+}
+
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+    Register reg, Register scratch, Label* not_power_of_two_or_zero) {
+  subi(scratch, reg, Operand(1));
+  cmpi(scratch, Operand::Zero());
+  blt(not_power_of_two_or_zero);
+  and_(r0, scratch, reg, SetRC);
+  bne(not_power_of_two_or_zero, cr0);
+}
+
+
+void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
+                                                     Register scratch,
+                                                     Label* zero_and_neg,
+                                                     Label* not_power_of_two) {
+  subi(scratch, reg, Operand(1));
+  cmpi(scratch, Operand::Zero());
+  blt(zero_and_neg);
+  and_(r0, scratch, reg, SetRC);
+  bne(not_power_of_two, cr0);
+}
+
+#if !V8_TARGET_ARCH_PPC64
+void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
+  DCHECK(!reg.is(overflow));
+  mr(overflow, reg);  // Save original value.
+  SmiTag(reg);
+  xor_(overflow, overflow, reg, SetRC);  // Overflow if (value ^ 2 * value) < 0.
+}
+
+
+void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
+                                         Register overflow) {
+  if (dst.is(src)) {
+    // Fall back to slower case.
+    SmiTagCheckOverflow(dst, overflow);
+  } else {
+    DCHECK(!dst.is(src));
+    DCHECK(!dst.is(overflow));
+    DCHECK(!src.is(overflow));
+    SmiTag(dst, src);
+    xor_(overflow, dst, src, SetRC);  // Overflow if (value ^ 2 * value) < 0.
+  }
+}
+#endif
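+
+
+// For reference only: a sketch of the 32-bit smi-tag overflow test above
+// (hypothetical name):
+static inline bool SmiTagOverflowsSketch(int32_t value) {
+  // SmiTag shifts left by kSmiTagSize == 1; that changes the sign bit exactly
+  // when value does not fit in 31 bits, making (value ^ 2 * value) < 0.
+  int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
+  return (value ^ tagged) < 0;
+}
+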
+
+void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
+                                      Label* on_not_both_smi) {
+  STATIC_ASSERT(kSmiTag == 0);
+  DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
+  orx(r0, reg1, reg2, LeaveRC);
+  JumpIfNotSmi(r0, on_not_both_smi);
+}
+
+
+void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
+                                       Label* smi_case) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  TestBit(src, 0, r0);
+  SmiUntag(dst, src);
+  beq(smi_case, cr0);
+}
+
+
+void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
+                                          Label* non_smi_case) {
+  STATIC_ASSERT(kSmiTag == 0);
+  STATIC_ASSERT(kSmiTagSize == 1);
+  TestBit(src, 0, r0);
+  SmiUntag(dst, src);
+  bne(non_smi_case, cr0);
+}
+
+
+void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
+                                     Label* on_either_smi) {
+  STATIC_ASSERT(kSmiTag == 0);
+  JumpIfSmi(reg1, on_either_smi);
+  JumpIfSmi(reg2, on_either_smi);
+}
+
+
+void MacroAssembler::AssertNotSmi(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object, r0);
+    Check(ne, kOperandIsASmi, cr0);
+  }
+}
+
+
+void MacroAssembler::AssertSmi(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object, r0);
+    Check(eq, kOperandIsNotSmi, cr0);
+  }
+}
+
+
+void MacroAssembler::AssertString(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object, r0);
+    Check(ne, kOperandIsASmiAndNotAString, cr0);
+    push(object);
+    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
+    CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+    pop(object);
+    Check(lt, kOperandIsNotAString);
+  }
+}
+
+
+void MacroAssembler::AssertName(Register object) {
+  if (emit_debug_code()) {
+    STATIC_ASSERT(kSmiTag == 0);
+    TestIfSmi(object, r0);
+    Check(ne, kOperandIsASmiAndNotAName, cr0);
+    push(object);
+    LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
+    CompareInstanceType(object, object, LAST_NAME_TYPE);
+    pop(object);
+    Check(le, kOperandIsNotAName);
+  }
+}
+
+
+void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
+                                                     Register scratch) {
+  if (emit_debug_code()) {
+    Label done_checking;
+    AssertNotSmi(object);
+    CompareRoot(object, Heap::kUndefinedValueRootIndex);
+    beq(&done_checking);
+    LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+    CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
+    Assert(eq, kExpectedUndefinedOrCell);
+    bind(&done_checking);
+  }
+}
+
+
+void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
+  if (emit_debug_code()) {
+    CompareRoot(reg, index);
+    Check(eq, kHeapNumberMapRegisterClobbered);
+  }
+}
+
+
+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+                                         Register heap_number_map,
+                                         Register scratch,
+                                         Label* on_not_heap_number) {
+  LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  cmp(scratch, heap_number_map);
+  bne(on_not_heap_number);
+}
+
+
+void MacroAssembler::LookupNumberStringCache(Register object, Register result,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Register scratch3,
+                                             Label* not_found) {
+  // Use of registers. Register result is used as a temporary.
+  Register number_string_cache = result;
+  Register mask = scratch3;
+
+  // Load the number string cache.
+  LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
+
+  // Make the hash mask from the length of the number string cache. It
+  // contains two elements (number and string) for each cache entry.
+  LoadP(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
+  // Divide length by two (length is a smi).
+  ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1);
+  subi(mask, mask, Operand(1));  // Make mask.
+
+  // Calculate the entry in the number string cache. The hash value in the
+  // number string cache for smis is just the smi value, and the hash for
+  // doubles is the xor of the upper and lower words. See
+  // Heap::GetNumberStringCache.
+  Label is_smi;
+  Label load_result_from_cache;
+  JumpIfSmi(object, &is_smi);
+  CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
+           DONT_DO_SMI_CHECK);
+
+  STATIC_ASSERT(8 == kDoubleSize);
+  lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
+  lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
+  xor_(scratch1, scratch1, scratch2);
+  and_(scratch1, scratch1, mask);
+
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1));
+  add(scratch1, number_string_cache, scratch1);
+
+  Register probe = mask;
+  LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
+  JumpIfSmi(probe, not_found);
+  lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
+  lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
+  fcmpu(d0, d1);
+  bne(not_found);  // The cache did not contain this value.
+  b(&load_result_from_cache);
+
+  bind(&is_smi);
+  Register scratch = scratch1;
+  SmiUntag(scratch, object);
+  and_(scratch, mask, scratch);
+  // Calculate address of entry in string cache: each entry consists
+  // of two pointer sized fields.
+  ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1));
+  add(scratch, number_string_cache, scratch);
+
+  // Check if the entry is the smi we are looking for.
+  LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
+  cmp(object, probe);
+  bne(not_found);
+
+  // Get the result from the cache.
+  bind(&load_result_from_cache);
+  LoadP(result,
+        FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
+  IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
+                   scratch1, scratch2);
+}
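+
+
+// For reference only: the cache index computed above, sketched with
+// illustrative names; mask is (cache length / 2) - 1 as derived from
+// FixedArray::kLengthOffset:
+static inline uint32_t NumberCacheIndexSketch(uint32_t exponent_word,
+                                              uint32_t mantissa_word,
+                                              uint32_t mask) {
+  // Doubles hash as the xor of their two words; smis hash as the untagged
+  // value. Each cache entry is a (number, string) pair, hence the extra +1
+  // in the pointer-size shift when scaling the index to an address.
+  return (exponent_word ^ mantissa_word) & mask;
+}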
+
+
+void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
+    Register first, Register second, Register scratch1, Register scratch2,
+    Label* failure) {
+  // Test that both first and second are sequential one-byte strings.
+  // Assume that they are non-smis.
+  LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
+  LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
+  lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
+  lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
+
+  JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
+                                                 scratch2, failure);
+}
+
+void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
+                                                           Register second,
+                                                           Register scratch1,
+                                                           Register scratch2,
+                                                           Label* failure) {
+  // Check that neither is a smi.
+  and_(scratch1, first, second);
+  JumpIfSmi(scratch1, failure);
+  JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
+                                               scratch2, failure);
+}
+
+
+void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
+                                                     Label* not_unique_name) {
+  STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
+  Label succeed;
+  andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+  beq(&succeed, cr0);
+  cmpi(reg, Operand(SYMBOL_TYPE));
+  bne(not_unique_name);
+
+  bind(&succeed);
+}
+
+
+// Allocates a heap number or jumps to the gc_required label if the young
+// space is full and a scavenge is needed.
+void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
+                                        Register scratch2,
+                                        Register heap_number_map,
+                                        Label* gc_required,
+                                        TaggingMode tagging_mode,
+                                        MutableMode mode) {
+  // Allocate an object in the heap for the heap number and tag it as a heap
+  // object.
+  Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
+           tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
+
+  Heap::RootListIndex map_index = mode == MUTABLE
+                                      ? Heap::kMutableHeapNumberMapRootIndex
+                                      : Heap::kHeapNumberMapRootIndex;
+  AssertIsRoot(heap_number_map, map_index);
+
+  // Store heap number map in the allocated object.
+  if (tagging_mode == TAG_RESULT) {
+    StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
+           r0);
+  } else {
+    StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
+  }
+}
+
+
+void MacroAssembler::AllocateHeapNumberWithValue(
+    Register result, DoubleRegister value, Register scratch1, Register scratch2,
+    Register heap_number_map, Label* gc_required) {
+  AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
+  stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
+}
+
+
+// Copies a fixed number of fields of heap objects from src to dst.
+void MacroAssembler::CopyFields(Register dst, Register src, RegList temps,
+                                int field_count) {
+  // At least one bit set in the first 15 registers.
+  DCHECK((temps & ((1 << 15) - 1)) != 0);
+  DCHECK((temps & dst.bit()) == 0);
+  DCHECK((temps & src.bit()) == 0);
+  // Primitive implementation using only one temporary register.
+
+  Register tmp = no_reg;
+  // Find a temp register in temps list.
+  for (int i = 0; i < 15; i++) {
+    if ((temps & (1 << i)) != 0) {
+      tmp.set_code(i);
+      break;
+    }
+  }
+  DCHECK(!tmp.is(no_reg));
+
+  for (int i = 0; i < field_count; i++) {
+    LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0);
+    StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0);
+  }
+}
+
+
+void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
+                               Register scratch) {
+  Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
+
+  DCHECK(!scratch.is(r0));
+
+  cmpi(length, Operand::Zero());
+  beq(&done);
+
+  // Check src alignment and length to see whether word_loop is possible.
+  andi(scratch, src, Operand(kPointerSize - 1));
+  beq(&aligned, cr0);
+  subfic(scratch, scratch, Operand(kPointerSize * 2));
+  cmp(length, scratch);
+  blt(&byte_loop);
+
+  // Align src before copying in word size chunks.
+  subi(scratch, scratch, Operand(kPointerSize));
+  mtctr(scratch);
+  bind(&align_loop);
+  lbz(scratch, MemOperand(src));
+  addi(src, src, Operand(1));
+  subi(length, length, Operand(1));
+  stb(scratch, MemOperand(dst));
+  addi(dst, dst, Operand(1));
+  bdnz(&align_loop);
+
+  bind(&aligned);
+
+  // Copy bytes in word size chunks.
+  if (emit_debug_code()) {
+    andi(r0, src, Operand(kPointerSize - 1));
+    Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
+  }
+
+  ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
+  cmpi(scratch, Operand::Zero());
+  beq(&byte_loop);
+
+  mtctr(scratch);
+  bind(&word_loop);
+  LoadP(scratch, MemOperand(src));
+  addi(src, src, Operand(kPointerSize));
+  subi(length, length, Operand(kPointerSize));
+  if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
+    // Currently false for PPC, but a possible future optimization.
+    StoreP(scratch, MemOperand(dst));
+    addi(dst, dst, Operand(kPointerSize));
+  } else {
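+    // dst may not be word aligned, so store the word one byte at a time in
+    // the target's byte order.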
+#if V8_TARGET_LITTLE_ENDIAN
+    stb(scratch, MemOperand(dst, 0));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 1));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 2));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 3));
+#if V8_TARGET_ARCH_PPC64
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 4));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 5));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 6));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 7));
+#endif
+#else
+#if V8_TARGET_ARCH_PPC64
+    stb(scratch, MemOperand(dst, 7));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 6));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 5));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 4));
+    ShiftRightImm(scratch, scratch, Operand(8));
+#endif
+    stb(scratch, MemOperand(dst, 3));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 2));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 1));
+    ShiftRightImm(scratch, scratch, Operand(8));
+    stb(scratch, MemOperand(dst, 0));
+#endif
+    addi(dst, dst, Operand(kPointerSize));
+  }
+  bdnz(&word_loop);
+
+  // Copy any remaining bytes.
+  cmpi(length, Operand::Zero());
+  beq(&done);
+
+  bind(&byte_loop);
+  mtctr(length);
+  bind(&byte_loop_1);
+  lbz(scratch, MemOperand(src));
+  addi(src, src, Operand(1));
+  stb(scratch, MemOperand(dst));
+  addi(dst, dst, Operand(1));
+  bdnz(&byte_loop_1);
+
+  bind(&done);
+}
+
+
+void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset,
+                                                 Register count,
+                                                 Register filler) {
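+  // Use the count register (CTR) to store 'filler' into 'count' consecutive
+  // pointer-sized fields starting at start_offset.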
+  Label loop;
+  mtctr(count);
+  bind(&loop);
+  StoreP(filler, MemOperand(start_offset));
+  addi(start_offset, start_offset, Operand(kPointerSize));
+  bdnz(&loop);
+}
+
+void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
+                                                Register end_offset,
+                                                Register filler) {
+  Label done;
+  sub(r0, end_offset, start_offset, LeaveOE, SetRC);
+  beq(&done, cr0);
+  ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
+  InitializeNFieldsWithFiller(start_offset, r0, filler);
+  bind(&done);
+}
+
+
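+// Pushes 'count' consecutive double registers, starting at 'first', onto the
+// stack, adjusting 'location' downward first.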
+void MacroAssembler::SaveFPRegs(Register location, int first, int count) {
+  DCHECK(count > 0);
+  int cur = first;
+  subi(location, location, Operand(count * kDoubleSize));
+  for (int i = 0; i < count; i++) {
+    DoubleRegister reg = DoubleRegister::from_code(cur++);
+    stfd(reg, MemOperand(location, i * kDoubleSize));
+  }
+}
+
+
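+// Reloads the double registers saved by SaveFPRegs and pops them by
+// adjusting 'location' upward.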
+void MacroAssembler::RestoreFPRegs(Register location, int first, int count) {
+  DCHECK(count > 0);
+  int cur = first + count - 1;
+  for (int i = count - 1; i >= 0; i--) {
+    DoubleRegister reg = DoubleRegister::from_code(cur--);
+    lfd(reg, MemOperand(location, i * kDoubleSize));
+  }
+  addi(location, location, Operand(count * kDoubleSize));
+}
+
+
+void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
+    Register first, Register second, Register scratch1, Register scratch2,
+    Label* failure) {
+  const int kFlatOneByteStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  const int kFlatOneByteStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
+  andi(scratch1, first, Operand(kFlatOneByteStringMask));
+  andi(scratch2, second, Operand(kFlatOneByteStringMask));
+  cmpi(scratch1, Operand(kFlatOneByteStringTag));
+  bne(failure);
+  cmpi(scratch2, Operand(kFlatOneByteStringTag));
+  bne(failure);
+}
+
+
+void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
+                                                              Register scratch,
+                                                              Label* failure) {
+  const int kFlatOneByteStringMask =
+      kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
+  const int kFlatOneByteStringTag =
+      kStringTag | kOneByteStringTag | kSeqStringTag;
+  andi(scratch, type, Operand(kFlatOneByteStringMask));
+  cmpi(scratch, Operand(kFlatOneByteStringTag));
+  bne(failure);
+}
+
+static const int kRegisterPassedArguments = 8;
+
+
+int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
+                                              int num_double_arguments) {
+  int stack_passed_words = 0;
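+  // Doubles that do not fit in floating-point argument registers are passed
+  // on the stack, two words each.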
+  if (num_double_arguments > DoubleRegister::kNumRegisters) {
+    stack_passed_words +=
+        2 * (num_double_arguments - DoubleRegister::kNumRegisters);
+  }
+  // Up to 8 simple arguments are passed in registers r3..r10.
+  if (num_reg_arguments > kRegisterPassedArguments) {
+    stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
+  }
+  return stack_passed_words;
+}
+
+
+void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
+                                               Register value,
+                                               uint32_t encoding_mask) {
+  Label is_object;
+  TestIfSmi(string, r0);
+  Check(ne, kNonObject, cr0);
+
+  LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
+  lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
+
+  andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
+  cmpi(ip, Operand(encoding_mask));
+  Check(eq, kUnexpectedStringType);
+
+// The index is assumed to come in untagged. Tag it to compare with the
+// string length without using a temp register; it is restored at the end of
+// this function.
+#if !V8_TARGET_ARCH_PPC64
+  Label index_tag_ok, index_tag_bad;
+  JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
+#endif
+  SmiTag(index, index);
+#if !V8_TARGET_ARCH_PPC64
+  b(&index_tag_ok);
+  bind(&index_tag_bad);
+  Abort(kIndexIsTooLarge);
+  bind(&index_tag_ok);
+#endif
+
+  LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
+  cmp(index, ip);
+  Check(lt, kIndexIsTooLarge);
+
+  DCHECK(Smi::FromInt(0) == 0);
+  cmpi(index, Operand::Zero());
+  Check(ge, kIndexIsNegative);
+
+  SmiUntag(index, index);
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          int num_double_arguments,
+                                          Register scratch) {
+  int frame_alignment = ActivationFrameAlignment();
+  int stack_passed_arguments =
+      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+  int stack_space = kNumRequiredStackFrameSlots;
+
+  if (frame_alignment > kPointerSize) {
+    // Make stack end at alignment and make room for stack arguments
+    // -- preserving original value of sp.
+    mr(scratch, sp);
+    addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+    ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+    StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    // Make room for stack arguments
+    stack_space += stack_passed_arguments;
+  }
+
+  // Allocate frame with required slots to make ABI work.
+  li(r0, Operand::Zero());
+  StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
+}
+
+
+void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
+                                          Register scratch) {
+  PrepareCallCFunction(num_reg_arguments, 0, scratch);
+}
+
+
+void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
+
+
+void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
+
+
+void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
+                                          DoubleRegister src2) {
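+  // Move src1 to d1 and src2 to d2; if src2 already occupies d1, move it out
+  // of the way first so it is not clobbered.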
+  if (src2.is(d1)) {
+    DCHECK(!src1.is(d2));
+    Move(d2, src2);
+    Move(d1, src1);
+  } else {
+    Move(d1, src1);
+    Move(d2, src2);
+  }
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_reg_arguments,
+                                   int num_double_arguments) {
+  mov(ip, Operand(function));
+  CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
+                                   int num_double_arguments) {
+  CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
+}
+
+
+void MacroAssembler::CallCFunction(ExternalReference function,
+                                   int num_arguments) {
+  CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunction(Register function, int num_arguments) {
+  CallCFunction(function, num_arguments, 0);
+}
+
+
+void MacroAssembler::CallCFunctionHelper(Register function,
+                                         int num_reg_arguments,
+                                         int num_double_arguments) {
+  DCHECK(has_frame());
+// Just call directly. The function called cannot cause a GC, or
+// allow preemption, so the return address in the link register
+// stays correct.
+#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
+  // AIX uses a function descriptor. When calling C code, be aware of this
+  // descriptor and pick up values from it.
+  LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
+  LoadP(ip, MemOperand(function, 0));
+  Register dest = ip;
+#elif ABI_TOC_ADDRESSABILITY_VIA_IP
+  Move(ip, function);
+  Register dest = ip;
+#else
+  Register dest = function;
+#endif
+
+  Call(dest);
+
+  // Remove the frame created in PrepareCallCFunction.
+  int stack_passed_arguments =
+      CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
+  int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
+  if (ActivationFrameAlignment() > kPointerSize) {
+    LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
+  } else {
+    addi(sp, sp, Operand(stack_space * kPointerSize));
+  }
+}
+
+
+void MacroAssembler::FlushICache(Register address, size_t size,
+                                 Register scratch) {
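+  // On processors with coherent instruction and data caches, a single
+  // sync/icbi/isync sequence is sufficient.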
+  if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
+    sync();
+    icbi(r0, address);
+    isync();
+    return;
+  }
+
+  Label done;
+
+  dcbf(r0, address);
+  sync();
+  icbi(r0, address);
+  isync();
+
+  // This code handles ranges which cross a single cache line boundary;
+  // scratch holds the address of the last cache line intersecting the range.
+  const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size());
+
+  DCHECK(size > 0 && size <= static_cast<size_t>(1 << kCacheLineSizeLog2));
+  addi(scratch, address, Operand(size - 1));
+  ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2));
+  cmpl(scratch, address);
+  ble(&done);
+
+  dcbf(r0, scratch);
+  sync();
+  icbi(r0, scratch);
+  isync();
+
+  bind(&done);
+}
+
+
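+// Patches the relocated value at 'location': either a constant pool entry
+// (out-of-line constant pool) or an inline lis/ori immediate sequence.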
+void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
+                                       Register new_value) {
+  lwz(scratch, MemOperand(location));
+
+#if V8_OOL_CONSTANT_POOL
+  if (emit_debug_code()) {
+// Check that the instruction sequence is a load from the constant pool
+#if V8_TARGET_ARCH_PPC64
+    And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
+    Cmpi(scratch, Operand(ADDI), r0);
+    Check(eq, kTheInstructionShouldBeALi);
+    lwz(scratch, MemOperand(location, kInstrSize));
+#endif
+    ExtractBitMask(scratch, scratch, 0x1f * B16);
+    cmpi(scratch, Operand(kConstantPoolRegister.code()));
+    Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
+    // Scratch was clobbered. Restore it.
+    lwz(scratch, MemOperand(location));
+  }
+  // Get the address of the constant and patch it.
+  andi(scratch, scratch, Operand(kImm16Mask));
+  StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
+#else
+  // This code assumes a FIXED_SEQUENCE for lis/ori
+
+  // At this point scratch is a lis instruction.
+  if (emit_debug_code()) {
+    And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
+    Cmpi(scratch, Operand(ADDIS), r0);
+    Check(eq, kTheInstructionToPatchShouldBeALis);
+    lwz(scratch, MemOperand(location));
+  }
+
+// insert new high word into lis instruction
+#if V8_TARGET_ARCH_PPC64
+  srdi(ip, new_value, Operand(32));
+  rlwimi(scratch, ip, 16, 16, 31);
+#else
+  rlwimi(scratch, new_value, 16, 16, 31);
+#endif
+
+  stw(scratch, MemOperand(location));
+
+  lwz(scratch, MemOperand(location, kInstrSize));
+  // scratch is now ori.
+  if (emit_debug_code()) {
+    And(scratch, scratch, Operand(kOpcodeMask));
+    Cmpi(scratch, Operand(ORI), r0);
+    Check(eq, kTheInstructionShouldBeAnOri);
+    lwz(scratch, MemOperand(location, kInstrSize));
+  }
+
+// insert new low word into ori instruction
+#if V8_TARGET_ARCH_PPC64
+  rlwimi(scratch, ip, 0, 16, 31);
+#else
+  rlwimi(scratch, new_value, 0, 16, 31);
+#endif
+  stw(scratch, MemOperand(location, kInstrSize));
+
+#if V8_TARGET_ARCH_PPC64
+  if (emit_debug_code()) {
+    lwz(scratch, MemOperand(location, 2 * kInstrSize));
+    // scratch is now sldi.
+    And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
+    Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
+    Check(eq, kTheInstructionShouldBeASldi);
+  }
+
+  lwz(scratch, MemOperand(location, 3 * kInstrSize));
+  // scratch is now ori.
+  if (emit_debug_code()) {
+    And(scratch, scratch, Operand(kOpcodeMask));
+    Cmpi(scratch, Operand(ORIS), r0);
+    Check(eq, kTheInstructionShouldBeAnOris);
+    lwz(scratch, MemOperand(location, 3 * kInstrSize));
+  }
+
+  rlwimi(scratch, new_value, 16, 16, 31);
+  stw(scratch, MemOperand(location, 3 * kInstrSize));
+
+  lwz(scratch, MemOperand(location, 4 * kInstrSize));
+  // scratch is now ori.
+  if (emit_debug_code()) {
+    And(scratch, scratch, Operand(kOpcodeMask));
+    Cmpi(scratch, Operand(ORI), r0);
+    Check(eq, kTheInstructionShouldBeAnOri);
+    lwz(scratch, MemOperand(location, 4 * kInstrSize));
+  }
+  rlwimi(scratch, new_value, 0, 16, 31);
+  stw(scratch, MemOperand(location, 4 * kInstrSize));
+#endif
+
+// Update the I-cache so the patched lis/ori sequence can be executed.
+#if V8_TARGET_ARCH_PPC64
+  FlushICache(location, 5 * kInstrSize, scratch);
+#else
+  FlushICache(location, 2 * kInstrSize, scratch);
+#endif
+#endif
+}
+
+
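+// Reads back the relocated value at 'location', from the constant pool entry
+// or the inline lis/ori immediate sequence.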
+void MacroAssembler::GetRelocatedValue(Register location, Register result,
+                                       Register scratch) {
+  lwz(result, MemOperand(location));
+
+#if V8_OOL_CONSTANT_POOL
+  if (emit_debug_code()) {
+// Check that the instruction sequence is a load from the constant pool
+#if V8_TARGET_ARCH_PPC64
+    And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
+    Cmpi(result, Operand(ADDI), r0);
+    Check(eq, kTheInstructionShouldBeALi);
+    lwz(result, MemOperand(location, kInstrSize));
+#endif
+    ExtractBitMask(result, result, 0x1f * B16);
+    cmpi(result, Operand(kConstantPoolRegister.code()));
+    Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
+    lwz(result, MemOperand(location));
+  }
+  // Get the address of the constant and retrieve it.
+  andi(result, result, Operand(kImm16Mask));
+  LoadPX(result, MemOperand(kConstantPoolRegister, result));
+#else
+  // This code assumes a FIXED_SEQUENCE for lis/ori
+  if (emit_debug_code()) {
+    And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
+    Cmpi(result, Operand(ADDIS), r0);
+    Check(eq, kTheInstructionShouldBeALis);
+    lwz(result, MemOperand(location));
+  }
+
+  // result now holds a lis instruction. Extract the immediate.
+  slwi(result, result, Operand(16));
+
+  lwz(scratch, MemOperand(location, kInstrSize));
+  if (emit_debug_code()) {
+    And(scratch, scratch, Operand(kOpcodeMask));
+    Cmpi(scratch, Operand(ORI), r0);
+    Check(eq, kTheInstructionShouldBeAnOri);
+    lwz(scratch, MemOperand(location, kInstrSize));
+  }
+  // Copy the low 16 bits from the ori instruction into the result.
+  rlwimi(result, scratch, 0, 16, 31);
+
+#if V8_TARGET_ARCH_PPC64
+  if (emit_debug_code()) {
+    lwz(scratch, MemOperand(location, 2 * kInstrSize));
+    // scratch is now sldi.
+    And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
+    Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
+    Check(eq, kTheInstructionShouldBeASldi);
+  }
+
+  lwz(scratch, MemOperand(location, 3 * kInstrSize));
+  // scratch is now ori.
+  if (emit_debug_code()) {
+    And(scratch, scratch, Operand(kOpcodeMask));
+    Cmpi(scratch, Operand(ORIS), r0);
+    Check(eq, kTheInstructionShouldBeAnOris);
+    lwz(scratch, MemOperand(location, 3 * kInstrSize));
+  }
+  sldi(result, result, Operand(16));
+  rldimi(result, scratch, 0, 48);
+
+  lwz(scratch, MemOperand(location, 4 * kInstrSize));
+  // scratch is now ori.
+  if (emit_debug_code()) {
+    And(scratch, scratch, Operand(kOpcodeMask));
+    Cmpi(scratch, Operand(ORI), r0);
+    Check(eq, kTheInstructionShouldBeAnOri);
+    lwz(scratch, MemOperand(location, 4 * kInstrSize));
+  }
+  sldi(result, result, Operand(16));
+  rldimi(result, scratch, 0, 48);
+#endif
+#endif
+}
+
+
+void MacroAssembler::CheckPageFlag(
+    Register object,
+    Register scratch,  // scratch may be same register as object
+    int mask, Condition cc, Label* condition_met) {
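+  // Align the object address down to the page start to reach the
+  // MemoryChunk header, then test the requested flag bits.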
+  DCHECK(cc == ne || cc == eq);
+  ClearRightImm(scratch, object, Operand(kPageSizeBits));
+  LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
+
+  And(r0, scratch, Operand(mask), SetRC);
+
+  if (cc == ne) {
+    bne(condition_met, cr0);
+  }
+  if (cc == eq) {
+    beq(condition_met, cr0);
+  }
+}
+
+
+void MacroAssembler::CheckMapDeprecated(Handle<Map> map, Register scratch,
+                                        Label* if_deprecated) {
+  if (map->CanBeDeprecated()) {
+    mov(scratch, Operand(map));
+    lwz(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
+    ExtractBitMask(scratch, scratch, Map::Deprecated::kMask, SetRC);
+    bne(if_deprecated, cr0);
+  }
+}
+
+
+void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
+                                 Register scratch1, Label* on_black) {
+  HasColor(object, scratch0, scratch1, on_black, 1, 0);  // kBlackBitPattern.
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+}
+
+
+void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
+                              Register mask_scratch, Label* has_color,
+                              int first_bit, int second_bit) {
+  DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
+
+  GetMarkBits(object, bitmap_scratch, mask_scratch);
+
+  Label other_color, word_boundary;
+  lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  // Test the first bit
+  and_(r0, ip, mask_scratch, SetRC);
+  b(first_bit == 1 ? eq : ne, &other_color, cr0);
+  // Shift left 1
+  // May need to load the next cell
+  slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
+  beq(&word_boundary, cr0);
+  // Test the second bit
+  and_(r0, ip, mask_scratch, SetRC);
+  b(second_bit == 1 ? ne : eq, has_color, cr0);
+  b(&other_color);
+
+  bind(&word_boundary);
+  lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
+  andi(r0, ip, Operand(1));
+  b(second_bit == 1 ? ne : eq, has_color, cr0);
+  bind(&other_color);
+}
+
+
+// Detect some, but not all, common pointer-free objects.  This is used by the
+// incremental write barrier which doesn't care about oddballs (they are always
+// marked black immediately so this code is not hit).
+void MacroAssembler::JumpIfDataObject(Register value, Register scratch,
+                                      Label* not_data_object) {
+  Label is_data_object;
+  LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
+  CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
+  beq(&is_data_object);
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
+  STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81);
+  andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  bne(not_data_object, cr0);
+  bind(&is_data_object);
+}
+
+
+void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
+                                 Register mask_reg) {
+  DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
+  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
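+  // Compute the page start in bitmap_reg, then split the address into the
+  // marking bitmap cell offset (via ip) and the in-cell bit mask.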
+  lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
+  and_(bitmap_reg, addr_reg, r0);
+  const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
+  ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
+  ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
+  ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
+  add(bitmap_reg, bitmap_reg, ip);
+  li(ip, Operand(1));
+  slw(mask_reg, ip, mask_reg);
+}
+
+
+void MacroAssembler::EnsureNotWhite(Register value, Register bitmap_scratch,
+                                    Register mask_scratch,
+                                    Register load_scratch,
+                                    Label* value_is_white_and_not_data) {
+  DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
+  GetMarkBits(value, bitmap_scratch, mask_scratch);
+
+  // If the value is black or grey we don't need to do anything.
+  DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
+  DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
+  DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
+  DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
+
+  Label done;
+
+  // Since both black and grey have a 1 in the first position and white does
+  // not have a 1 there, we only need to check one bit.
+  lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  and_(r0, mask_scratch, load_scratch, SetRC);
+  bne(&done, cr0);
+
+  if (emit_debug_code()) {
+    // Check for impossible bit pattern.
+    Label ok;
+    // The left shift may overflow, making the check conservative.
+    slwi(r0, mask_scratch, Operand(1));
+    and_(r0, load_scratch, r0, SetRC);
+    beq(&ok, cr0);
+    stop("Impossible marking bit pattern");
+    bind(&ok);
+  }
+
+  // Value is white.  We check whether it is data that doesn't need scanning.
+  // Currently only checks for HeapNumber and non-cons strings.
+  Register map = load_scratch;     // Holds map while checking type.
+  Register length = load_scratch;  // Holds length of object after testing type.
+  Label is_data_object, maybe_string_object, is_string_object, is_encoded;
+#if V8_TARGET_ARCH_PPC64
+  Label length_computed;
+#endif
+
+  // Check for heap-number
+  LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset));
+  CompareRoot(map, Heap::kHeapNumberMapRootIndex);
+  bne(&maybe_string_object);
+  li(length, Operand(HeapNumber::kSize));
+  b(&is_data_object);
+  bind(&maybe_string_object);
+
+  // Check for strings.
+  DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
+  DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
+  // If it's a string and it's not a cons string then it's an object containing
+  // no GC pointers.
+  Register instance_type = load_scratch;
+  lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
+  bne(value_is_white_and_not_data, cr0);
+  // It's a non-indirect (non-cons and non-slice) string.
+  // If it's external, the length is just ExternalString::kSize.
+  // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
+  // External strings are the only ones with the kExternalStringTag bit
+  // set.
+  DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
+  DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
+  andi(r0, instance_type, Operand(kExternalStringTag));
+  beq(&is_string_object, cr0);
+  li(length, Operand(ExternalString::kSize));
+  b(&is_data_object);
+  bind(&is_string_object);
+
+  // Sequential string, either Latin1 or UC16.
+  // For Latin1 (char-size of 1) we untag the smi to get the length.
+  // For UC16 (char-size of 2):
+  //   - (32-bit) we just leave the smi tag in place, thereby getting
+  //              the length multiplied by 2.
+  //   - (64-bit) we compute the offset in the 2-byte array
+  DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
+  LoadP(ip, FieldMemOperand(value, String::kLengthOffset));
+  andi(r0, instance_type, Operand(kStringEncodingMask));
+  beq(&is_encoded, cr0);
+  SmiUntag(ip);
+#if V8_TARGET_ARCH_PPC64
+  b(&length_computed);
+#endif
+  bind(&is_encoded);
+#if V8_TARGET_ARCH_PPC64
+  SmiToShortArrayOffset(ip, ip);
+  bind(&length_computed);
+#else
+  DCHECK(kSmiShift == 1);
+#endif
+  addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
+  li(r0, Operand(~kObjectAlignmentMask));
+  and_(length, length, r0);
+
+  bind(&is_data_object);
+  // Value is a data object, and it is white.  Mark it black.  Since we know
+  // that the object is white we can make it black by flipping one bit.
+  lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+  orx(ip, ip, mask_scratch);
+  stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
+
+  mov(ip, Operand(~Page::kPageAlignmentMask));
+  and_(bitmap_scratch, bitmap_scratch, ip);
+  lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+  add(ip, ip, length);
+  stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
+
+  bind(&done);
+}
+
+
+// Saturate a value into 8-bit unsigned integer
+//   if input_value < 0, output_value is 0
+//   if input_value > 255, output_value is 255
+//   otherwise output_value is the input_value
+void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
+  Label done, negative_label, overflow_label;
+  int satval = (1 << 8) - 1;
+
+  cmpi(input_reg, Operand::Zero());
+  blt(&negative_label);
+
+  cmpi(input_reg, Operand(satval));
+  bgt(&overflow_label);
+  if (!output_reg.is(input_reg)) {
+    mr(output_reg, input_reg);
+  }
+  b(&done);
+
+  bind(&negative_label);
+  li(output_reg, Operand::Zero());  // set to 0 if negative
+  b(&done);
+
+  bind(&overflow_label);  // set to satval if > satval
+  li(output_reg, Operand(satval));
+
+  bind(&done);
+}
+
+
+void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
+
+
+void MacroAssembler::ResetRoundingMode() {
+  mtfsfi(7, kRoundToNearest);  // reset (default is kRoundToNearest)
+}
+
+
+void MacroAssembler::ClampDoubleToUint8(Register result_reg,
+                                        DoubleRegister input_reg,
+                                        DoubleRegister double_scratch) {
+  Label above_zero;
+  Label done;
+  Label in_bounds;
+
+  LoadDoubleLiteral(double_scratch, 0.0, result_reg);
+  fcmpu(input_reg, double_scratch);
+  bgt(&above_zero);
+
+  // Double value is <= 0 or NaN; return 0.
+  LoadIntLiteral(result_reg, 0);
+  b(&done);
+
+  // Double value is > 255; return 255.
+  bind(&above_zero);
+  LoadDoubleLiteral(double_scratch, 255.0, result_reg);
+  fcmpu(input_reg, double_scratch);
+  ble(&in_bounds);
+  LoadIntLiteral(result_reg, 255);
+  b(&done);
+
+  // In 0-255 range, round and truncate.
+  bind(&in_bounds);
+
+  // round to nearest (default rounding mode)
+  fctiw(double_scratch, input_reg);
+  MovDoubleLowToInt(result_reg, double_scratch);
+  bind(&done);
+}
+
+
+void MacroAssembler::LoadInstanceDescriptors(Register map,
+                                             Register descriptors) {
+  LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
+}
+
+
+void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
+  lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
+  DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
+}
+
+
+void MacroAssembler::EnumLength(Register dst, Register map) {
+  STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
+  lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
+  ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
+  SmiTag(dst);
+}
+
+
+void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
+  Register empty_fixed_array_value = r9;
+  LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
+  Label next, start;
+  mr(r5, r3);
+
+  // Check if the enum length field is properly initialized, indicating that
+  // there is an enum cache.
+  LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+
+  EnumLength(r6, r4);
+  CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
+  beq(call_runtime);
+
+  b(&start);
+
+  bind(&next);
+  LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
+
+  // For all objects but the receiver, check that the cache is empty.
+  EnumLength(r6, r4);
+  CmpSmiLiteral(r6, Smi::FromInt(0), r0);
+  bne(call_runtime);
+
+  bind(&start);
+
+  // Check that there are no elements. Register r5 contains the current JS
+  // object we've reached through the prototype chain.
+  Label no_elements;
+  LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
+  cmp(r5, empty_fixed_array_value);
+  beq(&no_elements);
+
+  // Second chance, the object may be using the empty slow element dictionary.
+  CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
+  bne(call_runtime);
+
+  bind(&no_elements);
+  LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
+  cmp(r5, null_value);
+  bne(&next);
+}
+
+
+////////////////////////////////////////////////////////////////////////////////
+//
+// New MacroAssembler Interfaces added for PPC
+//
+////////////////////////////////////////////////////////////////////////////////
+void MacroAssembler::LoadIntLiteral(Register dst, int value) {
+  mov(dst, Operand(value));
+}
+
+
+void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
+  mov(dst, Operand(smi));
+}
+
+
+void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
+                                       Register scratch) {
+#if V8_OOL_CONSTANT_POOL
+  // TODO(mbrandy): enable extended constant pool usage for doubles.
+  //                See ARM commit e27ab337 for a reference.
+  if (is_ool_constant_pool_available() && !is_constant_pool_full()) {
+    RelocInfo rinfo(pc_, value);
+    ConstantPoolAddEntry(rinfo);
+#if V8_TARGET_ARCH_PPC64
+    // We use a 2-instruction sequence here for consistency with mov.
+    li(scratch, Operand::Zero());
+    lfdx(result, MemOperand(kConstantPoolRegister, scratch));
+#else
+    lfd(result, MemOperand(kConstantPoolRegister, 0));
+#endif
+    return;
+  }
+#endif
+
+  // Avoid a gcc strict-aliasing error by using a union cast.
+  union {
+    double dval;
+#if V8_TARGET_ARCH_PPC64
+    intptr_t ival;
+#else
+    intptr_t ival[2];
+#endif
+  } litVal;
+
+  litVal.dval = value;
+
+#if V8_TARGET_ARCH_PPC64
+  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+    mov(scratch, Operand(litVal.ival));
+    mtfprd(result, scratch);
+    return;
+  }
+#endif
+
+  addi(sp, sp, Operand(-kDoubleSize));
+#if V8_TARGET_ARCH_PPC64
+  mov(scratch, Operand(litVal.ival));
+  std(scratch, MemOperand(sp));
+#else
+  LoadIntLiteral(scratch, litVal.ival[0]);
+  stw(scratch, MemOperand(sp, 0));
+  LoadIntLiteral(scratch, litVal.ival[1]);
+  stw(scratch, MemOperand(sp, 4));
+#endif
+  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
+  lfd(result, MemOperand(sp, 0));
+  addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
+                                    Register scratch) {
+// sign-extend src to 64-bit
+#if V8_TARGET_ARCH_PPC64
+  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+    mtfprwa(dst, src);
+    return;
+  }
+#endif
+
+  DCHECK(!src.is(scratch));
+  subi(sp, sp, Operand(kDoubleSize));
+#if V8_TARGET_ARCH_PPC64
+  extsw(scratch, src);
+  std(scratch, MemOperand(sp, 0));
+#else
+  srawi(scratch, src, 31);
+  stw(scratch, MemOperand(sp, Register::kExponentOffset));
+  stw(src, MemOperand(sp, Register::kMantissaOffset));
+#endif
+  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
+  lfd(dst, MemOperand(sp, 0));
+  addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
+                                            Register scratch) {
+// zero-extend src to 64-bit
+#if V8_TARGET_ARCH_PPC64
+  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+    mtfprwz(dst, src);
+    return;
+  }
+#endif
+
+  DCHECK(!src.is(scratch));
+  subi(sp, sp, Operand(kDoubleSize));
+#if V8_TARGET_ARCH_PPC64
+  clrldi(scratch, src, Operand(32));
+  std(scratch, MemOperand(sp, 0));
+#else
+  li(scratch, Operand::Zero());
+  stw(scratch, MemOperand(sp, Register::kExponentOffset));
+  stw(src, MemOperand(sp, Register::kMantissaOffset));
+#endif
+  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
+  lfd(dst, MemOperand(sp, 0));
+  addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
+#if !V8_TARGET_ARCH_PPC64
+                                      Register src_hi,
+#endif
+                                      Register src) {
+#if V8_TARGET_ARCH_PPC64
+  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+    mtfprd(dst, src);
+    return;
+  }
+#endif
+
+  subi(sp, sp, Operand(kDoubleSize));
+#if V8_TARGET_ARCH_PPC64
+  std(src, MemOperand(sp, 0));
+#else
+  stw(src_hi, MemOperand(sp, Register::kExponentOffset));
+  stw(src, MemOperand(sp, Register::kMantissaOffset));
+#endif
+  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
+  lfd(dst, MemOperand(sp, 0));
+  addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+#if V8_TARGET_ARCH_PPC64
+void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
+                                                Register src_hi,
+                                                Register src_lo,
+                                                Register scratch) {
+  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+    sldi(scratch, src_hi, Operand(32));
+    rldimi(scratch, src_lo, 0, 32);
+    mtfprd(dst, scratch);
+    return;
+  }
+
+  subi(sp, sp, Operand(kDoubleSize));
+  stw(src_hi, MemOperand(sp, Register::kExponentOffset));
+  stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
+  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
+  lfd(dst, MemOperand(sp));
+  addi(sp, sp, Operand(kDoubleSize));
+}
+#endif
+
+
+void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
+#if V8_TARGET_ARCH_PPC64
+  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+    mffprwz(dst, src);
+    return;
+  }
+#endif
+
+  subi(sp, sp, Operand(kDoubleSize));
+  stfd(src, MemOperand(sp));
+  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
+  lwz(dst, MemOperand(sp, Register::kMantissaOffset));
+  addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
+#if V8_TARGET_ARCH_PPC64
+  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+    mffprd(dst, src);
+    srdi(dst, dst, Operand(32));
+    return;
+  }
+#endif
+
+  subi(sp, sp, Operand(kDoubleSize));
+  stfd(src, MemOperand(sp));
+  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
+  lwz(dst, MemOperand(sp, Register::kExponentOffset));
+  addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::MovDoubleToInt64(
+#if !V8_TARGET_ARCH_PPC64
+    Register dst_hi,
+#endif
+    Register dst, DoubleRegister src) {
+#if V8_TARGET_ARCH_PPC64
+  if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
+    mffprd(dst, src);
+    return;
+  }
+#endif
+
+  subi(sp, sp, Operand(kDoubleSize));
+  stfd(src, MemOperand(sp));
+  nop(GROUP_ENDING_NOP);  // LHS/RAW optimization
+#if V8_TARGET_ARCH_PPC64
+  ld(dst, MemOperand(sp, 0));
+#else
+  lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
+  lwz(dst, MemOperand(sp, Register::kMantissaOffset));
+#endif
+  addi(sp, sp, Operand(kDoubleSize));
+}
+
+
+void MacroAssembler::Add(Register dst, Register src, intptr_t value,
+                         Register scratch) {
+  if (is_int16(value)) {
+    addi(dst, src, Operand(value));
+  } else {
+    mov(scratch, Operand(value));
+    add(dst, src, scratch);
+  }
+}
+
+
+void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
+                          CRegister cr) {
+  intptr_t value = src2.immediate();
+  if (is_int16(value)) {
+    cmpi(src1, src2, cr);
+  } else {
+    mov(scratch, src2);
+    cmp(src1, scratch, cr);
+  }
+}
+
+
+void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
+                           CRegister cr) {
+  intptr_t value = src2.immediate();
+  if (is_uint16(value)) {
+    cmpli(src1, src2, cr);
+  } else {
+    mov(scratch, src2);
+    cmpl(src1, scratch, cr);
+  }
+}
+
+
+void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
+                           CRegister cr) {
+  intptr_t value = src2.immediate();
+  if (is_int16(value)) {
+    cmpwi(src1, src2, cr);
+  } else {
+    mov(scratch, src2);
+    cmpw(src1, scratch, cr);
+  }
+}
+
+
+void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
+                            Register scratch, CRegister cr) {
+  intptr_t value = src2.immediate();
+  if (is_uint16(value)) {
+    cmplwi(src1, src2, cr);
+  } else {
+    mov(scratch, src2);
+    cmplw(src1, scratch, cr);
+  }
+}
+
+
+void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
+                         RCBit rc) {
+  if (rb.is_reg()) {
+    and_(ra, rs, rb.rm(), rc);
+  } else {
+    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
+      andi(ra, rs, rb);
+    } else {
+      // mov handles the relocation.
+      DCHECK(!rs.is(r0));
+      mov(r0, rb);
+      and_(ra, rs, r0, rc);
+    }
+  }
+}
+
+
+void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
+  if (rb.is_reg()) {
+    orx(ra, rs, rb.rm(), rc);
+  } else {
+    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
+      ori(ra, rs, rb);
+    } else {
+      // mov handles the relocation.
+      DCHECK(!rs.is(r0));
+      mov(r0, rb);
+      orx(ra, rs, r0, rc);
+    }
+  }
+}
+
+
+void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
+                         RCBit rc) {
+  if (rb.is_reg()) {
+    xor_(ra, rs, rb.rm(), rc);
+  } else {
+    if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
+      xori(ra, rs, rb);
+    } else {
+      // mov handles the relocation.
+      DCHECK(!rs.is(r0));
+      mov(r0, rb);
+      xor_(ra, rs, r0, rc);
+    }
+  }
+}
+
+
+void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
+                                   CRegister cr) {
+#if V8_TARGET_ARCH_PPC64
+  LoadSmiLiteral(scratch, smi);
+  cmp(src1, scratch, cr);
+#else
+  Cmpi(src1, Operand(smi), scratch, cr);
+#endif
+}
+
+
+void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
+                                    CRegister cr) {
+#if V8_TARGET_ARCH_PPC64
+  LoadSmiLiteral(scratch, smi);
+  cmpl(src1, scratch, cr);
+#else
+  Cmpli(src1, Operand(smi), scratch, cr);
+#endif
+}
+
+
+void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
+                                   Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+  LoadSmiLiteral(scratch, smi);
+  add(dst, src, scratch);
+#else
+  Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
+#endif
+}
+
+
+void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
+                                   Register scratch) {
+#if V8_TARGET_ARCH_PPC64
+  LoadSmiLiteral(scratch, smi);
+  sub(dst, src, scratch);
+#else
+  Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
+#endif
+}
+
+
+void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
+                                   Register scratch, RCBit rc) {
+#if V8_TARGET_ARCH_PPC64
+  LoadSmiLiteral(scratch, smi);
+  and_(dst, src, scratch, rc);
+#else
+  And(dst, src, Operand(smi), rc);
+#endif
+}
+
+
+// Load a "pointer"-sized value from the memory location.
+void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
+                           Register scratch) {
+  int offset = mem.offset();
+
+  if (!scratch.is(no_reg) && !is_int16(offset)) {
+    /* cannot use d-form */
+    LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_PPC64
+    ldx(dst, MemOperand(mem.ra(), scratch));
+#else
+    lwzx(dst, MemOperand(mem.ra(), scratch));
+#endif
+  } else {
+#if V8_TARGET_ARCH_PPC64
+    int misaligned = (offset & 3);
+    if (misaligned) {
+      // Adjust the base to conform to offset alignment requirements.
+      // TODO: enhance to use scratch if dst is unsuitable.
+      DCHECK(!dst.is(r0));
+      addi(dst, mem.ra(), Operand((offset & 3) - 4));
+      ld(dst, MemOperand(dst, (offset & ~3) + 4));
+    } else {
+      ld(dst, mem);
+    }
+#else
+    lwz(dst, mem);
+#endif
+  }
+}
+
+
+// Store a "pointer"-sized value to the memory location.
+void MacroAssembler::StoreP(Register src, const MemOperand& mem,
+                            Register scratch) {
+  int offset = mem.offset();
+
+  if (!scratch.is(no_reg) && !is_int16(offset)) {
+    /* cannot use d-form */
+    LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_PPC64
+    stdx(src, MemOperand(mem.ra(), scratch));
+#else
+    stwx(src, MemOperand(mem.ra(), scratch));
+#endif
+  } else {
+#if V8_TARGET_ARCH_PPC64
+    int misaligned = (offset & 3);
+    if (misaligned) {
+      // Adjust the base to conform to offset alignment requirements.
+      // A suitable scratch register is required here.
+      DCHECK(!scratch.is(no_reg));
+      if (scratch.is(r0)) {
+        LoadIntLiteral(scratch, offset);
+        stdx(src, MemOperand(mem.ra(), scratch));
+      } else {
+        addi(scratch, mem.ra(), Operand((offset & 3) - 4));
+        std(src, MemOperand(scratch, (offset & ~3) + 4));
+      }
+    } else {
+      std(src, mem);
+    }
+#else
+    stw(src, mem);
+#endif
+  }
+}
+
+void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
+                                   Register scratch) {
+  int offset = mem.offset();
+
+  if (!scratch.is(no_reg) && !is_int16(offset)) {
+    /* cannot use d-form */
+    LoadIntLiteral(scratch, offset);
+#if V8_TARGET_ARCH_PPC64
+    // lwax(dst, MemOperand(mem.ra(), scratch));
+    DCHECK(0);  // lwax not yet implemented
+#else
+    lwzx(dst, MemOperand(mem.ra(), scratch));
+#endif
+  } else {
+#if V8_TARGET_ARCH_PPC64
+    int misaligned = (offset & 3);
+    if (misaligned) {
+      // Adjust the base to conform to offset alignment requirements.
+      // TODO: enhance to use scratch if dst is unsuitable.
+      DCHECK(!dst.is(r0));
+      addi(dst, mem.ra(), Operand((offset & 3) - 4));
+      lwa(dst, MemOperand(dst, (offset & ~3) + 4));
+    } else {
+      lwa(dst, mem);
+    }
+#else
+    lwz(dst, mem);
+#endif
+  }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
+                              Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    LoadIntLiteral(scratch, offset);
+    lwzx(dst, MemOperand(base, scratch));
+  } else {
+    lwz(dst, mem);
+  }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
+                               Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    LoadIntLiteral(scratch, offset);
+    stwx(src, MemOperand(base, scratch));
+  } else {
+    stw(src, mem);
+  }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
+                                  Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    LoadIntLiteral(scratch, offset);
+    lhzx(dst, MemOperand(base, scratch));
+  } else {
+    lhz(dst, mem);
+  }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
+                                   Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    LoadIntLiteral(scratch, offset);
+    sthx(src, MemOperand(base, scratch));
+  } else {
+    sth(src, mem);
+  }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
+                              Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    LoadIntLiteral(scratch, offset);
+    lbzx(dst, MemOperand(base, scratch));
+  } else {
+    lbz(dst, mem);
+  }
+}
+
+
+// Variable length depending on whether offset fits into immediate field
+// MemOperand currently only supports d-form
+void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
+                               Register scratch) {
+  Register base = mem.ra();
+  int offset = mem.offset();
+
+  if (!is_int16(offset)) {
+    LoadIntLiteral(scratch, offset);
+    stbx(src, MemOperand(base, scratch));
+  } else {
+    stb(src, mem);
+  }
+}
+
+
+void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
+                                        Representation r, Register scratch) {
+  DCHECK(!r.IsDouble());
+  if (r.IsInteger8()) {
+    LoadByte(dst, mem, scratch);
+    extsb(dst, dst);
+  } else if (r.IsUInteger8()) {
+    LoadByte(dst, mem, scratch);
+  } else if (r.IsInteger16()) {
+    LoadHalfWord(dst, mem, scratch);
+    extsh(dst, dst);
+  } else if (r.IsUInteger16()) {
+    LoadHalfWord(dst, mem, scratch);
+#if V8_TARGET_ARCH_PPC64
+  } else if (r.IsInteger32()) {
+    LoadWord(dst, mem, scratch);
+#endif
+  } else {
+    LoadP(dst, mem, scratch);
+  }
+}
+
+
+void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
+                                         Representation r, Register scratch) {
+  DCHECK(!r.IsDouble());
+  if (r.IsInteger8() || r.IsUInteger8()) {
+    StoreByte(src, mem, scratch);
+  } else if (r.IsInteger16() || r.IsUInteger16()) {
+    StoreHalfWord(src, mem, scratch);
+#if V8_TARGET_ARCH_PPC64
+  } else if (r.IsInteger32()) {
+    StoreWord(src, mem, scratch);
+#endif
+  } else {
+    if (r.IsHeapObject()) {
+      AssertNotSmi(src);
+    } else if (r.IsSmi()) {
+      AssertSmi(src);
+    }
+    StoreP(src, mem, scratch);
+  }
+}
+
+
+void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
+                                                     Register scratch_reg,
+                                                     Label* no_memento_found) {
+  ExternalReference new_space_start =
+      ExternalReference::new_space_start(isolate());
+  ExternalReference new_space_allocation_top =
+      ExternalReference::new_space_allocation_top_address(isolate());
+  addi(scratch_reg, receiver_reg,
+       Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
+  Cmpi(scratch_reg, Operand(new_space_start), r0);
+  blt(no_memento_found);
+  mov(ip, Operand(new_space_allocation_top));
+  LoadP(ip, MemOperand(ip));
+  cmp(scratch_reg, ip);
+  bgt(no_memento_found);
+  LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
+  Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
+       r0);
+}
+
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
+                                   Register reg4, Register reg5,
+                                   Register reg6) {
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+
+  for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
+    Register candidate = Register::FromAllocationIndex(i);
+    if (regs & candidate.bit()) continue;
+    return candidate;
+  }
+  UNREACHABLE();
+  return no_reg;
+}
+
+
+void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
+                                                      Register scratch0,
+                                                      Register scratch1,
+                                                      Label* found) {
+  DCHECK(!scratch1.is(scratch0));
+  Factory* factory = isolate()->factory();
+  Register current = scratch0;
+  Label loop_again;
+
+  // Walk up the prototype chain starting from the object itself.
+  mr(current, object);
+
+  // Loop based on the map going up the prototype chain.
+  bind(&loop_again);
+  LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
+  lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
+  DecodeField<Map::ElementsKindBits>(scratch1);
+  cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
+  beq(found);
+  LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
+  Cmpi(current, Operand(factory->null_value()), r0);
+  bne(&loop_again);
+}
+
+
+#ifdef DEBUG
+bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
+                Register reg5, Register reg6, Register reg7, Register reg8) {
+  int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
+                        reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
+                        reg7.is_valid() + reg8.is_valid();
+
+  RegList regs = 0;
+  if (reg1.is_valid()) regs |= reg1.bit();
+  if (reg2.is_valid()) regs |= reg2.bit();
+  if (reg3.is_valid()) regs |= reg3.bit();
+  if (reg4.is_valid()) regs |= reg4.bit();
+  if (reg5.is_valid()) regs |= reg5.bit();
+  if (reg6.is_valid()) regs |= reg6.bit();
+  if (reg7.is_valid()) regs |= reg7.bit();
+  if (reg8.is_valid()) regs |= reg8.bit();
+  int n_of_non_aliasing_regs = NumRegs(regs);
+
+  return n_of_valid_regs != n_of_non_aliasing_regs;
+}
+#endif
+
+
+CodePatcher::CodePatcher(byte* address, int instructions,
+                         FlushICache flush_cache)
+    : address_(address),
+      size_(instructions * Assembler::kInstrSize),
+      masm_(NULL, address, size_ + Assembler::kGap),
+      flush_cache_(flush_cache) {
+  // Create a new macro assembler pointing to the address of the code to patch.
+  // The size is adjusted with kGap in order for the assembler to generate size
+  // bytes of instructions without failing with buffer size constraints.
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+CodePatcher::~CodePatcher() {
+  // Indicate that code has changed.
+  if (flush_cache_ == FLUSH) {
+    CpuFeatures::FlushICache(address_, size_);
+  }
+
+  // Check that the code was patched as expected.
+  DCHECK(masm_.pc_ == address_ + size_);
+  DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
+}
+
+
+void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
+
+
+void CodePatcher::EmitCondition(Condition cond) {
+  Instr instr = Assembler::instr_at(masm_.pc_);
+  switch (cond) {
+    case eq:
+      instr = (instr & ~kCondMask) | BT;
+      break;
+    case ne:
+      instr = (instr & ~kCondMask) | BF;
+      break;
+    default:
+      UNIMPLEMENTED();
+  }
+  masm_.emit(instr);
+}
+
+
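+// Computes result = dividend / divisor for a constant divisor using the
+// magic number multiplication technique, rounding toward zero.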
+void MacroAssembler::TruncatingDiv(Register result, Register dividend,
+                                   int32_t divisor) {
+  DCHECK(!dividend.is(result));
+  DCHECK(!dividend.is(r0));
+  DCHECK(!result.is(r0));
+  base::MagicNumbersForDivision<uint32_t> mag =
+      base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
+  mov(r0, Operand(mag.multiplier));
+  mulhw(result, dividend, r0);
+  bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
+  if (divisor > 0 && neg) {
+    add(result, result, dividend);
+  }
+  if (divisor < 0 && !neg && mag.multiplier > 0) {
+    sub(result, result, dividend);
+  }
+  if (mag.shift > 0) srawi(result, result, mag.shift);
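+  // Add the sign bit of the dividend to round the quotient toward zero.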
+  ExtractBit(r0, dividend, 31);
+  add(result, result, r0);
+}
+
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/macro-assembler-ppc.h b/deps/v8/src/ppc/macro-assembler-ppc.h
new file mode 100644 (file)
index 0000000..8f1aeab
--- /dev/null
@@ -0,0 +1,1554 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_MACRO_ASSEMBLER_PPC_H_
+#define V8_PPC_MACRO_ASSEMBLER_PPC_H_
+
+#include "src/assembler.h"
+#include "src/bailout-reason.h"
+#include "src/frames.h"
+#include "src/globals.h"
+
+namespace v8 {
+namespace internal {
+
+// ----------------------------------------------------------------------------
+// Static helper functions
+
+// Generate a MemOperand for loading a field from an object.
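+// Heap object pointers carry a low tag (kHeapObjectTag), so the tag is
+// subtracted from the offset to address the underlying memory.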
+inline MemOperand FieldMemOperand(Register object, int offset) {
+  return MemOperand(object, offset - kHeapObjectTag);
+}
+
+
+// Flags used for AllocateHeapNumber
+enum TaggingMode {
+  // Tag the result.
+  TAG_RESULT,
+  // Don't tag the result.
+  DONT_TAG_RESULT
+};
+
+
+enum RememberedSetAction { EMIT_REMEMBERED_SET, OMIT_REMEMBERED_SET };
+enum SmiCheck { INLINE_SMI_CHECK, OMIT_SMI_CHECK };
+enum PointersToHereCheck {
+  kPointersToHereMaybeInteresting,
+  kPointersToHereAreAlwaysInteresting
+};
+enum LinkRegisterStatus { kLRHasNotBeenSaved, kLRHasBeenSaved };
+
+
+Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2 = no_reg,
+                                   Register reg3 = no_reg,
+                                   Register reg4 = no_reg,
+                                   Register reg5 = no_reg,
+                                   Register reg6 = no_reg);
+
+
+#ifdef DEBUG
+bool AreAliased(Register reg1, Register reg2, Register reg3 = no_reg,
+                Register reg4 = no_reg, Register reg5 = no_reg,
+                Register reg6 = no_reg, Register reg7 = no_reg,
+                Register reg8 = no_reg);
+#endif
+
+// These exist to provide portability between 32-bit and 64-bit.
+#if V8_TARGET_ARCH_PPC64
+#define LoadPU ldu
+#define LoadPX ldx
+#define LoadPUX ldux
+#define StorePU stdu
+#define StorePX stdx
+#define StorePUX stdux
+#define ShiftLeftImm sldi
+#define ShiftRightImm srdi
+#define ClearLeftImm clrldi
+#define ClearRightImm clrrdi
+#define ShiftRightArithImm sradi
+#define ShiftLeft_ sld
+#define ShiftRight_ srd
+#define ShiftRightArith srad
+#define Mul mulld
+#define Div divd
+#else
+#define LoadPU lwzu
+#define LoadPX lwzx
+#define LoadPUX lwzux
+#define StorePU stwu
+#define StorePX stwx
+#define StorePUX stwux
+#define ShiftLeftImm slwi
+#define ShiftRightImm srwi
+#define ClearLeftImm clrlwi
+#define ClearRightImm clrrwi
+#define ShiftRightArithImm srawi
+#define ShiftLeft_ slw
+#define ShiftRight_ srw
+#define ShiftRightArith sraw
+#define Mul mullw
+#define Div divw
+#endif
+
+
+// MacroAssembler implements a collection of frequently used macros.
+class MacroAssembler : public Assembler {
+ public:
+  // The isolate parameter can be NULL if the macro assembler should
+  // not use isolate-dependent functionality. In this case, it's the
+  // responsibility of the caller to never invoke such a function on the
+  // macro assembler.
+  MacroAssembler(Isolate* isolate, void* buffer, int size);
+
+
+  // Returns the size of a call in instructions. Note, the value returned is
+  // only valid as long as no entries are added to the constant pool between
+  // checking the call size and emitting the actual call.
+  static int CallSize(Register target);
+  int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al);
+  static int CallSizeNotPredictableCodeSize(Address target,
+                                            RelocInfo::Mode rmode,
+                                            Condition cond = al);
+
+  // Jump, Call, and Ret pseudo instructions implementing inter-working.
+  void Jump(Register target);
+  void JumpToJSEntry(Register target);
+  void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al,
+            CRegister cr = cr7);
+  void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
+  void Call(Register target);
+  void CallJSEntry(Register target);
+  void Call(Address target, RelocInfo::Mode rmode, Condition cond = al);
+  int CallSize(Handle<Code> code,
+               RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+               TypeFeedbackId ast_id = TypeFeedbackId::None(),
+               Condition cond = al);
+  void Call(Handle<Code> code, RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
+            TypeFeedbackId ast_id = TypeFeedbackId::None(),
+            Condition cond = al);
+  void Ret(Condition cond = al);
+
+  // Emit code to discard a non-negative number of pointer-sized elements
+  // from the stack, clobbering only the sp register.
+  void Drop(int count, Condition cond = al);
+
+  void Ret(int drop, Condition cond = al);
+
+  void Call(Label* target);
+
+  // Emit call to the code we are currently generating.
+  void CallSelf() {
+    Handle<Code> self(reinterpret_cast<Code**>(CodeObject().location()));
+    Call(self, RelocInfo::CODE_TARGET);
+  }
+
+  // Register move. May do nothing if the registers are identical.
+  void Move(Register dst, Handle<Object> value);
+  void Move(Register dst, Register src, Condition cond = al);
+  void Move(DoubleRegister dst, DoubleRegister src);
+
+  void MultiPush(RegList regs);
+  void MultiPop(RegList regs);
+
+  // Load an object from the root table.
+  void LoadRoot(Register destination, Heap::RootListIndex index,
+                Condition cond = al);
+  // Store an object to the root table.
+  void StoreRoot(Register source, Heap::RootListIndex index,
+                 Condition cond = al);
+
+  // ---------------------------------------------------------------------------
+  // GC Support
+
+  void IncrementalMarkingRecordWriteHelper(Register object, Register value,
+                                           Register address);
+
+  enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
+
+  // Record in the remembered set the fact that we have a pointer to new space
+  // at the address pointed to by the addr register.  Only works if addr is not
+  // in new space.
+  void RememberedSetHelper(Register object,  // Used for debug code.
+                           Register addr, Register scratch,
+                           SaveFPRegsMode save_fp,
+                           RememberedSetFinalAction and_then);
+
+  void CheckPageFlag(Register object, Register scratch, int mask, Condition cc,
+                     Label* condition_met);
+
+  void CheckMapDeprecated(Handle<Map> map, Register scratch,
+                          Label* if_deprecated);
+
+  // Check if object is in new space.  Jumps if the object is not in new space.
+  // The register scratch can be object itself, but scratch will be clobbered.
+  void JumpIfNotInNewSpace(Register object, Register scratch, Label* branch) {
+    InNewSpace(object, scratch, ne, branch);
+  }
+
+  // Check if object is in new space.  Jumps if the object is in new space.
+  // The register scratch can be object itself, but it will be clobbered.
+  void JumpIfInNewSpace(Register object, Register scratch, Label* branch) {
+    InNewSpace(object, scratch, eq, branch);
+  }
+
+  // Check if an object has a given incremental marking color.
+  void HasColor(Register object, Register scratch0, Register scratch1,
+                Label* has_color, int first_bit, int second_bit);
+
+  void JumpIfBlack(Register object, Register scratch0, Register scratch1,
+                   Label* on_black);
+
+  // Checks the color of an object.  If the object is already grey or black
+  // then we just fall through, since it is already live.  If it is white and
+  // we can determine that it doesn't need to be scanned, then we just mark it
+  // black and fall through.  For the rest we jump to the label so the
+  // incremental marker can fix its assumptions.
+  void EnsureNotWhite(Register object, Register scratch1, Register scratch2,
+                      Register scratch3, Label* object_is_white_and_not_data);
+
+  // Detects conservatively whether an object is data-only, i.e. it does not
+  // need to be scanned by the garbage collector.
+  void JumpIfDataObject(Register value, Register scratch,
+                        Label* not_data_object);
+
+  // Notify the garbage collector that we wrote a pointer into an object.
+  // |object| is the object being stored into, |value| is the object being
+  // stored.  value and scratch registers are clobbered by the operation.
+  // The offset is the offset from the start of the object, not the offset from
+  // the tagged HeapObject pointer.  For use with FieldMemOperand(reg, off).
+  void RecordWriteField(
+      Register object, int offset, Register value, Register scratch,
+      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK,
+      PointersToHereCheck pointers_to_here_check_for_value =
+          kPointersToHereMaybeInteresting);
+
+  // As above, but the offset has the tag presubtracted.  For use with
+  // MemOperand(reg, off).
+  inline void RecordWriteContextSlot(
+      Register context, int offset, Register value, Register scratch,
+      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK,
+      PointersToHereCheck pointers_to_here_check_for_value =
+          kPointersToHereMaybeInteresting) {
+    RecordWriteField(context, offset + kHeapObjectTag, value, scratch,
+                     lr_status, save_fp, remembered_set_action, smi_check,
+                     pointers_to_here_check_for_value);
+  }
+
+  void RecordWriteForMap(Register object, Register map, Register dst,
+                         LinkRegisterStatus lr_status, SaveFPRegsMode save_fp);
+
+  // For a given |object| notify the garbage collector that the slot |address|
+  // has been written.  |value| is the object being stored. The value and
+  // address registers are clobbered by the operation.
+  void RecordWrite(
+      Register object, Register address, Register value,
+      LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
+      RememberedSetAction remembered_set_action = EMIT_REMEMBERED_SET,
+      SmiCheck smi_check = INLINE_SMI_CHECK,
+      PointersToHereCheck pointers_to_here_check_for_value =
+          kPointersToHereMaybeInteresting);
+
+  void Push(Register src) { push(src); }
+
+  // Push a handle.
+  void Push(Handle<Object> handle);
+  void Push(Smi* smi) { Push(Handle<Smi>(smi, isolate())); }
+
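+  // The multi-register Push helpers below use a single store-with-update
+  // (StorePU) to decrement sp and store the last register in one instruction;
+  // the remaining registers are stored at fixed offsets above it.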
+  // Push two registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2) {
+    StorePU(src2, MemOperand(sp, -2 * kPointerSize));
+    StoreP(src1, MemOperand(sp, kPointerSize));
+  }
+
+  // Push three registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3) {
+    StorePU(src3, MemOperand(sp, -3 * kPointerSize));
+    StoreP(src2, MemOperand(sp, kPointerSize));
+    StoreP(src1, MemOperand(sp, 2 * kPointerSize));
+  }
+
+  // Push four registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Register src4) {
+    StorePU(src4, MemOperand(sp, -4 * kPointerSize));
+    StoreP(src3, MemOperand(sp, kPointerSize));
+    StoreP(src2, MemOperand(sp, 2 * kPointerSize));
+    StoreP(src1, MemOperand(sp, 3 * kPointerSize));
+  }
+
+  // Push five registers.  Pushes leftmost register first (to highest address).
+  void Push(Register src1, Register src2, Register src3, Register src4,
+            Register src5) {
+    StorePU(src5, MemOperand(sp, -5 * kPointerSize));
+    StoreP(src4, MemOperand(sp, kPointerSize));
+    StoreP(src3, MemOperand(sp, 2 * kPointerSize));
+    StoreP(src2, MemOperand(sp, 3 * kPointerSize));
+    StoreP(src1, MemOperand(sp, 4 * kPointerSize));
+  }
+
+  void Pop(Register dst) { pop(dst); }
+
+  // Pop two registers. Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2) {
+    LoadP(src2, MemOperand(sp, 0));
+    LoadP(src1, MemOperand(sp, kPointerSize));
+    addi(sp, sp, Operand(2 * kPointerSize));
+  }
+
+  // Pop three registers.  Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3) {
+    LoadP(src3, MemOperand(sp, 0));
+    LoadP(src2, MemOperand(sp, kPointerSize));
+    LoadP(src1, MemOperand(sp, 2 * kPointerSize));
+    addi(sp, sp, Operand(3 * kPointerSize));
+  }
+
+  // Pop four registers.  Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3, Register src4) {
+    LoadP(src4, MemOperand(sp, 0));
+    LoadP(src3, MemOperand(sp, kPointerSize));
+    LoadP(src2, MemOperand(sp, 2 * kPointerSize));
+    LoadP(src1, MemOperand(sp, 3 * kPointerSize));
+    addi(sp, sp, Operand(4 * kPointerSize));
+  }
+
+  // Pop five registers.  Pops rightmost register first (from lower address).
+  void Pop(Register src1, Register src2, Register src3, Register src4,
+           Register src5) {
+    LoadP(src5, MemOperand(sp, 0));
+    LoadP(src4, MemOperand(sp, kPointerSize));
+    LoadP(src3, MemOperand(sp, 2 * kPointerSize));
+    LoadP(src2, MemOperand(sp, 3 * kPointerSize));
+    LoadP(src1, MemOperand(sp, 4 * kPointerSize));
+    addi(sp, sp, Operand(5 * kPointerSize));
+  }
+
+  // Push a fixed frame, consisting of lr, fp, context and
+  // JS function / marker id if marker_reg is a valid register.
+  void PushFixedFrame(Register marker_reg = no_reg);
+  void PopFixedFrame(Register marker_reg = no_reg);
+
+  // Push and pop the registers that can hold pointers, as defined by the
+  // RegList constant kSafepointSavedRegisters.
+  void PushSafepointRegisters();
+  void PopSafepointRegisters();
+  // Store value in register src in the safepoint stack slot for
+  // register dst.
+  void StoreToSafepointRegisterSlot(Register src, Register dst);
+  // Load the value of the src register from its safepoint stack slot
+  // into register dst.
+  void LoadFromSafepointRegisterSlot(Register dst, Register src);
+
+  // Flush the I-cache from asm code. You should use CpuFeatures::FlushICache
+  // from C.
+  // Does not handle errors.
+  void FlushICache(Register address, size_t size, Register scratch);
+
+  // If the value is a NaN, canonicalize the value; otherwise do nothing.
+  void CanonicalizeNaN(const DoubleRegister dst, const DoubleRegister src);
+  void CanonicalizeNaN(const DoubleRegister value) {
+    CanonicalizeNaN(value, value);
+  }
+
+  // Converts the integer (untagged smi) in |src| to a double, storing
+  // the result in |double_dst|.
+  void ConvertIntToDouble(Register src, DoubleRegister double_dst);
+
+  // Converts the unsigned integer (untagged smi) in |src| to
+  // a double, storing the result in |double_dst|.
+  void ConvertUnsignedIntToDouble(Register src, DoubleRegister double_dst);
+
+  // Converts the integer (untagged smi) in |src| to
+  // a float, storing the result in |dst|.
+  // Warning: The value in |int_scratch| will be changed in the process!
+  void ConvertIntToFloat(const DoubleRegister dst, const Register src,
+                         const Register int_scratch);
+
+  // Converts the double_input to an integer.  Note that, upon return,
+  // the contents of double_dst will also hold the fixed point representation.
+  void ConvertDoubleToInt64(const DoubleRegister double_input,
+#if !V8_TARGET_ARCH_PPC64
+                            const Register dst_hi,
+#endif
+                            const Register dst, const DoubleRegister double_dst,
+                            FPRoundingMode rounding_mode = kRoundToZero);
+
+  // Generates function and stub prologue code.
+  void StubPrologue(int prologue_offset = 0);
+  void Prologue(bool code_pre_aging, int prologue_offset = 0);
+
+  // Enter exit frame.
+  // stack_space - extra stack space, used for alignment before call to C.
+  void EnterExitFrame(bool save_doubles, int stack_space = 0);
+
+  // Leave the current exit frame. Expects the return value in r0.
+  // Expects the number of values to remove (pushed prior to the exit frame)
+  // in a register (or no_reg, if there is nothing to remove).
+  void LeaveExitFrame(bool save_doubles, Register argument_count,
+                      bool restore_context);
+
+  // Get the actual activation frame alignment for target environment.
+  static int ActivationFrameAlignment();
+
+  void LoadContext(Register dst, int context_chain_length);
+
+  // Conditionally load the cached Array transitioned map of type
+  // transitioned_kind from the native context if the map in register
+  // map_in_out is the cached Array map in the native context of
+  // expected_kind.
+  void LoadTransitionedArrayMapConditional(ElementsKind expected_kind,
+                                           ElementsKind transitioned_kind,
+                                           Register map_in_out,
+                                           Register scratch,
+                                           Label* no_map_match);
+
+  void LoadGlobalFunction(int index, Register function);
+
+  // Load the initial map from the global function. The registers
+  // function and map can be the same; function is then overwritten.
+  void LoadGlobalFunctionInitialMap(Register function, Register map,
+                                    Register scratch);
+
+  void InitializeRootRegister() {
+    ExternalReference roots_array_start =
+        ExternalReference::roots_array_start(isolate());
+    mov(kRootRegister, Operand(roots_array_start));
+  }
+
+  // ----------------------------------------------------------------
+  // new PPC macro-assembler interfaces that are slightly higher level
+  // than assembler-ppc and may generate variable length sequences
+
+  // load a literal signed int value <value> to GPR <dst>
+  void LoadIntLiteral(Register dst, int value);
+
+  // load an SMI value <value> to GPR <dst>
+  void LoadSmiLiteral(Register dst, Smi* smi);
+
+  // load a literal double value <value> to FPR <result>
+  void LoadDoubleLiteral(DoubleRegister result, double value, Register scratch);
+
+  void LoadWord(Register dst, const MemOperand& mem, Register scratch);
+
+  void LoadWordArith(Register dst, const MemOperand& mem,
+                     Register scratch = no_reg);
+
+  void StoreWord(Register src, const MemOperand& mem, Register scratch);
+
+  void LoadHalfWord(Register dst, const MemOperand& mem, Register scratch);
+
+  void StoreHalfWord(Register src, const MemOperand& mem, Register scratch);
+
+  void LoadByte(Register dst, const MemOperand& mem, Register scratch);
+
+  void StoreByte(Register src, const MemOperand& mem, Register scratch);
+
+  void LoadRepresentation(Register dst, const MemOperand& mem, Representation r,
+                          Register scratch = no_reg);
+
+  void StoreRepresentation(Register src, const MemOperand& mem,
+                           Representation r, Register scratch = no_reg);
+
+  // Move values between integer and floating point registers.
+  void MovIntToDouble(DoubleRegister dst, Register src, Register scratch);
+  void MovUnsignedIntToDouble(DoubleRegister dst, Register src,
+                              Register scratch);
+  void MovInt64ToDouble(DoubleRegister dst,
+#if !V8_TARGET_ARCH_PPC64
+                        Register src_hi,
+#endif
+                        Register src);
+#if V8_TARGET_ARCH_PPC64
+  void MovInt64ComponentsToDouble(DoubleRegister dst, Register src_hi,
+                                  Register src_lo, Register scratch);
+#endif
+  void MovDoubleLowToInt(Register dst, DoubleRegister src);
+  void MovDoubleHighToInt(Register dst, DoubleRegister src);
+  void MovDoubleToInt64(
+#if !V8_TARGET_ARCH_PPC64
+      Register dst_hi,
+#endif
+      Register dst, DoubleRegister src);
+
+  void Add(Register dst, Register src, intptr_t value, Register scratch);
+  void Cmpi(Register src1, const Operand& src2, Register scratch,
+            CRegister cr = cr7);
+  void Cmpli(Register src1, const Operand& src2, Register scratch,
+             CRegister cr = cr7);
+  void Cmpwi(Register src1, const Operand& src2, Register scratch,
+             CRegister cr = cr7);
+  void Cmplwi(Register src1, const Operand& src2, Register scratch,
+              CRegister cr = cr7);
+  void And(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
+  void Or(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
+  void Xor(Register ra, Register rs, const Operand& rb, RCBit rc = LeaveRC);
+
+  void AddSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
+  void SubSmiLiteral(Register dst, Register src, Smi* smi, Register scratch);
+  void CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
+                     CRegister cr = cr7);
+  void CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
+                      CRegister cr = cr7);
+  void AndSmiLiteral(Register dst, Register src, Smi* smi, Register scratch,
+                     RCBit rc = LeaveRC);
+
+  // Set the new rounding mode RN in the FPSCR.
+  void SetRoundingMode(FPRoundingMode RN);
+
+  // Reset the rounding mode to the default (kRoundToNearest).
+  void ResetRoundingMode();
+
+  // These exist to provide portability between 32-bit and 64-bit.
+  void LoadP(Register dst, const MemOperand& mem, Register scratch = no_reg);
+  void StoreP(Register src, const MemOperand& mem, Register scratch = no_reg);
+
+  // ---------------------------------------------------------------------------
+  // JavaScript invokes
+
+  // Invoke the JavaScript function code by either calling or jumping.
+  void InvokeCode(Register code, const ParameterCount& expected,
+                  const ParameterCount& actual, InvokeFlag flag,
+                  const CallWrapper& call_wrapper);
+
+  // Invoke the JavaScript function in the given register. Changes the
+  // current context to the context in the function before invoking.
+  void InvokeFunction(Register function, const ParameterCount& actual,
+                      InvokeFlag flag, const CallWrapper& call_wrapper);
+
+  void InvokeFunction(Register function, const ParameterCount& expected,
+                      const ParameterCount& actual, InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+  void InvokeFunction(Handle<JSFunction> function,
+                      const ParameterCount& expected,
+                      const ParameterCount& actual, InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+  void IsObjectJSObjectType(Register heap_object, Register map,
+                            Register scratch, Label* fail);
+
+  void IsInstanceJSObjectType(Register map, Register scratch, Label* fail);
+
+  void IsObjectJSStringType(Register object, Register scratch, Label* fail);
+
+  void IsObjectNameType(Register object, Register scratch, Label* fail);
+
+  // ---------------------------------------------------------------------------
+  // Debugger Support
+
+  void DebugBreak();
+
+  // ---------------------------------------------------------------------------
+  // Exception handling
+
+  // Push a new try handler and link into try handler chain.
+  void PushTryHandler(StackHandler::Kind kind, int handler_index);
+
+  // Unlink the stack handler on top of the stack from the try handler chain.
+  // Must preserve the result register.
+  void PopTryHandler();
+
+  // Passes thrown value to the handler at the top of the try handler chain.
+  void Throw(Register value);
+
+  // Propagates an uncatchable exception to the top of the current JS stack's
+  // handler chain.
+  void ThrowUncatchable(Register value);
+
+  // ---------------------------------------------------------------------------
+  // Inline caching support
+
+  // Generate code for checking access rights - used for security checks
+  // on access to global objects across environments. The holder register
+  // is left untouched, whereas both scratch registers are clobbered.
+  void CheckAccessGlobalProxy(Register holder_reg, Register scratch,
+                              Label* miss);
+
+  void GetNumberHash(Register t0, Register scratch);
+
+  void LoadFromNumberDictionary(Label* miss, Register elements, Register key,
+                                Register result, Register t0, Register t1,
+                                Register t2);
+
+
+  inline void MarkCode(NopMarkerTypes type) { nop(type); }
+
+  // Check if the given instruction is a 'type' marker,
+  // i.e. check if it is a mov r<type>, r<type> (referenced as nop(type)).
+  // These instructions are generated to mark special locations in the code,
+  // like some special IC code.
+  static inline bool IsMarkedCode(Instr instr, int type) {
+    DCHECK((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER));
+    return IsNop(instr, type);
+  }
+
+
+  static inline int GetCodeMarker(Instr instr) {
+    int dst_reg_offset = 12;
+    int dst_mask = 0xf << dst_reg_offset;
+    int src_mask = 0xf;
+    int dst_reg = (instr & dst_mask) >> dst_reg_offset;
+    int src_reg = instr & src_mask;
+    uint32_t non_register_mask = ~(dst_mask | src_mask);
+    uint32_t mov_mask = al | 13 << 21;
+
+    // Return <n> if we have a mov rn rn, else return -1.
+    int type = ((instr & non_register_mask) == mov_mask) &&
+                       (dst_reg == src_reg) && (FIRST_IC_MARKER <= dst_reg) &&
+                       (dst_reg < LAST_CODE_MARKER)
+                   ? src_reg
+                   : -1;
+    DCHECK((type == -1) ||
+           ((FIRST_IC_MARKER <= type) && (type < LAST_CODE_MARKER)));
+    return type;
+  }
+
+
+  // ---------------------------------------------------------------------------
+  // Allocation support
+
+  // Allocate an object in new space or old pointer space. The object_size is
+  // specified either in bytes or in words if the allocation flag SIZE_IN_WORDS
+  // is passed. If the space is exhausted control continues at the gc_required
+  // label. The allocated object is returned in result. If the flag
+  // tag_allocated_object is true the result is tagged as a heap object.
+  // All registers are clobbered also when control continues at the gc_required
+  // label.
+  void Allocate(int object_size, Register result, Register scratch1,
+                Register scratch2, Label* gc_required, AllocationFlags flags);
+
+  void Allocate(Register object_size, Register result, Register scratch1,
+                Register scratch2, Label* gc_required, AllocationFlags flags);
+
+  // Undo allocation in new space. The object passed and objects allocated after
+  // it will no longer be allocated. The caller must make sure that no pointers
+  // are left to the object(s) no longer allocated as they would be invalid when
+  // allocation is undone.
+  void UndoAllocationInNewSpace(Register object, Register scratch);
+
+
+  void AllocateTwoByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
+  void AllocateOneByteString(Register result, Register length,
+                             Register scratch1, Register scratch2,
+                             Register scratch3, Label* gc_required);
+  void AllocateTwoByteConsString(Register result, Register length,
+                                 Register scratch1, Register scratch2,
+                                 Label* gc_required);
+  void AllocateOneByteConsString(Register result, Register length,
+                                 Register scratch1, Register scratch2,
+                                 Label* gc_required);
+  void AllocateTwoByteSlicedString(Register result, Register length,
+                                   Register scratch1, Register scratch2,
+                                   Label* gc_required);
+  void AllocateOneByteSlicedString(Register result, Register length,
+                                   Register scratch1, Register scratch2,
+                                   Label* gc_required);
+
+  // Allocates a heap number or jumps to the gc_required label if the young
+  // space is full and a scavenge is needed. All registers are clobbered also
+  // when control continues at the gc_required label.
+  void AllocateHeapNumber(Register result, Register scratch1, Register scratch2,
+                          Register heap_number_map, Label* gc_required,
+                          TaggingMode tagging_mode = TAG_RESULT,
+                          MutableMode mode = IMMUTABLE);
+  void AllocateHeapNumberWithValue(Register result, DoubleRegister value,
+                                   Register scratch1, Register scratch2,
+                                   Register heap_number_map,
+                                   Label* gc_required);
+
+  // Copies a fixed number of fields of heap objects from src to dst.
+  void CopyFields(Register dst, Register src, RegList temps, int field_count);
+
+  // Copies a number of bytes from src to dst. All registers are clobbered. On
+  // exit src and dst will point to the place just after where the last byte was
+  // read or written and length will be zero.
+  void CopyBytes(Register src, Register dst, Register length, Register scratch);
+
+  // Initialize fields with filler values.  |count| fields starting at
+  // |start_offset| are overwritten with the value in |filler|.  At the end of
+  // the loop, |start_offset| points at the next uninitialized field.  |count| is
+  // assumed to be non-zero.
+  void InitializeNFieldsWithFiller(Register start_offset, Register count,
+                                   Register filler);
+
+  // Initialize fields with filler values.  Fields starting at |start_offset|
+  // not including end_offset are overwritten with the value in |filler|.  At
+  // the end of the loop, |start_offset| takes the value of |end_offset|.
+  void InitializeFieldsWithFiller(Register start_offset, Register end_offset,
+                                  Register filler);
+
+  // ---------------------------------------------------------------------------
+  // Support functions.
+
+  // Try to get the function prototype of a function and put the value in
+  // the result register. Checks that the function really is a
+  // function and jumps to the miss label if the fast checks fail. The
+  // function register will be untouched; the other registers may be
+  // clobbered.
+  void TryGetFunctionPrototype(Register function, Register result,
+                               Register scratch, Label* miss,
+                               bool miss_on_bound_function = false);
+
+  // Compare object type for heap object.  heap_object contains a non-Smi
+  // whose object type should be compared with the given type.  This both
+  // sets the flags and leaves the object type in the type_reg register.
+  // It leaves the map in the map register (unless the type_reg and map register
+  // are the same register).  It leaves the heap object in the heap_object
+  // register unless the heap_object register is the same register as one of the
+  // other registers.
+  // Type_reg can be no_reg. In that case ip is used.
+  void CompareObjectType(Register heap_object, Register map, Register type_reg,
+                         InstanceType type);
+
+  // Compare object type for heap object. Branch to false_label if type
+  // is lower than min_type or greater than max_type.
+  // Load map into the register map.
+  void CheckObjectTypeRange(Register heap_object, Register map,
+                            InstanceType min_type, InstanceType max_type,
+                            Label* false_label);
+
+  // Compare instance type in a map.  map contains a valid map object whose
+  // object type should be compared with the given type.  This both
+  // sets the flags and leaves the object type in the type_reg register.
+  void CompareInstanceType(Register map, Register type_reg, InstanceType type);
+
+
+  // Check if a map for a JSObject indicates that the object has fast elements.
+  // Jump to the specified label if it does not.
+  void CheckFastElements(Register map, Register scratch, Label* fail);
+
+  // Check if a map for a JSObject indicates that the object can have both smi
+  // and HeapObject elements.  Jump to the specified label if it does not.
+  void CheckFastObjectElements(Register map, Register scratch, Label* fail);
+
+  // Check if a map for a JSObject indicates that the object has fast smi only
+  // elements.  Jump to the specified label if it does not.
+  void CheckFastSmiElements(Register map, Register scratch, Label* fail);
+
+  // Check to see if maybe_number can be stored as a double in
+  // FastDoubleElements. If it can, store it at the index specified by key in
+  // the FastDoubleElements array elements. Otherwise jump to fail.
+  void StoreNumberToDoubleElements(Register value_reg, Register key_reg,
+                                   Register elements_reg, Register scratch1,
+                                   DoubleRegister double_scratch, Label* fail,
+                                   int elements_offset = 0);
+
+  // Compare an object's map with the specified map and its transitioned
+  // elements maps if mode is ALLOW_ELEMENT_TRANSITION_MAPS. Condition flags are
+  // set with result of map compare. If multiple map compares are required, the
+  // compare sequence branches to early_success.
+  void CompareMap(Register obj, Register scratch, Handle<Map> map,
+                  Label* early_success);
+
+  // As above, but the map of the object is already loaded into the register
+  // which is preserved by the code generated.
+  void CompareMap(Register obj_map, Handle<Map> map, Label* early_success);
+
+  // Check if the map of an object is equal to a specified map and branch to
+  // label if not. Skip the smi check if not required (object is known to be a
+  // heap object). If mode is ALLOW_ELEMENT_TRANSITION_MAPS, then also match
+  // against maps that are ElementsKind transition maps of the specified map.
+  void CheckMap(Register obj, Register scratch, Handle<Map> map, Label* fail,
+                SmiCheckType smi_check_type);
+
+
+  void CheckMap(Register obj, Register scratch, Heap::RootListIndex index,
+                Label* fail, SmiCheckType smi_check_type);
+
+
+  // Check if the map of an object is equal to a specified map and branch to a
+  // specified target if equal. Skip the smi check if not required (object is
+  // known to be a heap object)
+  void DispatchMap(Register obj, Register scratch, Handle<Map> map,
+                   Handle<Code> success, SmiCheckType smi_check_type);
+
+
+  // Compare the object in a register to a value from the root list.
+  // Uses the ip register as scratch.
+  void CompareRoot(Register obj, Heap::RootListIndex index);
+
+
+  // Load and check the instance type of an object for being a string.
+  // Loads the type into the second argument register.
+  // Returns a condition that will be enabled if the object was a string.
+  Condition IsObjectStringType(Register obj, Register type) {
+    LoadP(type, FieldMemOperand(obj, HeapObject::kMapOffset));
+    lbz(type, FieldMemOperand(type, Map::kInstanceTypeOffset));
+    andi(r0, type, Operand(kIsNotStringMask));
+    DCHECK_EQ(0, kStringTag);
+    return eq;
+  }
+
+
+  // Picks out an array index from the hash field.
+  // Register use:
+  //   hash - holds the index's hash. Clobbered.
+  //   index - holds the overwritten index on exit.
+  void IndexFromHash(Register hash, Register index);
+
+  // Get the number of least significant bits from a register
+  void GetLeastBitsFromSmi(Register dst, Register src, int num_least_bits);
+  void GetLeastBitsFromInt32(Register dst, Register src, int num_least_bits);
+
+  // Load the value of a smi object into a double register.
+  void SmiToDouble(DoubleRegister value, Register smi);
+
+  // Check if a double can be exactly represented as a signed 32-bit integer.
+  // CR_EQ in cr7 is set if true.
+  void TestDoubleIsInt32(DoubleRegister double_input, Register scratch1,
+                         Register scratch2, DoubleRegister double_scratch);
+
+  // Try to convert a double to a signed 32-bit integer.
+  // CR_EQ in cr7 is set and result assigned if the conversion is exact.
+  void TryDoubleToInt32Exact(Register result, DoubleRegister double_input,
+                             Register scratch, DoubleRegister double_scratch);
+
+  // Floors a double and writes the value to the result register.
+  // Go to exact if the conversion is exact (to be able to test -0),
+  // fall through calling code if an overflow occurred, else go to done.
+  // On return, input_high is loaded with the high bits of input.
+  void TryInt32Floor(Register result, DoubleRegister double_input,
+                     Register input_high, Register scratch,
+                     DoubleRegister double_scratch, Label* done, Label* exact);
+
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. Goes to 'done' if it
+  // succeeds, otherwise falls through if result is saturated. On return
+  // 'result' either holds answer, or is clobbered on fall through.
+  //
+  // Only public for the test code in test-code-stubs-arm.cc.
+  void TryInlineTruncateDoubleToI(Register result, DoubleRegister input,
+                                  Label* done);
+
+  // Performs a truncating conversion of a floating point number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
+  // Exits with 'result' holding the answer.
+  void TruncateDoubleToI(Register result, DoubleRegister double_input);
+
+  // Performs a truncating conversion of a heap number as used by
+  // the JS bitwise operations. See ECMA-262 9.5: ToInt32. 'result' and 'object'
+  // must be different registers.  Exits with 'result' holding the answer.
+  void TruncateHeapNumberToI(Register result, Register object);
+
+  // Converts the smi or heap number in object to an int32 using the rules
+  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
+  // and brought into the range -2^31 .. +2^31 - 1. 'result' and 'object' must be
+  // different registers.
+  void TruncateNumberToI(Register object, Register result,
+                         Register heap_number_map, Register scratch1,
+                         Label* not_int32);
+
+  // Overflow handling functions.
+  // Usage: call the appropriate arithmetic function and then call one of the
+  // flow control functions with the corresponding label.
+
+  // Compute dst = left + right, setting condition codes. dst may be same as
+  // either left or right (or a unique register). left and right must not be
+  // the same register.
+  void AddAndCheckForOverflow(Register dst, Register left, Register right,
+                              Register overflow_dst, Register scratch = r0);
+  void AddAndCheckForOverflow(Register dst, Register left, intptr_t right,
+                              Register overflow_dst, Register scratch = r0);
+
+  // Compute dst = left - right, setting condition codes. dst may be same as
+  // either left or right (or a unique register). left and right must not be
+  // the same register.
+  void SubAndCheckForOverflow(Register dst, Register left, Register right,
+                              Register overflow_dst, Register scratch = r0);
+
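+  // The overflow helpers record the overflow indication in cr0, so the
+  // branch/return helpers below test lt (overflow) or ge (no overflow) on cr0.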
+  void BranchOnOverflow(Label* label) { blt(label, cr0); }
+
+  void BranchOnNoOverflow(Label* label) { bge(label, cr0); }
+
+  void RetOnOverflow(void) {
+    Label label;
+
+    blt(&label, cr0);
+    Ret();
+    bind(&label);
+  }
+
+  void RetOnNoOverflow(void) {
+    Label label;
+
+    bge(&label, cr0);
+    Ret();
+    bind(&label);
+  }
+
+  // Pushes <count> double values to <location>, starting from d<first>.
+  void SaveFPRegs(Register location, int first, int count);
+
+  // Pops <count> double values from <location>, starting from d<first>.
+  void RestoreFPRegs(Register location, int first, int count);
+
+  // ---------------------------------------------------------------------------
+  // Runtime calls
+
+  // Call a code stub.
+  void CallStub(CodeStub* stub, TypeFeedbackId ast_id = TypeFeedbackId::None(),
+                Condition cond = al);
+
+  // Tail call a code stub (jump).
+  void TailCallStub(CodeStub* stub, Condition cond = al);
+
+  // Call a runtime routine.
+  void CallRuntime(const Runtime::Function* f, int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs);
+  void CallRuntimeSaveDoubles(Runtime::FunctionId id) {
+    const Runtime::Function* function = Runtime::FunctionForId(id);
+    CallRuntime(function, function->nargs, kSaveFPRegs);
+  }
+
+  // Convenience function: Same as above, but takes the fid instead.
+  void CallRuntime(Runtime::FunctionId id, int num_arguments,
+                   SaveFPRegsMode save_doubles = kDontSaveFPRegs) {
+    CallRuntime(Runtime::FunctionForId(id), num_arguments, save_doubles);
+  }
+
+  // Convenience function: call an external reference.
+  void CallExternalReference(const ExternalReference& ext, int num_arguments);
+
+  // Tail call of a runtime routine (jump).
+  // Like JumpToExternalReference, but also takes care of passing the number
+  // of parameters.
+  void TailCallExternalReference(const ExternalReference& ext,
+                                 int num_arguments, int result_size);
+
+  // Convenience function: tail call a runtime routine (jump).
+  void TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
+                       int result_size);
+
+  int CalculateStackPassedWords(int num_reg_arguments,
+                                int num_double_arguments);
+
+  // Before calling a C-function from generated code, align arguments on stack.
+  // After aligning the frame, non-register arguments must be stored in
+  // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
+  // are word sized. If double arguments are used, this function assumes that
+  // all double arguments are stored before core registers; otherwise the
+  // correct alignment of the double values is not guaranteed.
+  // Some compilers/platforms require the stack to be aligned when calling
+  // C++ code.
+  // Needs a scratch register to do some arithmetic. This register will be
+  // trashed.
+  void PrepareCallCFunction(int num_reg_arguments, int num_double_registers,
+                            Register scratch);
+  void PrepareCallCFunction(int num_reg_arguments, Register scratch);
+
+  // There are two ways of passing double arguments, depending on
+  // whether a soft or hard floating point ABI is used. These functions
+  // abstract parameter passing for the three different ways we call
+  // C functions from generated code.
+  void MovToFloatParameter(DoubleRegister src);
+  void MovToFloatParameters(DoubleRegister src1, DoubleRegister src2);
+  void MovToFloatResult(DoubleRegister src);
+
+  // Calls a C function and cleans up the space for arguments allocated
+  // by PrepareCallCFunction. The called function is not allowed to trigger a
+  // garbage collection, since that might move the code and invalidate the
+  // return address (unless this is somehow accounted for by the called
+  // function).
+  void CallCFunction(ExternalReference function, int num_arguments);
+  void CallCFunction(Register function, int num_arguments);
+  void CallCFunction(ExternalReference function, int num_reg_arguments,
+                     int num_double_arguments);
+  void CallCFunction(Register function, int num_reg_arguments,
+                     int num_double_arguments);
+
+  void MovFromFloatParameter(DoubleRegister dst);
+  void MovFromFloatResult(DoubleRegister dst);
+
+  // Calls an API function.  Allocates HandleScope, extracts returned value
+  // from handle and propagates exceptions.  Restores context.  stack_space
+  // - space to be unwound on exit (includes the call JS arguments space and
+  // the additional space allocated for the fast call).
+  void CallApiFunctionAndReturn(Register function_address,
+                                ExternalReference thunk_ref, int stack_space,
+                                MemOperand return_value_operand,
+                                MemOperand* context_restore_operand);
+
+  // Jump to a runtime routine.
+  void JumpToExternalReference(const ExternalReference& builtin);
+
+  // Invoke specified builtin JavaScript function. Adds an entry to
+  // the unresolved list if the name does not resolve.
+  void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
+                     const CallWrapper& call_wrapper = NullCallWrapper());
+
+  // Store the code object for the given builtin in the target register and
+  // setup the function in r1.
+  void GetBuiltinEntry(Register target, Builtins::JavaScript id);
+
+  // Store the function for the given builtin in the target register.
+  void GetBuiltinFunction(Register target, Builtins::JavaScript id);
+
+  Handle<Object> CodeObject() {
+    DCHECK(!code_object_.is_null());
+    return code_object_;
+  }
+
+
+  // Emit code for a truncating division by a constant. The dividend register is
+  // unchanged and r0 gets clobbered. Dividend and result must be different.
+  void TruncatingDiv(Register result, Register dividend, int32_t divisor);
+
+  // ---------------------------------------------------------------------------
+  // StatsCounter support
+
+  void SetCounter(StatsCounter* counter, int value, Register scratch1,
+                  Register scratch2);
+  void IncrementCounter(StatsCounter* counter, int value, Register scratch1,
+                        Register scratch2);
+  void DecrementCounter(StatsCounter* counter, int value, Register scratch1,
+                        Register scratch2);
+
+
+  // ---------------------------------------------------------------------------
+  // Debugging
+
+  // Calls Abort(msg) if the condition cond is not satisfied.
+  // Use --debug_code to enable.
+  void Assert(Condition cond, BailoutReason reason, CRegister cr = cr7);
+  void AssertFastElements(Register elements);
+
+  // Like Assert(), but always enabled.
+  void Check(Condition cond, BailoutReason reason, CRegister cr = cr7);
+
+  // Print a message to stdout and abort execution.
+  void Abort(BailoutReason reason);
+
+  // Verify restrictions about code generated in stubs.
+  void set_generating_stub(bool value) { generating_stub_ = value; }
+  bool generating_stub() { return generating_stub_; }
+  void set_has_frame(bool value) { has_frame_ = value; }
+  bool has_frame() { return has_frame_; }
+  inline bool AllowThisStubCall(CodeStub* stub);
+
+  // ---------------------------------------------------------------------------
+  // Number utilities
+
+  // Check whether the value of reg is a power of two and not zero. If not
+  // control continues at the label not_power_of_two. If reg is a power of two
+  // the register scratch contains the value of (reg - 1) when control falls
+  // through.
+  void JumpIfNotPowerOfTwoOrZero(Register reg, Register scratch,
+                                 Label* not_power_of_two_or_zero);
+  // Check whether the value of reg is a power of two and not zero.
+  // Control falls through if it is, with scratch containing the mask
+  // value (reg - 1).
+  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
+  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
+  // strictly positive but not a power of two.
+  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg, Register scratch,
+                                       Label* zero_and_neg,
+                                       Label* not_power_of_two);
+
+  // ---------------------------------------------------------------------------
+  // Bit testing/extraction
+  //
+  // Bit numbering is such that the least significant bit is bit 0
+  // (for consistency between 32/64-bit).
+
+  // Extract consecutive bits (defined by rangeStart - rangeEnd) from src
+  // and place them into the least significant bits of dst.
+  inline void ExtractBitRange(Register dst, Register src, int rangeStart,
+                              int rangeEnd, RCBit rc = LeaveRC) {
+    DCHECK(rangeStart >= rangeEnd && rangeStart < kBitsPerPointer);
+    int rotate = (rangeEnd == 0) ? 0 : kBitsPerPointer - rangeEnd;
+    int width = rangeStart - rangeEnd + 1;
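+    // Rotate the selected range down to bit 0, then mask off all higher bits.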
+#if V8_TARGET_ARCH_PPC64
+    rldicl(dst, src, rotate, kBitsPerPointer - width, rc);
+#else
+    rlwinm(dst, src, rotate, kBitsPerPointer - width, kBitsPerPointer - 1, rc);
+#endif
+  }
+
+  inline void ExtractBit(Register dst, Register src, uint32_t bitNumber,
+                         RCBit rc = LeaveRC) {
+    ExtractBitRange(dst, src, bitNumber, bitNumber, rc);
+  }
+
+  // Extract consecutive bits (defined by mask) from src and place them
+  // into the least significant bits of dst.
+  inline void ExtractBitMask(Register dst, Register src, uintptr_t mask,
+                             RCBit rc = LeaveRC) {
+    int start = kBitsPerPointer - 1;
+    int end;
+    uintptr_t bit = (1L << start);
+
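+    // Scan down from the most significant bit to find the highest set bit of
+    // the mask: the start of the contiguous range.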
+    while (bit && (mask & bit) == 0) {
+      start--;
+      bit >>= 1;
+    }
+    end = start;
+    bit >>= 1;
+
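+    // Continue through the run of set bits; end comes to rest on the least
+    // significant bit of the range.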
+    while (bit && (mask & bit)) {
+      end--;
+      bit >>= 1;
+    }
+
+    // 1-bits in mask must be contiguous
+    DCHECK(bit == 0 || (mask & ((bit << 1) - 1)) == 0);
+
+    ExtractBitRange(dst, src, start, end, rc);
+  }
+
+  // Test single bit in value.
+  inline void TestBit(Register value, int bitNumber, Register scratch = r0) {
+    ExtractBitRange(scratch, value, bitNumber, bitNumber, SetRC);
+  }
+
+  // Test consecutive bit range in value.  Range is defined by
+  // rangeStart - rangeEnd.
+  inline void TestBitRange(Register value, int rangeStart, int rangeEnd,
+                           Register scratch = r0) {
+    ExtractBitRange(scratch, value, rangeStart, rangeEnd, SetRC);
+  }
+
+  // Test consecutive bit range in value.  Range is defined by mask.
+  inline void TestBitMask(Register value, uintptr_t mask,
+                          Register scratch = r0) {
+    ExtractBitMask(scratch, value, mask, SetRC);
+  }
+
+
+  // ---------------------------------------------------------------------------
+  // Smi utilities
+
+  // Tag as a smi by shifting left kSmiShift bits.
+  void SmiTag(Register reg, RCBit rc = LeaveRC) { SmiTag(reg, reg, rc); }
+  void SmiTag(Register dst, Register src, RCBit rc = LeaveRC) {
+    ShiftLeftImm(dst, src, Operand(kSmiShift), rc);
+  }
+
+#if !V8_TARGET_ARCH_PPC64
+  // Test for overflow < 0: use BranchOnOverflow() or BranchOnNoOverflow().
+  void SmiTagCheckOverflow(Register reg, Register overflow);
+  void SmiTagCheckOverflow(Register dst, Register src, Register overflow);
+
+  inline void JumpIfNotSmiCandidate(Register value, Register scratch,
+                                    Label* not_smi_label) {
+    // High bits must be identical to fit into a smi.
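+    // Adding 2^30 maps the valid smi range [-2^30, 2^30 - 1] onto
+    // non-negative values; anything outside produces a negative result.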
+    addis(scratch, value, Operand(0x40000000u >> 16));
+    cmpi(scratch, Operand::Zero());
+    blt(not_smi_label);
+  }
+#endif
+  inline void TestUnsignedSmiCandidate(Register value, Register scratch) {
+    // The test is different for unsigned int values. Since we need
+    // the value to be in the range of a positive smi, we can't
+    // handle any of the high bits being set in the value.
+    TestBitRange(value, kBitsPerPointer - 1, kBitsPerPointer - 1 - kSmiShift,
+                 scratch);
+  }
+  inline void JumpIfNotUnsignedSmiCandidate(Register value, Register scratch,
+                                            Label* not_smi_label) {
+    TestUnsignedSmiCandidate(value, scratch);
+    bne(not_smi_label, cr0);
+  }
+
+  void SmiUntag(Register reg, RCBit rc = LeaveRC) { SmiUntag(reg, reg, rc); }
+
+  void SmiUntag(Register dst, Register src, RCBit rc = LeaveRC) {
+    ShiftRightArithImm(dst, src, kSmiShift, rc);
+  }
+
+  void SmiToPtrArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_PPC64
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kPointerSizeLog2);
+    ShiftRightArithImm(dst, src, kSmiShift - kPointerSizeLog2);
+#else
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kPointerSizeLog2);
+    ShiftLeftImm(dst, src, Operand(kPointerSizeLog2 - kSmiShift));
+#endif
+  }
+
+  void SmiToByteArrayOffset(Register dst, Register src) { SmiUntag(dst, src); }
+
+  void SmiToShortArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_PPC64
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 1);
+    ShiftRightArithImm(dst, src, kSmiShift - 1);
+#else
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift == 1);
+    if (!dst.is(src)) {
+      mr(dst, src);
+    }
+#endif
+  }
+
+  void SmiToIntArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_PPC64
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > 2);
+    ShiftRightArithImm(dst, src, kSmiShift - 2);
+#else
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < 2);
+    ShiftLeftImm(dst, src, Operand(2 - kSmiShift));
+#endif
+  }
+
+#define SmiToFloatArrayOffset SmiToIntArrayOffset
+
+  void SmiToDoubleArrayOffset(Register dst, Register src) {
+#if V8_TARGET_ARCH_PPC64
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift > kDoubleSizeLog2);
+    ShiftRightArithImm(dst, src, kSmiShift - kDoubleSizeLog2);
+#else
+    STATIC_ASSERT(kSmiTag == 0 && kSmiShift < kDoubleSizeLog2);
+    ShiftLeftImm(dst, src, Operand(kDoubleSizeLog2 - kSmiShift));
+#endif
+  }
+
+  void SmiToArrayOffset(Register dst, Register src, int elementSizeLog2) {
+    if (kSmiShift < elementSizeLog2) {
+      ShiftLeftImm(dst, src, Operand(elementSizeLog2 - kSmiShift));
+    } else if (kSmiShift > elementSizeLog2) {
+      ShiftRightArithImm(dst, src, kSmiShift - elementSizeLog2);
+    } else if (!dst.is(src)) {
+      mr(dst, src);
+    }
+  }
+
+  void IndexToArrayOffset(Register dst, Register src, int elementSizeLog2,
+                          bool isSmi) {
+    if (isSmi) {
+      SmiToArrayOffset(dst, src, elementSizeLog2);
+    } else {
+      ShiftLeftImm(dst, src, Operand(elementSizeLog2));
+    }
+  }
+
+  // Untag the source value into destination and jump if source is a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfSmi(Register dst, Register src, Label* smi_case);
+
+  // Untag the source value into destination and jump if source is not a smi.
+  // Source and destination can be the same register.
+  void UntagAndJumpIfNotSmi(Register dst, Register src, Label* non_smi_case);
+
+  inline void TestIfSmi(Register value, Register scratch) {
+    TestBit(value, 0, scratch);  // tst(value, Operand(kSmiTagMask));
+  }
+
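+  // Rotating left by one brings the sign bit and the smi tag bit into the two
+  // low bit positions, so a single masked test (with RC set) checks both.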
+  inline void TestIfPositiveSmi(Register value, Register scratch) {
+    STATIC_ASSERT((kSmiTagMask | kSmiSignMask) ==
+                  (intptr_t)(1UL << (kBitsPerPointer - 1) | 1));
+#if V8_TARGET_ARCH_PPC64
+    rldicl(scratch, value, 1, kBitsPerPointer - 2, SetRC);
+#else
+    rlwinm(scratch, value, 1, kBitsPerPointer - 2, kBitsPerPointer - 1, SetRC);
+#endif
+  }
+
+  // Jump if the register contains a smi.
+  inline void JumpIfSmi(Register value, Label* smi_label) {
+    TestIfSmi(value, r0);
+    beq(smi_label, cr0);  // branch if SMI
+  }
+  // Jump if the register contains a non-smi.
+  inline void JumpIfNotSmi(Register value, Label* not_smi_label) {
+    TestIfSmi(value, r0);
+    bne(not_smi_label, cr0);
+  }
+  // Jump if either of the registers contains a non-smi.
+  void JumpIfNotBothSmi(Register reg1, Register reg2, Label* on_not_both_smi);
+  // Jump if either of the registers contains a smi.
+  void JumpIfEitherSmi(Register reg1, Register reg2, Label* on_either_smi);
+
+  // Abort execution if argument is a smi, enabled via --debug-code.
+  void AssertNotSmi(Register object);
+  void AssertSmi(Register object);
+
+
+#if V8_TARGET_ARCH_PPC64
+  inline void TestIfInt32(Register value, Register scratch1, Register scratch2,
+                          CRegister cr = cr7) {
+    // High bits must be identical to fit into a 32-bit integer
+    srawi(scratch1, value, 31);
+    sradi(scratch2, value, 32);
+    cmp(scratch1, scratch2, cr);
+  }
+#else
+  inline void TestIfInt32(Register hi_word, Register lo_word, Register scratch,
+                          CRegister cr = cr7) {
+    // High bits must be identical to fit into a 32-bit integer
+    srawi(scratch, lo_word, 31);
+    cmp(scratch, hi_word, cr);
+  }
+#endif
+
+  // Abort execution if argument is not a string, enabled via --debug-code.
+  void AssertString(Register object);
+
+  // Abort execution if argument is not a name, enabled via --debug-code.
+  void AssertName(Register object);
+
+  // Abort execution if argument is not undefined or an AllocationSite, enabled
+  // via --debug-code.
+  void AssertUndefinedOrAllocationSite(Register object, Register scratch);
+
+  // Abort execution if reg is not the root value with the given index,
+  // enabled via --debug-code.
+  void AssertIsRoot(Register reg, Heap::RootListIndex index);
+
+  // ---------------------------------------------------------------------------
+  // HeapNumber utilities
+
+  void JumpIfNotHeapNumber(Register object, Register heap_number_map,
+                           Register scratch, Label* on_not_heap_number);
+
+  // ---------------------------------------------------------------------------
+  // String utilities
+
+  // Generate code to do a lookup in the number string cache. If the number in
+  // the register object is found in the cache the generated code falls through
+  // with the result in the result register. The object and the result register
+  // can be the same. If the number is not found in the cache, the code jumps
+  // to the label not_found; only the content of the object register is left
+  // unchanged.
+  void LookupNumberStringCache(Register object, Register result,
+                               Register scratch1, Register scratch2,
+                               Register scratch3, Label* not_found);
+
+  // Checks if both objects are sequential one-byte strings and jumps to label
+  // if either is not. Assumes that neither object is a smi.
+  void JumpIfNonSmisNotBothSequentialOneByteStrings(Register object1,
+                                                    Register object2,
+                                                    Register scratch1,
+                                                    Register scratch2,
+                                                    Label* failure);
+
+  // Checks if both objects are sequential one-byte strings and jumps to label
+  // if either is not.
+  void JumpIfNotBothSequentialOneByteStrings(Register first, Register second,
+                                             Register scratch1,
+                                             Register scratch2,
+                                             Label* not_flat_one_byte_strings);
+
+  // Checks if both instance types are sequential one-byte strings and jumps to
+  // label if either is not.
+  void JumpIfBothInstanceTypesAreNotSequentialOneByte(
+      Register first_object_instance_type, Register second_object_instance_type,
+      Register scratch1, Register scratch2, Label* failure);
+
+  // Check if instance type is sequential one-byte string and jump to label if
+  // it is not.
+  void JumpIfInstanceTypeIsNotSequentialOneByte(Register type, Register scratch,
+                                                Label* failure);
+
+  void JumpIfNotUniqueNameInstanceType(Register reg, Label* not_unique_name);
+
+  void EmitSeqStringSetCharCheck(Register string, Register index,
+                                 Register value, uint32_t encoding_mask);
+
+  // ---------------------------------------------------------------------------
+  // Patching helpers.
+
+  // Retrieve/patch the relocated value (lis/ori pair or constant pool load).
+  void GetRelocatedValue(Register location, Register result, Register scratch);
+  void SetRelocatedValue(Register location, Register scratch,
+                         Register new_value);
+
+  void ClampUint8(Register output_reg, Register input_reg);
+
+  // Saturate a value into an 8-bit unsigned integer:
+  //   if input_value < 0, output_value is 0
+  //   if input_value > 255, output_value is 255
+  //   otherwise output_value is the (int)input_value (round to nearest)
+  void ClampDoubleToUint8(Register result_reg, DoubleRegister input_reg,
+                          DoubleRegister temp_double_reg);
+
+
+  void LoadInstanceDescriptors(Register map, Register descriptors);
+  void EnumLength(Register dst, Register map);
+  void NumberOfOwnDescriptors(Register dst, Register map);
+
+  template <typename Field>
+  void DecodeField(Register dst, Register src) {
+    ExtractBitRange(dst, src, Field::kShift + Field::kSize - 1, Field::kShift);
+  }
+
+  template <typename Field>
+  void DecodeField(Register reg) {
+    DecodeField<Field>(reg, reg);
+  }
+
+  template <typename Field>
+  void DecodeFieldToSmi(Register dst, Register src) {
+#if V8_TARGET_ARCH_PPC64
+    DecodeField<Field>(dst, src);
+    SmiTag(dst);
+#else
+    // 32-bit can do this in one instruction:
+    int start = Field::kSize + kSmiShift - 1;
+    int end = kSmiShift;
+    int rotate = kSmiShift - Field::kShift;
+    if (rotate < 0) {
+      rotate += kBitsPerPointer;
+    }
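+    // A single rlwinm rotates the field into the smi position and masks out
+    // all other bits.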
+    rlwinm(dst, src, rotate, kBitsPerPointer - start - 1,
+           kBitsPerPointer - end - 1);
+#endif
+  }
+
+  template <typename Field>
+  void DecodeFieldToSmi(Register reg) {
+    DecodeFieldToSmi<Field>(reg, reg);
+  }
+
+  // Activation support.
+  void EnterFrame(StackFrame::Type type,
+                  bool load_constant_pool_pointer_reg = false);
+  // Returns the pc offset at which the frame ends.
+  int LeaveFrame(StackFrame::Type type, int stack_adjustment = 0);
+
+  // Expects object in r3 and returns map with validated enum cache
+  // in r3.  Assumes that any other register can be used as a scratch.
+  void CheckEnumCache(Register null_value, Label* call_runtime);
+
+  // AllocationMemento support. Arrays may have an associated
+  // AllocationMemento object that can be checked for in order to pretransition
+  // to another type.
+  // On entry, receiver_reg should point to the array object.
+  // scratch_reg gets clobbered.
+  // If allocation info is present, condition flags are set to eq.
+  void TestJSArrayForAllocationMemento(Register receiver_reg,
+                                       Register scratch_reg,
+                                       Label* no_memento_found);
+
+  void JumpIfJSArrayHasAllocationMemento(Register receiver_reg,
+                                         Register scratch_reg,
+                                         Label* memento_found) {
+    Label no_memento_found;
+    TestJSArrayForAllocationMemento(receiver_reg, scratch_reg,
+                                    &no_memento_found);
+    beq(memento_found);
+    bind(&no_memento_found);
+  }
+
+  // Jumps to found label if a prototype map has dictionary elements.
+  void JumpIfDictionaryInPrototypeChain(Register object, Register scratch0,
+                                        Register scratch1, Label* found);
+
+ private:
+  static const int kSmiShift = kSmiTagSize + kSmiShiftSize;
+
+  void CallCFunctionHelper(Register function, int num_reg_arguments,
+                           int num_double_arguments);
+
+  void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al,
+            CRegister cr = cr7);
+
+  // Helper functions for generating invokes.
+  void InvokePrologue(const ParameterCount& expected,
+                      const ParameterCount& actual, Handle<Code> code_constant,
+                      Register code_reg, Label* done,
+                      bool* definitely_mismatches, InvokeFlag flag,
+                      const CallWrapper& call_wrapper);
+
+  void InitializeNewString(Register string, Register length,
+                           Heap::RootListIndex map_index, Register scratch1,
+                           Register scratch2);
+
+  // Helper for implementing JumpIfNotInNewSpace and JumpIfInNewSpace.
+  void InNewSpace(Register object, Register scratch,
+                  Condition cond,  // eq for new space, ne otherwise.
+                  Label* branch);
+
+  // Helper for finding the mark bits for an address.  Afterwards, the
+  // bitmap register points at the word with the mark bits and the mask
+  // register holds the position of the first bit.  Leaves addr_reg unchanged.
+  inline void GetMarkBits(Register addr_reg, Register bitmap_reg,
+                          Register mask_reg);
+
+  // Helper for throwing exceptions.  Compute a handler address and jump to
+  // it.  See the implementation for register usage.
+  void JumpToHandlerEntry();
+
+  // Compute memory operands for safepoint stack slots.
+  static int SafepointRegisterStackIndex(int reg_code);
+  MemOperand SafepointRegisterSlot(Register reg);
+  MemOperand SafepointRegistersAndDoublesSlot(Register reg);
+
+#if V8_OOL_CONSTANT_POOL
+  // Loads the constant pool pointer (kConstantPoolRegister).
+  enum CodeObjectAccessMethod { CAN_USE_IP, CONSTRUCT_INTERNAL_REFERENCE };
+  void LoadConstantPoolPointerRegister(CodeObjectAccessMethod access_method,
+                                       int ip_code_entry_delta = 0);
+#endif
+
+  bool generating_stub_;
+  bool has_frame_;
+  // This handle will be patched with the code object on installation.
+  Handle<Object> code_object_;
+
+  // Needs access to SafepointRegisterStackIndex for compiled frame
+  // traversal.
+  friend class StandardFrame;
+};
+
+
+// The code patcher is used to patch (typically) small parts of code e.g. for
+// debugging and other types of instrumentation. When using the code patcher
+// the exact number of bytes specified must be emitted. It is not legal to emit
+// relocation information. If any of these constraints are violated it causes
+// an assertion to fail.
+class CodePatcher {
+ public:
+  enum FlushICache { FLUSH, DONT_FLUSH };
+
+  CodePatcher(byte* address, int instructions, FlushICache flush_cache = FLUSH);
+  virtual ~CodePatcher();
+
+  // Macro assembler to emit code.
+  MacroAssembler* masm() { return &masm_; }
+
+  // Emit an instruction directly.
+  void Emit(Instr instr);
+
+  // Emit the condition part of an instruction leaving the rest of the current
+  // instruction unchanged.
+  void EmitCondition(Condition cond);
+
+ private:
+  byte* address_;            // The address of the code being patched.
+  int size_;                 // Expected patch size in bytes.
+  MacroAssembler masm_;      // Macro assembler used to generate the code.
+  FlushICache flush_cache_;  // Whether to flush the I cache after patching.
+};
+
+
+// -----------------------------------------------------------------------------
+// Static helper functions.
+
+inline MemOperand ContextOperand(Register context, int index) {
+  return MemOperand(context, Context::SlotOffset(index));
+}
+
+
+inline MemOperand GlobalObjectOperand() {
+  return ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX);
+}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+#define CODE_COVERAGE_STRINGIFY(x) #x
+#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
+#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
+#define ACCESS_MASM(masm)    \
+  masm->stop(__FILE_LINE__); \
+  masm->
+#else
+#define ACCESS_MASM(masm) masm->
+#endif
+}
+}  // namespace v8::internal
+
+#endif  // V8_PPC_MACRO_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc b/deps/v8/src/ppc/regexp-macro-assembler-ppc.cc
new file mode 100644 (file)
index 0000000..54acce1
--- /dev/null
@@ -0,0 +1,1337 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/base/bits.h"
+#include "src/code-stubs.h"
+#include "src/cpu-profiler.h"
+#include "src/log.h"
+#include "src/macro-assembler.h"
+#include "src/regexp-macro-assembler.h"
+#include "src/regexp-stack.h"
+#include "src/unicode.h"
+
+#include "src/ppc/regexp-macro-assembler-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+#ifndef V8_INTERPRETED_REGEXP
+/*
+ * This assembler uses the following register assignment convention
+ * - r25: Temporarily stores the index of capture start after a matching pass
+ *        for a global regexp.
+ * - r26: Pointer to current code object (Code*) including heap object tag.
+ * - r27: Current position in input, as negative offset from end of string.
+ *        Note that this is the byte offset, not the character offset!
+ * - r28: Currently loaded character. Must be loaded using
+ *        LoadCurrentCharacter before using any of the dispatch methods.
+ * - r29: Points to tip of backtrack stack
+ * - r30: End of input (points to byte after last character in input).
+ * - r31: Frame pointer. Used to access arguments, local variables and
+ *         RegExp registers.
+ * - r12: IP register, used by assembler. Very volatile.
+ * - r1/sp : Points to tip of C stack.
+ *
+ * The remaining registers are free for computations.
+ * Each call to a public method should retain this convention.
+ *
+ * The stack will have the following structure:
+ *  - fp[44]  Isolate* isolate   (address of the current isolate)
+ *  - fp[40]  secondary link/return address used by native call.
+ *  - fp[36]  lr save area (currently unused)
+ *  - fp[32]  backchain    (currently unused)
+ *  --- sp when called ---
+ *  - fp[28]  return address     (lr).
+ *  - fp[24]  old frame pointer  (r31).
+ *  - fp[0..20]  backup of registers r25..r30
+ *  --- frame pointer ----
+ *  - fp[-4]  direct_call        (if 1, direct call from JavaScript code,
+ *                                if 0, call through the runtime system).
+ *  - fp[-8]  stack_area_base    (high end of the memory area to use as
+ *                                backtracking stack).
+ *  - fp[-12] capture array size (may fit multiple sets of matches)
+ *  - fp[-16] int* capture_array (int[num_saved_registers_], for output).
+ *  - fp[-20] end of input       (address of end of string).
+ *  - fp[-24] start of input     (address of first character in string).
+ *  - fp[-28] start index        (character index of start).
+ *  - fp[-32] void* input_string (location of a handle containing the string).
+ *  - fp[-36] success counter    (only for global regexps to count matches).
+ *  - fp[-40] Offset of location before start of input (effectively character
+ *            position -1). Used to initialize capture registers to a
+ *            non-position.
+ *  - fp[-44] At start (if 1, we are starting at the start of the
+ *    string, otherwise 0)
+ *  - fp[-48] register 0         (Only positions must be stored in the first
+ *  -         register 1          num_saved_registers_ registers)
+ *  -         ...
+ *  -         register num_registers-1
+ *  --- sp ---
+ *
+ * The first num_saved_registers_ registers are initialized to point to
+ * "character -1" in the string (i.e., char_size() bytes before the first
+ * character of the string). The remaining registers start out as garbage.
+ *
+ * The data up to the return address must be placed there by the calling
+ * code and the remaining arguments are passed in registers, e.g. by calling the
+ * code entry as cast to a function with the signature:
+ * int (*match)(String* input_string,
+ *              int start_index,
+ *              Address start,
+ *              Address end,
+ *              int* capture_output_array,
+ *              byte* stack_area_base,
+ *              Address secondary_return_address,  // Only used by native call.
+ *              bool direct_call = false)
+ * The call is performed by NativeRegExpMacroAssembler::Execute()
+ * (in regexp-macro-assembler.cc) via the CALL_GENERATED_REGEXP_CODE macro
+ * in ppc/simulator-ppc.h.
+ * When calling as a non-direct call (i.e., from C++ code), the return address
+ * area is overwritten with the LR register by the RegExp code. When doing a
+ * direct call from generated code, the return address is placed there by
+ * the calling code, as in a normal exit frame.
+ */
+
+#define __ ACCESS_MASM(masm_)
+
+RegExpMacroAssemblerPPC::RegExpMacroAssemblerPPC(Mode mode,
+                                                 int registers_to_save,
+                                                 Zone* zone)
+    : NativeRegExpMacroAssembler(zone),
+      masm_(new MacroAssembler(zone->isolate(), NULL, kRegExpCodeSize)),
+      mode_(mode),
+      num_registers_(registers_to_save),
+      num_saved_registers_(registers_to_save),
+      entry_label_(),
+      start_label_(),
+      success_label_(),
+      backtrack_label_(),
+      exit_label_(),
+      internal_failure_label_() {
+  DCHECK_EQ(0, registers_to_save % 2);
+
+// Called from C
+#if ABI_USES_FUNCTION_DESCRIPTORS
+  __ function_descriptor();
+#endif
+
+  __ b(&entry_label_);  // We'll write the entry code later.
+  // If the code gets too big or corrupted, an internal exception will be
+  // raised, and we will exit right away.
+  __ bind(&internal_failure_label_);
+  __ li(r3, Operand(FAILURE));
+  __ Ret();
+  __ bind(&start_label_);  // And then continue from here.
+}
+
+
+RegExpMacroAssemblerPPC::~RegExpMacroAssemblerPPC() {
+  delete masm_;
+  // Unuse labels in case we throw away the assembler without calling GetCode.
+  entry_label_.Unuse();
+  start_label_.Unuse();
+  success_label_.Unuse();
+  backtrack_label_.Unuse();
+  exit_label_.Unuse();
+  check_preempt_label_.Unuse();
+  stack_overflow_label_.Unuse();
+  internal_failure_label_.Unuse();
+}
+
+
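+// Slack (in entries) that the backtrack stack may grow by between explicit
+// stack-limit checks.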
+int RegExpMacroAssemblerPPC::stack_limit_slack() {
+  return RegExpStack::kStackLimitSlack;
+}
+
+
+void RegExpMacroAssemblerPPC::AdvanceCurrentPosition(int by) {
+  if (by != 0) {
+    __ addi(current_input_offset(), current_input_offset(),
+            Operand(by * char_size()));
+  }
+}
+
+
+void RegExpMacroAssemblerPPC::AdvanceRegister(int reg, int by) {
+  DCHECK(reg >= 0);
+  DCHECK(reg < num_registers_);
+  if (by != 0) {
+    __ LoadP(r3, register_location(reg), r0);
+    __ mov(r0, Operand(by));
+    __ add(r3, r3, r0);
+    __ StoreP(r3, register_location(reg), r0);
+  }
+}
+
+
+void RegExpMacroAssemblerPPC::Backtrack() {
+  CheckPreemption();
+  // Pop Code* offset from backtrack stack, add Code* and jump to location.
+  Pop(r3);
+  __ add(r3, r3, code_pointer());
+  __ mtctr(r3);
+  __ bctr();
+}
+
+
+void RegExpMacroAssemblerPPC::Bind(Label* label) { __ bind(label); }
+
+
+void RegExpMacroAssemblerPPC::CheckCharacter(uint32_t c, Label* on_equal) {
+  __ Cmpli(current_character(), Operand(c), r0);
+  BranchOrBacktrack(eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckCharacterGT(uc16 limit, Label* on_greater) {
+  __ Cmpli(current_character(), Operand(limit), r0);
+  BranchOrBacktrack(gt, on_greater);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckAtStart(Label* on_at_start) {
+  Label not_at_start;
+  // Did we start the match at the start of the string at all?
+  __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
+  __ cmpi(r3, Operand::Zero());
+  BranchOrBacktrack(ne, &not_at_start);
+
+  // If we did, are we still at the start of the input?
+  __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
+  __ mr(r0, current_input_offset());
+  __ add(r3, end_of_input_address(), r0);
+  __ cmp(r4, r3);
+  BranchOrBacktrack(eq, on_at_start);
+  __ bind(&not_at_start);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotAtStart(Label* on_not_at_start) {
+  // Did we start the match at the start of the string at all?
+  __ LoadP(r3, MemOperand(frame_pointer(), kStartIndex));
+  __ cmpi(r3, Operand::Zero());
+  BranchOrBacktrack(ne, on_not_at_start);
+  // If we did, are we still at the start of the input?
+  __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
+  __ add(r3, end_of_input_address(), current_input_offset());
+  __ cmp(r3, r4);
+  BranchOrBacktrack(ne, on_not_at_start);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckCharacterLT(uc16 limit, Label* on_less) {
+  __ Cmpli(current_character(), Operand(limit), r0);
+  BranchOrBacktrack(lt, on_less);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckGreedyLoop(Label* on_equal) {
+  Label backtrack_non_equal;
+  __ LoadP(r3, MemOperand(backtrack_stackpointer(), 0));
+  __ cmp(current_input_offset(), r3);
+  __ bne(&backtrack_non_equal);
+  __ addi(backtrack_stackpointer(), backtrack_stackpointer(),
+          Operand(kPointerSize));
+
+  __ bind(&backtrack_non_equal);
+  BranchOrBacktrack(eq, on_equal);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotBackReferenceIgnoreCase(
+    int start_reg, Label* on_no_match) {
+  Label fallthrough;
+  __ LoadP(r3, register_location(start_reg), r0);  // Index of start of capture
+  __ LoadP(r4, register_location(start_reg + 1), r0);  // Index of end
+  __ sub(r4, r4, r3, LeaveOE, SetRC);                  // Length of capture.
+
+  // If length is zero, either the capture is empty or it is not participating.
+  // In either case succeed immediately.
+  __ beq(&fallthrough, cr0);
+
+  // Check that there are enough characters left in the input.
+  __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
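+  // current_input_offset() is negative, so the sum is positive (gt) exactly
+  // when the capture is longer than the remaining input.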
+  BranchOrBacktrack(gt, on_no_match, cr0);
+
+  if (mode_ == LATIN1) {
+    Label success;
+    Label fail;
+    Label loop_check;
+
+    // r3 - offset of start of capture
+    // r4 - length of capture
+    __ add(r3, r3, end_of_input_address());
+    __ add(r5, end_of_input_address(), current_input_offset());
+    __ add(r4, r3, r4);
+
+    // r3 - Address of start of capture.
+    // r4 - Address of end of capture
+    // r5 - Address of current input position.
+
+    Label loop;
+    __ bind(&loop);
+    __ lbz(r6, MemOperand(r3));
+    __ addi(r3, r3, Operand(char_size()));
+    __ lbz(r25, MemOperand(r5));
+    __ addi(r5, r5, Operand(char_size()));
+    __ cmp(r25, r6);
+    __ beq(&loop_check);
+
+    // Mismatch, try case-insensitive match (converting letters to lower-case).
+    __ ori(r6, r6, Operand(0x20));  // Convert capture character to lower-case.
+    __ ori(r25, r25, Operand(0x20));  // Also convert input character.
+    __ cmp(r25, r6);
+    __ bne(&fail);
+    __ subi(r6, r6, Operand('a'));
+    __ cmpli(r6, Operand('z' - 'a'));  // Is r6 a lowercase letter?
+    __ ble(&loop_check);               // In range 'a'-'z'.
+    // Latin-1: Check for values in range [224,254] but not 247.
+    __ subi(r6, r6, Operand(224 - 'a'));
+    __ cmpli(r6, Operand(254 - 224));
+    __ bgt(&fail);                    // Weren't Latin-1 letters.
+    __ cmpi(r6, Operand(247 - 224));  // Check for 247.
+    __ beq(&fail);
+
+    __ bind(&loop_check);
+    __ cmp(r3, r4);
+    __ blt(&loop);
+    __ b(&success);
+
+    __ bind(&fail);
+    BranchOrBacktrack(al, on_no_match);
+
+    __ bind(&success);
+    // Compute new value of character position after the matched part.
+    __ sub(current_input_offset(), r5, end_of_input_address());
+  } else {
+    DCHECK(mode_ == UC16);
+    int argument_count = 4;
+    __ PrepareCallCFunction(argument_count, r5);
+
+    // r3 - offset of start of capture
+    // r4 - length of capture
+
+    // Put arguments into arguments registers.
+    // Parameters are
+    //   r3: Address byte_offset1 - Address captured substring's start.
+    //   r4: Address byte_offset2 - Address of current character position.
+    //   r5: size_t byte_length - length of capture in bytes(!)
+    //   r6: Isolate* isolate
+
+    // Address of start of capture.
+    __ add(r3, r3, end_of_input_address());
+    // Length of capture.
+    __ mr(r5, r4);
+    // Save length in callee-save register for use on return.
+    __ mr(r25, r4);
+    // Address of current input position.
+    __ add(r4, current_input_offset(), end_of_input_address());
+    // Isolate.
+    __ mov(r6, Operand(ExternalReference::isolate_address(isolate())));
+
+    {
+      AllowExternalCallThatCantCauseGC scope(masm_);
+      ExternalReference function =
+          ExternalReference::re_case_insensitive_compare_uc16(isolate());
+      __ CallCFunction(function, argument_count);
+    }
+
+    // Check if function returned non-zero for success or zero for failure.
+    __ cmpi(r3, Operand::Zero());
+    BranchOrBacktrack(eq, on_no_match);
+    // On success, increment position by length of capture.
+    __ add(current_input_offset(), current_input_offset(), r25);
+  }
+
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotBackReference(int start_reg,
+                                                    Label* on_no_match) {
+  Label fallthrough;
+  Label success;
+
+  // Find length of back-referenced capture.
+  __ LoadP(r3, register_location(start_reg), r0);
+  __ LoadP(r4, register_location(start_reg + 1), r0);
+  __ sub(r4, r4, r3, LeaveOE, SetRC);  // Length to check.
+  // Succeed on empty capture (including no capture).
+  __ beq(&fallthrough, cr0);
+
+  // Check that there are enough characters left in the input.
+  __ add(r0, r4, current_input_offset(), LeaveOE, SetRC);
+  BranchOrBacktrack(gt, on_no_match, cr0);
+
+  // Compute pointers to match string and capture string
+  __ add(r3, r3, end_of_input_address());
+  __ add(r5, end_of_input_address(), current_input_offset());
+  __ add(r4, r4, r3);
+
+  Label loop;
+  __ bind(&loop);
+  if (mode_ == LATIN1) {
+    __ lbz(r6, MemOperand(r3));
+    __ addi(r3, r3, Operand(char_size()));
+    __ lbz(r25, MemOperand(r5));
+    __ addi(r5, r5, Operand(char_size()));
+  } else {
+    DCHECK(mode_ == UC16);
+    __ lhz(r6, MemOperand(r3));
+    __ addi(r3, r3, Operand(char_size()));
+    __ lhz(r25, MemOperand(r5));
+    __ addi(r5, r5, Operand(char_size()));
+  }
+  __ cmp(r6, r25);
+  BranchOrBacktrack(ne, on_no_match);
+  __ cmp(r3, r4);
+  __ blt(&loop);
+
+  // Move current character position to position after match.
+  __ sub(current_input_offset(), r5, end_of_input_address());
+  __ bind(&fallthrough);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotCharacter(unsigned c,
+                                                Label* on_not_equal) {
+  __ Cmpli(current_character(), Operand(c), r0);
+  BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckCharacterAfterAnd(uint32_t c, uint32_t mask,
+                                                     Label* on_equal) {
+  __ mov(r0, Operand(mask));
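+  // When c is zero there is nothing left to compare: the record form of the
+  // AND sets cr0 eq exactly when the masked character is zero.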
+  if (c == 0) {
+    __ and_(r3, current_character(), r0, SetRC);
+  } else {
+    __ and_(r3, current_character(), r0);
+    __ Cmpli(r3, Operand(c), r0, cr0);
+  }
+  BranchOrBacktrack(eq, on_equal, cr0);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotCharacterAfterAnd(unsigned c,
+                                                        unsigned mask,
+                                                        Label* on_not_equal) {
+  __ mov(r0, Operand(mask));
+  if (c == 0) {
+    __ and_(r3, current_character(), r0, SetRC);
+  } else {
+    __ and_(r3, current_character(), r0);
+    __ Cmpli(r3, Operand(c), r0, cr0);
+  }
+  BranchOrBacktrack(ne, on_not_equal, cr0);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckNotCharacterAfterMinusAnd(
+    uc16 c, uc16 minus, uc16 mask, Label* on_not_equal) {
+  DCHECK(minus < String::kMaxUtf16CodeUnit);
+  __ subi(r3, current_character(), Operand(minus));
+  __ mov(r0, Operand(mask));
+  __ and_(r3, r3, r0);
+  __ Cmpli(r3, Operand(c), r0);
+  BranchOrBacktrack(ne, on_not_equal);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckCharacterInRange(uc16 from, uc16 to,
+                                                    Label* on_in_range) {
+  __ mov(r0, Operand(from));
+  __ sub(r3, current_character(), r0);
+  __ Cmpli(r3, Operand(to - from), r0);
+  BranchOrBacktrack(le, on_in_range);  // Unsigned lower-or-same condition.
+}
+
+
+void RegExpMacroAssemblerPPC::CheckCharacterNotInRange(uc16 from, uc16 to,
+                                                       Label* on_not_in_range) {
+  __ mov(r0, Operand(from));
+  __ sub(r3, current_character(), r0);
+  __ Cmpli(r3, Operand(to - from), r0);
+  BranchOrBacktrack(gt, on_not_in_range);  // Unsigned higher condition.
+}
+
+
+void RegExpMacroAssemblerPPC::CheckBitInTable(Handle<ByteArray> table,
+                                              Label* on_bit_set) {
+  __ mov(r3, Operand(table));
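+  // Reduce the character modulo the table size unless the table already
+  // covers every possible Latin1 character value.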
+  if (mode_ != LATIN1 || kTableMask != String::kMaxOneByteCharCode) {
+    __ andi(r4, current_character(), Operand(kTableSize - 1));
+    __ addi(r4, r4, Operand(ByteArray::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ addi(r4, current_character(),
+            Operand(ByteArray::kHeaderSize - kHeapObjectTag));
+  }
+  __ lbzx(r3, MemOperand(r3, r4));
+  __ cmpi(r3, Operand::Zero());
+  BranchOrBacktrack(ne, on_bit_set);
+}
+
+
+bool RegExpMacroAssemblerPPC::CheckSpecialCharacterClass(uc16 type,
+                                                         Label* on_no_match) {
+  // Range checks (c in min..max) are generally implemented by an unsigned
+  // (c - min) <= (max - min) check
+  switch (type) {
+    case 's':
+      // Match space-characters
+      if (mode_ == LATIN1) {
+        // One byte space characters are '\t'..'\r', ' ' and \u00a0.
+        Label success;
+        __ cmpi(current_character(), Operand(' '));
+        __ beq(&success);
+        // Check range 0x09..0x0d
+        __ subi(r3, current_character(), Operand('\t'));
+        __ cmpli(r3, Operand('\r' - '\t'));
+        __ ble(&success);
+        // \u00a0 (NBSP).
+        __ cmpi(r3, Operand(0x00a0 - '\t'));
+        BranchOrBacktrack(ne, on_no_match);
+        __ bind(&success);
+        return true;
+      }
+      return false;
+    case 'S':
+      // The emitted code for generic character classes is good enough.
+      return false;
+    case 'd':
+      // Match ASCII digits ('0'..'9')
+      __ subi(r3, current_character(), Operand('0'));
+      __ cmpli(r3, Operand('9' - '0'));
+      BranchOrBacktrack(gt, on_no_match);
+      return true;
+    case 'D':
+      // Match non-digits (anything but ASCII '0'..'9').
+      __ subi(r3, current_character(), Operand('0'));
+      __ cmpli(r3, Operand('9' - '0'));
+      BranchOrBacktrack(le, on_no_match);
+      return true;
+    case '.': {
+      // Match non-newlines (not 0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+      __ xori(r3, current_character(), Operand(0x01));
+      // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+      __ subi(r3, r3, Operand(0x0b));
+      __ cmpli(r3, Operand(0x0c - 0x0b));
+      BranchOrBacktrack(le, on_no_match);
+      if (mode_ == UC16) {
+        // Compare original value to 0x2028 and 0x2029, using the already
+        // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+        // 0x201d (0x2028 - 0x0b) or 0x201e.
+        __ subi(r3, r3, Operand(0x2028 - 0x0b));
+        __ cmpli(r3, Operand(1));
+        BranchOrBacktrack(le, on_no_match);
+      }
+      return true;
+    }
+    case 'n': {
+      // Match newlines (0x0a('\n'), 0x0d('\r'), 0x2028 and 0x2029)
+      __ xori(r3, current_character(), Operand(0x01));
+      // See if current character is '\n'^1 or '\r'^1, i.e., 0x0b or 0x0c
+      __ subi(r3, r3, Operand(0x0b));
+      __ cmpli(r3, Operand(0x0c - 0x0b));
+      if (mode_ == LATIN1) {
+        BranchOrBacktrack(gt, on_no_match);
+      } else {
+        Label done;
+        __ ble(&done);
+        // Compare original value to 0x2028 and 0x2029, using the already
+        // computed (current_char ^ 0x01 - 0x0b). I.e., check for
+        // 0x201d (0x2028 - 0x0b) or 0x201e.
+        __ subi(r3, r3, Operand(0x2028 - 0x0b));
+        __ cmpli(r3, Operand(1));
+        BranchOrBacktrack(gt, on_no_match);
+        __ bind(&done);
+      }
+      return true;
+    }
+    case 'w': {
+      if (mode_ != LATIN1) {
+        // Table is 256 entries, so all Latin1 characters can be tested.
+        __ cmpi(current_character(), Operand('z'));
+        BranchOrBacktrack(gt, on_no_match);
+      }
+      ExternalReference map = ExternalReference::re_word_character_map();
+      __ mov(r3, Operand(map));
+      __ lbzx(r3, MemOperand(r3, current_character()));
+      __ cmpli(r3, Operand::Zero());
+      BranchOrBacktrack(eq, on_no_match);
+      return true;
+    }
+    case 'W': {
+      Label done;
+      if (mode_ != LATIN1) {
+        // Table is 256 entries, so all Latin1 characters can be tested.
+        __ cmpli(current_character(), Operand('z'));
+        __ bgt(&done);
+      }
+      ExternalReference map = ExternalReference::re_word_character_map();
+      __ mov(r3, Operand(map));
+      __ lbzx(r3, MemOperand(r3, current_character()));
+      __ cmpli(r3, Operand::Zero());
+      BranchOrBacktrack(ne, on_no_match);
+      if (mode_ != LATIN1) {
+        __ bind(&done);
+      }
+      return true;
+    }
+    case '*':
+      // Match any character.
+      return true;
+    // No custom implementation (yet): s(UC16), S(UC16).
+    default:
+      return false;
+  }
+}
+
+
+void RegExpMacroAssemblerPPC::Fail() {
+  __ li(r3, Operand(FAILURE));
+  __ b(&exit_label_);
+}
+
+
+Handle<HeapObject> RegExpMacroAssemblerPPC::GetCode(Handle<String> source) {
+  Label return_r3;
+
+  if (masm_->has_exception()) {
+    // If the code gets corrupted due to long regular expressions and lack of
+    // space on trampolines, an internal exception flag is set. If this case
+    // is detected, we jump straight into the exit sequence.
+    __ bind_to(&entry_label_, internal_failure_label_.pos());
+  } else {
+    // Finalize code - write the entry point code now we know how many
+    // registers we need.
+
+    // Entry code:
+    __ bind(&entry_label_);
+
+    // Tell the system that we have a stack frame.  Because the type
+    // is MANUAL, no code is generated.
+    FrameScope scope(masm_, StackFrame::MANUAL);
+
+    // Ensure register assignments are consistent with the callee-save mask.
+    DCHECK(r25.bit() & kRegExpCalleeSaved);
+    DCHECK(code_pointer().bit() & kRegExpCalleeSaved);
+    DCHECK(current_input_offset().bit() & kRegExpCalleeSaved);
+    DCHECK(current_character().bit() & kRegExpCalleeSaved);
+    DCHECK(backtrack_stackpointer().bit() & kRegExpCalleeSaved);
+    DCHECK(end_of_input_address().bit() & kRegExpCalleeSaved);
+    DCHECK(frame_pointer().bit() & kRegExpCalleeSaved);
+
+    // Actually emit code to start a new stack frame.
+    // Push arguments
+    // Save callee-save registers.
+    // Start new stack frame.
+    // Store link register in existing stack-cell.
+    // Order here should correspond to order of offset constants in header file.
+    RegList registers_to_retain = kRegExpCalleeSaved;
+    RegList argument_registers = r3.bit() | r4.bit() | r5.bit() | r6.bit() |
+                                 r7.bit() | r8.bit() | r9.bit() | r10.bit();
+    __ mflr(r0);
+    __ push(r0);
+    __ MultiPush(argument_registers | registers_to_retain);
+    // Set frame pointer in space for it if this is not a direct call
+    // from generated code.
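+    // fp is set just above the 8 pushed argument registers, at the backup
+    // slot for r25 (fp[0] in the stack layout described in the header comment).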
+    __ addi(frame_pointer(), sp, Operand(8 * kPointerSize));
+    __ li(r3, Operand::Zero());
+    __ push(r3);  // Make room for success counter and initialize it to 0.
+    __ push(r3);  // Make room for "position - 1" constant (value is irrelevant)
+    // Check if we have space on the stack for registers.
+    Label stack_limit_hit;
+    Label stack_ok;
+
+    ExternalReference stack_limit =
+        ExternalReference::address_of_stack_limit(isolate());
+    __ mov(r3, Operand(stack_limit));
+    __ LoadP(r3, MemOperand(r3));
+    __ sub(r3, sp, r3, LeaveOE, SetRC);
+    // Handle it if the stack pointer is already below the stack limit.
+    __ ble(&stack_limit_hit, cr0);
+    // Check if there is room for the variable number of registers above
+    // the stack limit.
+    __ Cmpli(r3, Operand(num_registers_ * kPointerSize), r0);
+    __ bge(&stack_ok);
+    // Exit with OutOfMemory exception. There is not enough space on the stack
+    // for our working registers.
+    __ li(r3, Operand(EXCEPTION));
+    __ b(&return_r3);
+
+    __ bind(&stack_limit_hit);
+    CallCheckStackGuardState(r3);
+    __ cmpi(r3, Operand::Zero());
+    // If returned value is non-zero, we exit with the returned value as result.
+    __ bne(&return_r3);
+
+    __ bind(&stack_ok);
+
+    // Allocate space on stack for registers.
+    __ Add(sp, sp, -num_registers_ * kPointerSize, r0);
+    // Load string end.
+    __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+    // Load input start.
+    __ LoadP(r3, MemOperand(frame_pointer(), kInputStart));
+    // Find negative length (offset of start relative to end).
+    __ sub(current_input_offset(), r3, end_of_input_address());
+    // Set r3 to address of char before start of the input string
+    // (effectively string position -1).
+    __ LoadP(r4, MemOperand(frame_pointer(), kStartIndex));
+    __ subi(r3, current_input_offset(), Operand(char_size()));
+    if (mode_ == UC16) {
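+      // Each UC16 character occupies two bytes, so double the start index.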
+      __ ShiftLeftImm(r0, r4, Operand(1));
+      __ sub(r3, r3, r0);
+    } else {
+      __ sub(r3, r3, r4);
+    }
+    // Store this value in a local variable, for use when clearing
+    // position registers.
+    __ StoreP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+    // Initialize code pointer register
+    __ mov(code_pointer(), Operand(masm_->CodeObject()));
+
+    Label load_char_start_regexp, start_regexp;
+    // Load newline if index is at start, previous character otherwise.
+    __ cmpi(r4, Operand::Zero());
+    __ bne(&load_char_start_regexp);
+    __ li(current_character(), Operand('\n'));
+    __ b(&start_regexp);
+
+    // Global regexp restarts matching here.
+    __ bind(&load_char_start_regexp);
+    // Load previous char as initial value of current character register.
+    LoadCurrentCharacterUnchecked(-1, 1);
+    __ bind(&start_regexp);
+
+    // Initialize on-stack registers.
+    if (num_saved_registers_ > 0) {  // Always is, if generated from a regexp.
+      // Fill saved registers with initial value = start offset - 1
+      if (num_saved_registers_ > 8) {
+        // One slot beyond address of register 0.
+        __ addi(r4, frame_pointer(), Operand(kRegisterZero + kPointerSize));
+        __ li(r5, Operand(num_saved_registers_));
+        __ mtctr(r5);
+        Label init_loop;
+        __ bind(&init_loop);
+        __ StorePU(r3, MemOperand(r4, -kPointerSize));
+        __ bdnz(&init_loop);
+      } else {
+        for (int i = 0; i < num_saved_registers_; i++) {
+          __ StoreP(r3, register_location(i), r0);
+        }
+      }
+    }
+
+    // Initialize backtrack stack pointer.
+    __ LoadP(backtrack_stackpointer(),
+             MemOperand(frame_pointer(), kStackHighEnd));
+
+    __ b(&start_label_);
+
+    // Exit code:
+    if (success_label_.is_linked()) {
+      // Save captures when successful.
+      __ bind(&success_label_);
+      if (num_saved_registers_ > 0) {
+        // Copy captures to output.
+        __ LoadP(r4, MemOperand(frame_pointer(), kInputStart));
+        __ LoadP(r3, MemOperand(frame_pointer(), kRegisterOutput));
+        __ LoadP(r5, MemOperand(frame_pointer(), kStartIndex));
+        __ sub(r4, end_of_input_address(), r4);
+        // r4 is length of input in bytes.
+        if (mode_ == UC16) {
+          __ ShiftRightImm(r4, r4, Operand(1));
+        }
+        // r4 is length of input in characters.
+        __ add(r4, r4, r5);
+        // r4 is length of string in characters.
+
+        DCHECK_EQ(0, num_saved_registers_ % 2);
+        // Always an even number of capture registers. This allows us to
+        // unroll the loop once to add an operation between a load of a register
+        // and the following use of that register.
+        for (int i = 0; i < num_saved_registers_; i += 2) {
+          __ LoadP(r5, register_location(i), r0);
+          __ LoadP(r6, register_location(i + 1), r0);
+          if (i == 0 && global_with_zero_length_check()) {
+            // Keep capture start in r25 for the zero-length check later.
+            __ mr(r25, r5);
+          }
+          if (mode_ == UC16) {
+            __ ShiftRightArithImm(r5, r5, 1);
+            __ add(r5, r4, r5);
+            __ ShiftRightArithImm(r6, r6, 1);
+            __ add(r6, r4, r6);
+          } else {
+            __ add(r5, r4, r5);
+            __ add(r6, r4, r6);
+          }
+          __ stw(r5, MemOperand(r3));
+          __ addi(r3, r3, Operand(kIntSize));
+          __ stw(r6, MemOperand(r3));
+          __ addi(r3, r3, Operand(kIntSize));
+        }
+      }
+
+      if (global()) {
+        // Restart matching if the regular expression is flagged as global.
+        __ LoadP(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+        __ LoadP(r4, MemOperand(frame_pointer(), kNumOutputRegisters));
+        __ LoadP(r5, MemOperand(frame_pointer(), kRegisterOutput));
+        // Increment success counter.
+        __ addi(r3, r3, Operand(1));
+        __ StoreP(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+        // Capture results have been stored, so the number of remaining global
+        // output registers is reduced by the number of stored captures.
+        __ subi(r4, r4, Operand(num_saved_registers_));
+        // Check whether we have enough room for another set of capture results.
+        __ cmpi(r4, Operand(num_saved_registers_));
+        __ blt(&return_r3);
+
+        __ StoreP(r4, MemOperand(frame_pointer(), kNumOutputRegisters));
+        // Advance the location for output.
+        __ addi(r5, r5, Operand(num_saved_registers_ * kIntSize));
+        __ StoreP(r5, MemOperand(frame_pointer(), kRegisterOutput));
+
+        // Prepare r3 to initialize registers with its value in the next run.
+        __ LoadP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+
+        if (global_with_zero_length_check()) {
+          // Special case for zero-length matches.
+          // r25: capture start index
+          __ cmp(current_input_offset(), r25);
+          // Not a zero-length match, restart.
+          __ bne(&load_char_start_regexp);
+          // Offset from the end is zero if we already reached the end.
+          __ cmpi(current_input_offset(), Operand::Zero());
+          __ beq(&exit_label_);
+          // Advance current position after a zero-length match.
+          __ addi(current_input_offset(), current_input_offset(),
+                  Operand((mode_ == UC16) ? 2 : 1));
+        }
+
+        __ b(&load_char_start_regexp);
+      } else {
+        __ li(r3, Operand(SUCCESS));
+      }
+    }
+
+    // Exit and return r3
+    __ bind(&exit_label_);
+    if (global()) {
+      __ LoadP(r3, MemOperand(frame_pointer(), kSuccessfulCaptures));
+    }
+
+    __ bind(&return_r3);
+    // Skip sp past regexp registers and local variables.
+    __ mr(sp, frame_pointer());
+    // Restore registers r25..r31 and return (restoring lr to pc).
+    __ MultiPop(registers_to_retain);
+    __ pop(r0);
+    __ mtctr(r0);
+    __ bctr();
+
+    // Backtrack code (branch target for conditional backtracks).
+    if (backtrack_label_.is_linked()) {
+      __ bind(&backtrack_label_);
+      Backtrack();
+    }
+
+    Label exit_with_exception;
+
+    // Preemption code.
+    if (check_preempt_label_.is_linked()) {
+      SafeCallTarget(&check_preempt_label_);
+
+      CallCheckStackGuardState(r3);
+      __ cmpi(r3, Operand::Zero());
+      // If returning non-zero, we should end execution with the given
+      // result as return value.
+      __ bne(&return_r3);
+
+      // String might have moved: Reload end of string from frame.
+      __ LoadP(end_of_input_address(), MemOperand(frame_pointer(), kInputEnd));
+      SafeReturn();
+    }
+
+    // Backtrack stack overflow code.
+    if (stack_overflow_label_.is_linked()) {
+      SafeCallTarget(&stack_overflow_label_);
+      // Reached if the backtrack-stack limit has been hit.
+      Label grow_failed;
+
+      // Call GrowStack(backtrack_stackpointer(), &stack_base)
+      static const int num_arguments = 3;
+      __ PrepareCallCFunction(num_arguments, r3);
+      __ mr(r3, backtrack_stackpointer());
+      __ addi(r4, frame_pointer(), Operand(kStackHighEnd));
+      __ mov(r5, Operand(ExternalReference::isolate_address(isolate())));
+      ExternalReference grow_stack =
+          ExternalReference::re_grow_stack(isolate());
+      __ CallCFunction(grow_stack, num_arguments);
+      // If it returned NULL, we have failed to grow the stack, and
+      // must exit with a stack-overflow exception.
+      __ cmpi(r3, Operand::Zero());
+      __ beq(&exit_with_exception);
+      // Otherwise use return value as new stack pointer.
+      __ mr(backtrack_stackpointer(), r3);
+      // Restore saved registers and continue.
+      SafeReturn();
+    }
+
+    if (exit_with_exception.is_linked()) {
+      // If any of the code above needed to exit with an exception.
+      __ bind(&exit_with_exception);
+      // Exit with Result EXCEPTION(-1) to signal thrown exception.
+      __ li(r3, Operand(EXCEPTION));
+      __ b(&return_r3);
+    }
+  }
+
+  CodeDesc code_desc;
+  masm_->GetCode(&code_desc);
+  Handle<Code> code = isolate()->factory()->NewCode(
+      code_desc, Code::ComputeFlags(Code::REGEXP), masm_->CodeObject());
+  PROFILE(masm_->isolate(), RegExpCodeCreateEvent(*code, *source));
+  return Handle<HeapObject>::cast(code);
+}
+
+
+void RegExpMacroAssemblerPPC::GoTo(Label* to) { BranchOrBacktrack(al, to); }
+
+
+void RegExpMacroAssemblerPPC::IfRegisterGE(int reg, int comparand,
+                                           Label* if_ge) {
+  __ LoadP(r3, register_location(reg), r0);
+  __ Cmpi(r3, Operand(comparand), r0);
+  BranchOrBacktrack(ge, if_ge);
+}
+
+
+void RegExpMacroAssemblerPPC::IfRegisterLT(int reg, int comparand,
+                                           Label* if_lt) {
+  __ LoadP(r3, register_location(reg), r0);
+  __ Cmpi(r3, Operand(comparand), r0);
+  BranchOrBacktrack(lt, if_lt);
+}
+
+
+void RegExpMacroAssemblerPPC::IfRegisterEqPos(int reg, Label* if_eq) {
+  __ LoadP(r3, register_location(reg), r0);
+  __ cmp(r3, current_input_offset());
+  BranchOrBacktrack(eq, if_eq);
+}
+
+
+RegExpMacroAssembler::IrregexpImplementation
+RegExpMacroAssemblerPPC::Implementation() {
+  return kPPCImplementation;
+}
+
+
+void RegExpMacroAssemblerPPC::LoadCurrentCharacter(int cp_offset,
+                                                   Label* on_end_of_input,
+                                                   bool check_bounds,
+                                                   int characters) {
+  DCHECK(cp_offset >= -1);        // ^ and \b can look behind one character.
+  DCHECK(cp_offset < (1 << 30));  // Be sane! (And ensure negation works)
+  if (check_bounds) {
+    CheckPosition(cp_offset + characters - 1, on_end_of_input);
+  }
+  LoadCurrentCharacterUnchecked(cp_offset, characters);
+}
+
+
+void RegExpMacroAssemblerPPC::PopCurrentPosition() {
+  Pop(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerPPC::PopRegister(int register_index) {
+  Pop(r3);
+  __ StoreP(r3, register_location(register_index), r0);
+}
+
+
+void RegExpMacroAssemblerPPC::PushBacktrack(Label* label) {
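+  // Push the label's offset from the start of the code object; Backtrack()
+  // adds the code pointer back before jumping.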
+  __ mov_label_offset(r3, label);
+  Push(r3);
+  CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerPPC::PushCurrentPosition() {
+  Push(current_input_offset());
+}
+
+
+void RegExpMacroAssemblerPPC::PushRegister(int register_index,
+                                           StackCheckFlag check_stack_limit) {
+  __ LoadP(r3, register_location(register_index), r0);
+  Push(r3);
+  if (check_stack_limit) CheckStackLimit();
+}
+
+
+void RegExpMacroAssemblerPPC::ReadCurrentPositionFromRegister(int reg) {
+  __ LoadP(current_input_offset(), register_location(reg), r0);
+}
+
+
+void RegExpMacroAssemblerPPC::ReadStackPointerFromRegister(int reg) {
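+  // The register holds the backtrack stack pointer as an offset from the high
+  // end of the stack (see WriteStackPointerToRegister); rebase it here.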
+  __ LoadP(backtrack_stackpointer(), register_location(reg), r0);
+  __ LoadP(r3, MemOperand(frame_pointer(), kStackHighEnd));
+  __ add(backtrack_stackpointer(), backtrack_stackpointer(), r3);
+}
+
+
+void RegExpMacroAssemblerPPC::SetCurrentPositionFromEnd(int by) {
+  Label after_position;
+  __ Cmpi(current_input_offset(), Operand(-by * char_size()), r0);
+  __ bge(&after_position);
+  __ mov(current_input_offset(), Operand(-by * char_size()));
+  // On RegExp code entry (where this operation is used), the character before
+  // the current position is expected to be already loaded.
+  // We have advanced the position, so it's safe to read backwards.
+  LoadCurrentCharacterUnchecked(-1, 1);
+  __ bind(&after_position);
+}
+
+
+void RegExpMacroAssemblerPPC::SetRegister(int register_index, int to) {
+  DCHECK(register_index >= num_saved_registers_);  // Reserved for positions!
+  __ mov(r3, Operand(to));
+  __ StoreP(r3, register_location(register_index), r0);
+}
+
+
+bool RegExpMacroAssemblerPPC::Succeed() {
+  __ b(&success_label_);
+  return global();
+}
+
+
+void RegExpMacroAssemblerPPC::WriteCurrentPositionToRegister(int reg,
+                                                             int cp_offset) {
+  if (cp_offset == 0) {
+    __ StoreP(current_input_offset(), register_location(reg), r0);
+  } else {
+    __ mov(r0, Operand(cp_offset * char_size()));
+    __ add(r3, current_input_offset(), r0);
+    __ StoreP(r3, register_location(reg), r0);
+  }
+}
+
+
+void RegExpMacroAssemblerPPC::ClearRegisters(int reg_from, int reg_to) {
+  DCHECK(reg_from <= reg_to);
+  __ LoadP(r3, MemOperand(frame_pointer(), kInputStartMinusOne));
+  for (int reg = reg_from; reg <= reg_to; reg++) {
+    __ StoreP(r3, register_location(reg), r0);
+  }
+}
+
+
+void RegExpMacroAssemblerPPC::WriteStackPointerToRegister(int reg) {
+  __ LoadP(r4, MemOperand(frame_pointer(), kStackHighEnd));
+  __ sub(r3, backtrack_stackpointer(), r4);
+  __ StoreP(r3, register_location(reg), r0);
+}
+
+
+// Private methods:
+
+void RegExpMacroAssemblerPPC::CallCheckStackGuardState(Register scratch) {
+  int frame_alignment = masm_->ActivationFrameAlignment();
+  int stack_space = kNumRequiredStackFrameSlots;
+  int stack_passed_arguments = 1;  // space for return address pointer
+
+  // The following stack manipulation logic is similar to
+  // PrepareCallCFunction.  However, we need an extra slot on the
+  // stack to house the return address parameter.
+  if (frame_alignment > kPointerSize) {
+    // Make stack end at alignment and make room for stack arguments
+    // -- preserving original value of sp.
+    __ mr(scratch, sp);
+    __ addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
+    DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
+    __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
+    __ StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
+  } else {
+    // Make room for stack arguments
+    stack_space += stack_passed_arguments;
+  }
+
+  // Allocate frame with required slots to make ABI work.
+  __ li(r0, Operand::Zero());
+  __ StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
+
+  // RegExp code frame pointer.
+  __ mr(r5, frame_pointer());
+  // Code* of self.
+  __ mov(r4, Operand(masm_->CodeObject()));
+  // r3 will point to the return address, placed by DirectCEntry.
+  __ addi(r3, sp, Operand(kStackFrameExtraParamSlot * kPointerSize));
+
+  ExternalReference stack_guard_check =
+      ExternalReference::re_check_stack_guard_state(isolate());
+  __ mov(ip, Operand(stack_guard_check));
+  DirectCEntryStub stub(isolate());
+  stub.GenerateCall(masm_, ip);
+
+  // Restore the stack pointer
+  stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
+  if (frame_alignment > kPointerSize) {
+    __ LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
+  } else {
+    __ addi(sp, sp, Operand(stack_space * kPointerSize));
+  }
+
+  __ mov(code_pointer(), Operand(masm_->CodeObject()));
+}
+
+
+// Helper function for reading a value out of a stack frame.
+template <typename T>
+static T& frame_entry(Address re_frame, int frame_offset) {
+  return reinterpret_cast<T&>(Memory::int32_at(re_frame + frame_offset));
+}
+
+
+int RegExpMacroAssemblerPPC::CheckStackGuardState(Address* return_address,
+                                                  Code* re_code,
+                                                  Address re_frame) {
+  Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
+  StackLimitCheck check(isolate);
+  if (check.JsHasOverflowed()) {
+    isolate->StackOverflow();
+    return EXCEPTION;
+  }
+
+  // If it is not a real stack overflow, the stack guard was used to interrupt
+  // execution for another purpose.
+
+  // If this is a direct call from JavaScript retry the RegExp forcing the call
+  // through the runtime system. Currently the direct call cannot handle a GC.
+  if (frame_entry<int>(re_frame, kDirectCall) == 1) {
+    return RETRY;
+  }
+
+  // Prepare for possible GC.
+  HandleScope handles(isolate);
+  Handle<Code> code_handle(re_code);
+
+  Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
+
+  // Current string.
+  bool is_one_byte = subject->IsOneByteRepresentationUnderneath();
+
+  DCHECK(re_code->instruction_start() <= *return_address);
+  DCHECK(*return_address <=
+         re_code->instruction_start() + re_code->instruction_size());
+
+  Object* result = isolate->stack_guard()->HandleInterrupts();
+
+  if (*code_handle != re_code) {  // Return address no longer valid
+    intptr_t delta = code_handle->address() - re_code->address();
+    // Overwrite the return address on the stack.
+    *return_address += delta;
+  }
+
+  if (result->IsException()) {
+    return EXCEPTION;
+  }
+
+  Handle<String> subject_tmp = subject;
+  int slice_offset = 0;
+
+  // Extract the underlying string and the slice offset.
+  if (StringShape(*subject_tmp).IsCons()) {
+    subject_tmp = Handle<String>(ConsString::cast(*subject_tmp)->first());
+  } else if (StringShape(*subject_tmp).IsSliced()) {
+    SlicedString* slice = SlicedString::cast(*subject_tmp);
+    subject_tmp = Handle<String>(slice->parent());
+    slice_offset = slice->offset();
+  }
+
+  // String might have changed.
+  if (subject_tmp->IsOneByteRepresentation() != is_one_byte) {
+    // If we changed between a Latin1 and a UC16 string, the specialized
+    // code cannot be used, and we need to restart regexp matching from
+    // scratch (including, potentially, compiling a new version of the code).
+    return RETRY;
+  }
+
+  // Otherwise, the content of the string might have moved. It must still
+  // be a sequential or external string with the same content.
+  // Update the start and end pointers in the stack frame to the current
+  // location (whether it has actually moved or not).
+  DCHECK(StringShape(*subject_tmp).IsSequential() ||
+         StringShape(*subject_tmp).IsExternal());
+
+  // The original start address of the characters to match.
+  const byte* start_address = frame_entry<const byte*>(re_frame, kInputStart);
+
+  // Find the current start address of the same character at the current string
+  // position.
+  int start_index = frame_entry<intptr_t>(re_frame, kStartIndex);
+  const byte* new_address =
+      StringCharacterPosition(*subject_tmp, start_index + slice_offset);
+
+  if (start_address != new_address) {
+    // If there is a difference, update the object pointer and start and end
+    // addresses in the RegExp stack frame to match the new value.
+    const byte* end_address = frame_entry<const byte*>(re_frame, kInputEnd);
+    int byte_length = static_cast<int>(end_address - start_address);
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+    frame_entry<const byte*>(re_frame, kInputStart) = new_address;
+    frame_entry<const byte*>(re_frame, kInputEnd) = new_address + byte_length;
+  } else if (frame_entry<const String*>(re_frame, kInputString) != *subject) {
+    // Subject string might have been a ConsString that underwent
+    // short-circuiting during GC. That will not change start_address but
+    // will change the pointer inside the subject handle.
+    frame_entry<const String*>(re_frame, kInputString) = *subject;
+  }
+
+  return 0;
+}
+
+
+MemOperand RegExpMacroAssemblerPPC::register_location(int register_index) {
+  DCHECK(register_index < (1 << 30));
+  if (num_registers_ <= register_index) {
+    num_registers_ = register_index + 1;
+  }
+  return MemOperand(frame_pointer(),
+                    kRegisterZero - register_index * kPointerSize);
+}
+
+
+void RegExpMacroAssemblerPPC::CheckPosition(int cp_offset,
+                                            Label* on_outside_input) {
+  __ Cmpi(current_input_offset(), Operand(-cp_offset * char_size()), r0);
+  BranchOrBacktrack(ge, on_outside_input);
+}
+
+
+void RegExpMacroAssemblerPPC::BranchOrBacktrack(Condition condition, Label* to,
+                                                CRegister cr) {
+  if (condition == al) {  // Unconditional.
+    if (to == NULL) {
+      Backtrack();
+      return;
+    }
+    __ b(to);
+    return;
+  }
+  if (to == NULL) {
+    __ b(condition, &backtrack_label_, cr);
+    return;
+  }
+  __ b(condition, to, cr);
+}
+
+
+void RegExpMacroAssemblerPPC::SafeCall(Label* to, Condition cond,
+                                       CRegister cr) {
+  __ b(cond, to, cr, SetLK);
+}
+
+
+void RegExpMacroAssemblerPPC::SafeReturn() {
+  __ pop(r0);
+  __ mov(ip, Operand(masm_->CodeObject()));
+  __ add(r0, r0, ip);
+  __ mtlr(r0);
+  __ blr();
+}
+
+
+void RegExpMacroAssemblerPPC::SafeCallTarget(Label* name) {
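+  // Save the return address as an offset from the code object so it remains
+  // valid even if the code object moves during a GC.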
+  __ bind(name);
+  __ mflr(r0);
+  __ mov(ip, Operand(masm_->CodeObject()));
+  __ sub(r0, r0, ip);
+  __ push(r0);
+}
+
+
+void RegExpMacroAssemblerPPC::Push(Register source) {
+  DCHECK(!source.is(backtrack_stackpointer()));
+  __ StorePU(source, MemOperand(backtrack_stackpointer(), -kPointerSize));
+}
+
+
+void RegExpMacroAssemblerPPC::Pop(Register target) {
+  DCHECK(!target.is(backtrack_stackpointer()));
+  __ LoadP(target, MemOperand(backtrack_stackpointer()));
+  __ addi(backtrack_stackpointer(), backtrack_stackpointer(),
+          Operand(kPointerSize));
+}
+
+
+void RegExpMacroAssemblerPPC::CheckPreemption() {
+  // Check for preemption.
+  ExternalReference stack_limit =
+      ExternalReference::address_of_stack_limit(isolate());
+  __ mov(r3, Operand(stack_limit));
+  __ LoadP(r3, MemOperand(r3));
+  __ cmpl(sp, r3);
+  SafeCall(&check_preempt_label_, le);
+}
+
+
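+// Checks the backtrack stack against the regexp stack limit; compare
+// CheckPreemption above, which checks the machine stack pointer against the
+// VM's stack limit.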
+void RegExpMacroAssemblerPPC::CheckStackLimit() {
+  ExternalReference stack_limit =
+      ExternalReference::address_of_regexp_stack_limit(isolate());
+  __ mov(r3, Operand(stack_limit));
+  __ LoadP(r3, MemOperand(r3));
+  __ cmpl(backtrack_stackpointer(), r3);
+  SafeCall(&stack_overflow_label_, le);
+}
+
+
+bool RegExpMacroAssemblerPPC::CanReadUnaligned() {
+  return CpuFeatures::IsSupported(UNALIGNED_ACCESSES) && !slow_safe();
+}
+
+
+void RegExpMacroAssemblerPPC::LoadCurrentCharacterUnchecked(int cp_offset,
+                                                            int characters) {
+  Register offset = current_input_offset();
+  if (cp_offset != 0) {
+    // r25 is not being used to store the capture start index at this point.
+    __ addi(r25, current_input_offset(), Operand(cp_offset * char_size()));
+    offset = r25;
+  }
+  // The lwz, stw, lhz and sth instructions can perform unaligned accesses if
+  // the CPU and the operating system running on the target allow it.
+  // We assume we don't want unaligned loads on PPC, so this function
+  // must only be used to load a single character at a time.
+
+  DCHECK(characters == 1);
+  __ add(current_character(), end_of_input_address(), offset);
+  if (mode_ == LATIN1) {
+    __ lbz(current_character(), MemOperand(current_character()));
+  } else {
+    DCHECK(mode_ == UC16);
+    __ lhz(current_character(), MemOperand(current_character()));
+  }
+}
+
+
+#undef __
+
+#endif  // V8_INTERPRETED_REGEXP
+}
+}  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/regexp-macro-assembler-ppc.h b/deps/v8/src/ppc/regexp-macro-assembler-ppc.h
new file mode 100644 (file)
index 0000000..1f9c3a0
--- /dev/null
@@ -0,0 +1,212 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
+#define V8_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
+
+#include "src/macro-assembler.h"
+#include "src/ppc/assembler-ppc.h"
+#include "src/ppc/assembler-ppc-inl.h"
+
+namespace v8 {
+namespace internal {
+
+
+#ifndef V8_INTERPRETED_REGEXP
+class RegExpMacroAssemblerPPC : public NativeRegExpMacroAssembler {
+ public:
+  RegExpMacroAssemblerPPC(Mode mode, int registers_to_save, Zone* zone);
+  virtual ~RegExpMacroAssemblerPPC();
+  virtual int stack_limit_slack();
+  virtual void AdvanceCurrentPosition(int by);
+  virtual void AdvanceRegister(int reg, int by);
+  virtual void Backtrack();
+  virtual void Bind(Label* label);
+  virtual void CheckAtStart(Label* on_at_start);
+  virtual void CheckCharacter(unsigned c, Label* on_equal);
+  virtual void CheckCharacterAfterAnd(unsigned c, unsigned mask,
+                                      Label* on_equal);
+  virtual void CheckCharacterGT(uc16 limit, Label* on_greater);
+  virtual void CheckCharacterLT(uc16 limit, Label* on_less);
+  // A "greedy loop" is a loop that is both greedy and with a simple
+  // body. It has a particularly simple implementation.
+  virtual void CheckGreedyLoop(Label* on_tos_equals_current_position);
+  virtual void CheckNotAtStart(Label* on_not_at_start);
+  virtual void CheckNotBackReference(int start_reg, Label* on_no_match);
+  virtual void CheckNotBackReferenceIgnoreCase(int start_reg,
+                                               Label* on_no_match);
+  virtual void CheckNotCharacter(unsigned c, Label* on_not_equal);
+  virtual void CheckNotCharacterAfterAnd(unsigned c, unsigned mask,
+                                         Label* on_not_equal);
+  virtual void CheckNotCharacterAfterMinusAnd(uc16 c, uc16 minus, uc16 mask,
+                                              Label* on_not_equal);
+  virtual void CheckCharacterInRange(uc16 from, uc16 to, Label* on_in_range);
+  virtual void CheckCharacterNotInRange(uc16 from, uc16 to,
+                                        Label* on_not_in_range);
+  virtual void CheckBitInTable(Handle<ByteArray> table, Label* on_bit_set);
+
+  // Checks whether the given offset from the current position is before
+  // the end of the string.
+  virtual void CheckPosition(int cp_offset, Label* on_outside_input);
+  virtual bool CheckSpecialCharacterClass(uc16 type, Label* on_no_match);
+  virtual void Fail();
+  virtual Handle<HeapObject> GetCode(Handle<String> source);
+  virtual void GoTo(Label* label);
+  virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
+  virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
+  virtual void IfRegisterEqPos(int reg, Label* if_eq);
+  virtual IrregexpImplementation Implementation();
+  virtual void LoadCurrentCharacter(int cp_offset, Label* on_end_of_input,
+                                    bool check_bounds = true,
+                                    int characters = 1);
+  virtual void PopCurrentPosition();
+  virtual void PopRegister(int register_index);
+  virtual void PushBacktrack(Label* label);
+  virtual void PushCurrentPosition();
+  virtual void PushRegister(int register_index,
+                            StackCheckFlag check_stack_limit);
+  virtual void ReadCurrentPositionFromRegister(int reg);
+  virtual void ReadStackPointerFromRegister(int reg);
+  virtual void SetCurrentPositionFromEnd(int by);
+  virtual void SetRegister(int register_index, int to);
+  virtual bool Succeed();
+  virtual void WriteCurrentPositionToRegister(int reg, int cp_offset);
+  virtual void ClearRegisters(int reg_from, int reg_to);
+  virtual void WriteStackPointerToRegister(int reg);
+  virtual bool CanReadUnaligned();
+
+  // Called from RegExp if the stack-guard is triggered.
+  // If the code object is relocated, the return address is fixed before
+  // returning.
+  static int CheckStackGuardState(Address* return_address, Code* re_code,
+                                  Address re_frame);
+
+ private:
+  // Offsets from frame_pointer() of function parameters and stored registers.
+  static const int kFramePointer = 0;
+
+  // Above the frame pointer - Stored registers and stack passed parameters.
+  // Register 25..31.
+  static const int kStoredRegisters = kFramePointer;
+  // Return address (stored from link register, read into pc on return).
+  static const int kReturnAddress = kStoredRegisters + 7 * kPointerSize;
+  static const int kCallerFrame = kReturnAddress + kPointerSize;
+  // Stack parameters placed by caller.
+  static const int kSecondaryReturnAddress =
+      kCallerFrame + kStackFrameExtraParamSlot * kPointerSize;
+  static const int kIsolate = kSecondaryReturnAddress + kPointerSize;
+
+  // Below the frame pointer.
+  // Register parameters stored by setup code.
+  static const int kDirectCall = kFramePointer - kPointerSize;
+  static const int kStackHighEnd = kDirectCall - kPointerSize;
+  static const int kNumOutputRegisters = kStackHighEnd - kPointerSize;
+  static const int kRegisterOutput = kNumOutputRegisters - kPointerSize;
+  static const int kInputEnd = kRegisterOutput - kPointerSize;
+  static const int kInputStart = kInputEnd - kPointerSize;
+  static const int kStartIndex = kInputStart - kPointerSize;
+  static const int kInputString = kStartIndex - kPointerSize;
+  // When adding local variables remember to push space for them in
+  // the frame in GetCode.
+  static const int kSuccessfulCaptures = kInputString - kPointerSize;
+  static const int kInputStartMinusOne = kSuccessfulCaptures - kPointerSize;
+  // First register address. Following registers are below it on the stack.
+  static const int kRegisterZero = kInputStartMinusOne - kPointerSize;
+
+  // Initial size of code buffer.
+  static const size_t kRegExpCodeSize = 1024;
+
+  // Load a number of characters at the given offset from the
+  // current position, into the current-character register.
+  void LoadCurrentCharacterUnchecked(int cp_offset, int character_count);
+
+  // Check whether preemption has been requested.
+  void CheckPreemption();
+
+  // Check whether we are exceeding the stack limit on the backtrack stack.
+  void CheckStackLimit();
+
+
+  // Generate a call to CheckStackGuardState.
+  void CallCheckStackGuardState(Register scratch);
+
+  // The frame_pointer()-relative location of a regexp register.
+  MemOperand register_location(int register_index);
+
+  // Register holding the current input position as negative offset from
+  // the end of the string.
+  inline Register current_input_offset() { return r27; }
+
+  // The register containing the current character after LoadCurrentCharacter.
+  inline Register current_character() { return r28; }
+
+  // Register holding address of the end of the input string.
+  inline Register end_of_input_address() { return r30; }
+
+  // Register holding the frame address. Local variables, parameters and
+  // regexp registers are addressed relative to this.
+  inline Register frame_pointer() { return fp; }
+
+  // The register containing the backtrack stack top. Provides a meaningful
+  // name to the register.
+  inline Register backtrack_stackpointer() { return r29; }
+
+  // Register holding pointer to the current code object.
+  inline Register code_pointer() { return r26; }
+
+  // Byte size of chars in the string to match (decided by the Mode argument)
+  inline int char_size() { return static_cast<int>(mode_); }
+
+  // Equivalent to a conditional branch to the label, unless the label
+  // is NULL, in which case it is a conditional Backtrack.
+  void BranchOrBacktrack(Condition condition, Label* to, CRegister cr = cr7);
+
+  // Call and return internally in the generated code in a way that
+  // is GC-safe (i.e., doesn't leave absolute code addresses on the stack)
+  inline void SafeCall(Label* to, Condition cond = al, CRegister cr = cr7);
+  inline void SafeReturn();
+  inline void SafeCallTarget(Label* name);
+
+  // Pushes the value of a register on the backtrack stack. Decrements the
+  // stack pointer by a word size and stores the register's value there.
+  inline void Push(Register source);
+
+  // Pops a value from the backtrack stack. Reads the word at the stack pointer
+  // and increments it by a word size.
+  inline void Pop(Register target);
+
+  Isolate* isolate() const { return masm_->isolate(); }
+
+  MacroAssembler* masm_;
+
+  // Which mode to generate code for (Latin1 or UC16).
+  Mode mode_;
+
+  // One greater than maximal register index actually used.
+  int num_registers_;
+
+  // Number of registers to output at the end (the saved registers
+  // are always 0..num_saved_registers_-1)
+  int num_saved_registers_;
+
+  // Labels used internally.
+  Label entry_label_;
+  Label start_label_;
+  Label success_label_;
+  Label backtrack_label_;
+  Label exit_label_;
+  Label check_preempt_label_;
+  Label stack_overflow_label_;
+  Label internal_failure_label_;
+};
+
+// Set of non-volatile registers saved/restored by generated regexp code.
+const RegList kRegExpCalleeSaved =
+    1 << 25 | 1 << 26 | 1 << 27 | 1 << 28 | 1 << 29 | 1 << 30 | 1 << 31;
+
+#endif  // V8_INTERPRETED_REGEXP
+}
+}  // namespace v8::internal
+
+#endif  // V8_PPC_REGEXP_MACRO_ASSEMBLER_PPC_H_
diff --git a/deps/v8/src/ppc/simulator-ppc.cc b/deps/v8/src/ppc/simulator-ppc.cc
new file mode 100644 (file)
index 0000000..0d10153
--- /dev/null
@@ -0,0 +1,3803 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdarg.h>
+#include <stdlib.h>
+#include <cmath>
+
+#include "src/v8.h"
+
+#if V8_TARGET_ARCH_PPC
+
+#include "src/assembler.h"
+#include "src/codegen.h"
+#include "src/disasm.h"
+#include "src/ppc/constants-ppc.h"
+#include "src/ppc/frames-ppc.h"
+#include "src/ppc/simulator-ppc.h"
+
+#if defined(USE_SIMULATOR)
+
+// Only build the simulator if not compiling for real PPC hardware.
+namespace v8 {
+namespace internal {
+
+// This macro provides a platform independent use of sscanf. The reason for
+// SScanF not being implemented in a platform independent way through
+// ::v8::internal::OS in the same way as SNPrintF is that the
+// Windows C Run-Time Library does not provide vsscanf.
+#define SScanF sscanf  // NOLINT
+
+// The PPCDebugger class is used by the simulator while debugging simulated
+// PowerPC code.
+class PPCDebugger {
+ public:
+  explicit PPCDebugger(Simulator* sim) : sim_(sim) {}
+  ~PPCDebugger();
+
+  void Stop(Instruction* instr);
+  void Info(Instruction* instr);
+  void Debug();
+
+ private:
+  static const Instr kBreakpointInstr = (TWI | 0x1f * B21);
+  static const Instr kNopInstr = (ORI);  // ori 0, 0, 0
+
+  Simulator* sim_;
+
+  intptr_t GetRegisterValue(int regnum);
+  double GetRegisterPairDoubleValue(int regnum);
+  double GetFPDoubleRegisterValue(int regnum);
+  bool GetValue(const char* desc, intptr_t* value);
+  bool GetFPDoubleValue(const char* desc, double* value);
+
+  // Set or delete a breakpoint. Returns true if successful.
+  bool SetBreakpoint(Instruction* break_pc);
+  bool DeleteBreakpoint(Instruction* break_pc);
+
+  // Undo and redo all breakpoints. This is needed to bracket disassembly and
+  // execution to skip past breakpoints when run from the debugger.
+  void UndoBreakpoints();
+  void RedoBreakpoints();
+};
+
+
+PPCDebugger::~PPCDebugger() {}
+
+
+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitializeCoverage() {
+  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+  if (file_name != NULL) {
+    coverage_log = fopen(file_name, "aw+");
+  }
+}
+
+
+void PPCDebugger::Stop(Instruction* instr) {
+  // Get the stop code.
+  uint32_t code = instr->SvcValue() & kStopCodeMask;
+  // Retrieve the encoded address, which comes just after this stop.
+  char** msg_address =
+      reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+  char* msg = *msg_address;
+  DCHECK(msg != NULL);
+
+  // Update this stop description.
+  if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+    sim_->watched_stops_[code].desc = msg;
+  }
+
+  if (strlen(msg) > 0) {
+    if (coverage_log != NULL) {
+      fprintf(coverage_log, "%s\n", msg);
+      fflush(coverage_log);
+    }
+    // Overwrite the instruction and address with nops.
+    instr->SetInstructionBits(kNopInstr);
+    reinterpret_cast<Instruction*>(msg_address)->SetInstructionBits(kNopInstr);
+  }
+  sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
+}
+
+#else  // ndef GENERATED_CODE_COVERAGE
+
+static void InitializeCoverage() {}
+
+
+void PPCDebugger::Stop(Instruction* instr) {
+  // Get the stop code.
+  // Note: the use of kStopCodeMask here is not quite right on PowerPC.
+  uint32_t code = instr->SvcValue() & kStopCodeMask;
+  // Retrieve the encoded address, which comes just after this stop.
+  char* msg =
+      *reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+  // Update this stop description.
+  if (sim_->isWatchedStop(code) && !sim_->watched_stops_[code].desc) {
+    sim_->watched_stops_[code].desc = msg;
+  }
+  // Print the stop message and code if it is not the default code.
+  if (code != kMaxStopCode) {
+    PrintF("Simulator hit stop %u: %s\n", code, msg);
+  } else {
+    PrintF("Simulator hit %s\n", msg);
+  }
+  sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
+  Debug();
+}
+#endif
+
+
+void PPCDebugger::Info(Instruction* instr) {
+  // Retrieve the encoded address immediately following the Info breakpoint.
+  char* msg =
+      *reinterpret_cast<char**>(sim_->get_pc() + Instruction::kInstrSize);
+  PrintF("Simulator info %s\n", msg);
+  sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize + kPointerSize);
+}
+
+
+intptr_t PPCDebugger::GetRegisterValue(int regnum) {
+  return sim_->get_register(regnum);
+}
+
+
+double PPCDebugger::GetRegisterPairDoubleValue(int regnum) {
+  return sim_->get_double_from_register_pair(regnum);
+}
+
+
+double PPCDebugger::GetFPDoubleRegisterValue(int regnum) {
+  return sim_->get_double_from_d_register(regnum);
+}
+
+
+bool PPCDebugger::GetValue(const char* desc, intptr_t* value) {
+  int regnum = Registers::Number(desc);
+  if (regnum != kNoRegister) {
+    *value = GetRegisterValue(regnum);
+    return true;
+  } else {
+    if (strncmp(desc, "0x", 2) == 0) {
+      return SScanF(desc + 2, "%" V8PRIxPTR,
+                    reinterpret_cast<uintptr_t*>(value)) == 1;
+    } else {
+      return SScanF(desc, "%" V8PRIuPTR, reinterpret_cast<uintptr_t*>(value)) ==
+             1;
+    }
+  }
+  return false;
+}
+
+
+bool PPCDebugger::GetFPDoubleValue(const char* desc, double* value) {
+  int regnum = FPRegisters::Number(desc);
+  if (regnum != kNoRegister) {
+    *value = sim_->get_double_from_d_register(regnum);
+    return true;
+  }
+  return false;
+}
+
+
+bool PPCDebugger::SetBreakpoint(Instruction* break_pc) {
+  // Check if a breakpoint can be set. If not, return without side effects.
+  if (sim_->break_pc_ != NULL) {
+    return false;
+  }
+
+  // Set the breakpoint.
+  sim_->break_pc_ = break_pc;
+  sim_->break_instr_ = break_pc->InstructionBits();
+  // The breakpoint instruction is not set in the code itself here; it will
+  // be set when the debugger shell continues.
+  return true;
+}
+
+
+bool PPCDebugger::DeleteBreakpoint(Instruction* break_pc) {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+
+  sim_->break_pc_ = NULL;
+  sim_->break_instr_ = 0;
+  return true;
+}
+
+
+void PPCDebugger::UndoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
+  }
+}
+
+
+void PPCDebugger::RedoBreakpoints() {
+  if (sim_->break_pc_ != NULL) {
+    sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
+  }
+}
+
+
+void PPCDebugger::Debug() {
+  intptr_t last_pc = -1;
+  bool done = false;
+
+#define COMMAND_SIZE 63
+#define ARG_SIZE 255
+
+#define STR(a) #a
+#define XSTR(a) STR(a)
+
+  char cmd[COMMAND_SIZE + 1];
+  char arg1[ARG_SIZE + 1];
+  char arg2[ARG_SIZE + 1];
+  char* argv[3] = {cmd, arg1, arg2};
+
+  // make sure to have a proper terminating character if reaching the limit
+  cmd[COMMAND_SIZE] = 0;
+  arg1[ARG_SIZE] = 0;
+  arg2[ARG_SIZE] = 0;
+
+  // Undo all set breakpoints while running in the debugger shell. This will
+  // make them invisible to all commands.
+  UndoBreakpoints();
+  // Disable tracing while simulating
+  bool trace = ::v8::internal::FLAG_trace_sim;
+  ::v8::internal::FLAG_trace_sim = false;
+
+  while (!done && !sim_->has_bad_pc()) {
+    if (last_pc != sim_->get_pc()) {
+      disasm::NameConverter converter;
+      disasm::Disassembler dasm(converter);
+      // use a reasonably large buffer
+      v8::internal::EmbeddedVector<char, 256> buffer;
+      dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(sim_->get_pc()));
+      PrintF("  0x%08" V8PRIxPTR "  %s\n", sim_->get_pc(), buffer.start());
+      last_pc = sim_->get_pc();
+    }
+    char* line = ReadLine("sim> ");
+    if (line == NULL) {
+      break;
+    } else {
+      char* last_input = sim_->last_debugger_input();
+      if (strcmp(line, "\n") == 0 && last_input != NULL) {
+        line = last_input;
+      } else {
+        // Ownership is transferred to sim_.
+        sim_->set_last_debugger_input(line);
+      }
+      // Use sscanf to parse the individual parts of the command line. At the
+      // moment no command expects more than two parameters.
+      int argc = SScanF(line,
+                        "%" XSTR(COMMAND_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s "
+                        "%" XSTR(ARG_SIZE) "s",
+                        cmd, arg1, arg2);
+      if ((strcmp(cmd, "si") == 0) || (strcmp(cmd, "stepi") == 0)) {
+        intptr_t value;
+
+        // If at a breakpoint, proceed past it.
+        if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
+                ->InstructionBits() == 0x7d821008) {
+          sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+        } else {
+          sim_->ExecuteInstruction(
+              reinterpret_cast<Instruction*>(sim_->get_pc()));
+        }
+
+        if (argc == 2 && last_pc != sim_->get_pc() && GetValue(arg1, &value)) {
+          for (int i = 1; i < value; i++) {
+            disasm::NameConverter converter;
+            disasm::Disassembler dasm(converter);
+            // use a reasonably large buffer
+            v8::internal::EmbeddedVector<char, 256> buffer;
+            dasm.InstructionDecode(buffer,
+                                   reinterpret_cast<byte*>(sim_->get_pc()));
+            PrintF("  0x%08" V8PRIxPTR "  %s\n", sim_->get_pc(),
+                   buffer.start());
+            sim_->ExecuteInstruction(
+                reinterpret_cast<Instruction*>(sim_->get_pc()));
+          }
+        }
+      } else if ((strcmp(cmd, "c") == 0) || (strcmp(cmd, "cont") == 0)) {
+        // If at a breakpoint, proceed past it.
+        if ((reinterpret_cast<Instruction*>(sim_->get_pc()))
+                ->InstructionBits() == 0x7d821008) {
+          sim_->set_pc(sim_->get_pc() + Instruction::kInstrSize);
+        } else {
+          // Execute the one instruction we broke at with breakpoints disabled.
+          sim_->ExecuteInstruction(
+              reinterpret_cast<Instruction*>(sim_->get_pc()));
+        }
+        // Leave the debugger shell.
+        done = true;
+      } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
+        if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
+          intptr_t value;
+          double dvalue;
+          if (strcmp(arg1, "all") == 0) {
+            for (int i = 0; i < kNumRegisters; i++) {
+              value = GetRegisterValue(i);
+              PrintF("    %3s: %08" V8PRIxPTR, Registers::Name(i), value);
+              if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
+                  (i % 2) == 0) {
+                dvalue = GetRegisterPairDoubleValue(i);
+                PrintF(" (%f)\n", dvalue);
+              } else if (i != 0 && !((i + 1) & 3)) {
+                PrintF("\n");
+              }
+            }
+            PrintF("  pc: %08" V8PRIxPTR "  lr: %08" V8PRIxPTR
+                   "  "
+                   "ctr: %08" V8PRIxPTR "  xer: %08x  cr: %08x\n",
+                   sim_->special_reg_pc_, sim_->special_reg_lr_,
+                   sim_->special_reg_ctr_, sim_->special_reg_xer_,
+                   sim_->condition_reg_);
+          } else if (strcmp(arg1, "alld") == 0) {
+            for (int i = 0; i < kNumRegisters; i++) {
+              value = GetRegisterValue(i);
+              PrintF("     %3s: %08" V8PRIxPTR " %11" V8PRIdPTR,
+                     Registers::Name(i), value, value);
+              if ((argc == 3 && strcmp(arg2, "fp") == 0) && i < 8 &&
+                  (i % 2) == 0) {
+                dvalue = GetRegisterPairDoubleValue(i);
+                PrintF(" (%f)\n", dvalue);
+              } else if (!((i + 1) % 2)) {
+                PrintF("\n");
+              }
+            }
+            PrintF("   pc: %08" V8PRIxPTR "  lr: %08" V8PRIxPTR
+                   "  "
+                   "ctr: %08" V8PRIxPTR "  xer: %08x  cr: %08x\n",
+                   sim_->special_reg_pc_, sim_->special_reg_lr_,
+                   sim_->special_reg_ctr_, sim_->special_reg_xer_,
+                   sim_->condition_reg_);
+          } else if (strcmp(arg1, "allf") == 0) {
+            for (int i = 0; i < DoubleRegister::kNumRegisters; i++) {
+              dvalue = GetFPDoubleRegisterValue(i);
+              uint64_t as_words = bit_cast<uint64_t>(dvalue);
+              PrintF("%3s: %f 0x%08x %08x\n", FPRegisters::Name(i), dvalue,
+                     static_cast<uint32_t>(as_words >> 32),
+                     static_cast<uint32_t>(as_words & 0xffffffff));
+            }
+          } else if (arg1[0] == 'r' &&
+                     (arg1[1] >= '0' && arg1[1] <= '9' &&
+                      (arg1[2] == '\0' || (arg1[2] >= '0' && arg1[2] <= '9' &&
+                                           arg1[3] == '\0')))) {
+            int regnum = strtoul(&arg1[1], 0, 10);
+            if (regnum != kNoRegister) {
+              value = GetRegisterValue(regnum);
+              PrintF("%s: 0x%08" V8PRIxPTR " %" V8PRIdPTR "\n", arg1, value,
+                     value);
+            } else {
+              PrintF("%s unrecognized\n", arg1);
+            }
+          } else {
+            if (GetValue(arg1, &value)) {
+              PrintF("%s: 0x%08" V8PRIxPTR " %" V8PRIdPTR "\n", arg1, value,
+                     value);
+            } else if (GetFPDoubleValue(arg1, &dvalue)) {
+              uint64_t as_words = bit_cast<uint64_t>(dvalue);
+              PrintF("%s: %f 0x%08x %08x\n", arg1, dvalue,
+                     static_cast<uint32_t>(as_words >> 32),
+                     static_cast<uint32_t>(as_words & 0xffffffff));
+            } else {
+              PrintF("%s unrecognized\n", arg1);
+            }
+          }
+        } else {
+          PrintF("print <register>\n");
+        }
+      } else if ((strcmp(cmd, "po") == 0) ||
+                 (strcmp(cmd, "printobject") == 0)) {
+        if (argc == 2) {
+          intptr_t value;
+          OFStream os(stdout);
+          if (GetValue(arg1, &value)) {
+            Object* obj = reinterpret_cast<Object*>(value);
+            os << arg1 << ": \n";
+#ifdef DEBUG
+            obj->Print(os);
+            os << "\n";
+#else
+            os << Brief(obj) << "\n";
+#endif
+          } else {
+            os << arg1 << " unrecognized\n";
+          }
+        } else {
+          PrintF("printobject <value>\n");
+        }
+      } else if (strcmp(cmd, "setpc") == 0) {
+        intptr_t value;
+
+        if (!GetValue(arg1, &value)) {
+          PrintF("%s unrecognized\n", arg1);
+          continue;
+        }
+        sim_->set_pc(value);
+      } else if (strcmp(cmd, "stack") == 0 || strcmp(cmd, "mem") == 0) {
+        intptr_t* cur = NULL;
+        intptr_t* end = NULL;
+        int next_arg = 1;
+
+        if (strcmp(cmd, "stack") == 0) {
+          cur = reinterpret_cast<intptr_t*>(sim_->get_register(Simulator::sp));
+        } else {  // "mem"
+          intptr_t value;
+          if (!GetValue(arg1, &value)) {
+            PrintF("%s unrecognized\n", arg1);
+            continue;
+          }
+          cur = reinterpret_cast<intptr_t*>(value);
+          next_arg++;
+        }
+
+        intptr_t words;  // number of pointer-sized entries to dump
+                         // ("words" is a misnomer on 64-bit)
+        if (argc == next_arg) {
+          words = 10;
+        } else {
+          if (!GetValue(argv[next_arg], &words)) {
+            words = 10;
+          }
+        }
+        end = cur + words;
+
+        while (cur < end) {
+          PrintF("  0x%08" V8PRIxPTR ":  0x%08" V8PRIxPTR " %10" V8PRIdPTR,
+                 reinterpret_cast<intptr_t>(cur), *cur, *cur);
+          HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
+          intptr_t value = *cur;
+          Heap* current_heap = v8::internal::Isolate::Current()->heap();
+          if (((value & 1) == 0) || current_heap->Contains(obj)) {
+            PrintF(" (");
+            if ((value & 1) == 0) {
+              PrintF("smi %d", PlatformSmiTagging::SmiToInt(obj));
+            } else {
+              obj->ShortPrint();
+            }
+            PrintF(")");
+          }
+          PrintF("\n");
+          cur++;
+        }
+      } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
+        disasm::NameConverter converter;
+        disasm::Disassembler dasm(converter);
+        // use a reasonably large buffer
+        v8::internal::EmbeddedVector<char, 256> buffer;
+
+        byte* prev = NULL;
+        byte* cur = NULL;
+        byte* end = NULL;
+
+        if (argc == 1) {
+          cur = reinterpret_cast<byte*>(sim_->get_pc());
+          end = cur + (10 * Instruction::kInstrSize);
+        } else if (argc == 2) {
+          int regnum = Registers::Number(arg1);
+          if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
+            // The argument is an address or a register name.
+            intptr_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(value);
+              // Disassemble 10 instructions at <arg1>.
+              end = cur + (10 * Instruction::kInstrSize);
+            }
+          } else {
+            // The argument is the number of instructions.
+            intptr_t value;
+            if (GetValue(arg1, &value)) {
+              cur = reinterpret_cast<byte*>(sim_->get_pc());
+              // Disassemble <arg1> instructions.
+              end = cur + (value * Instruction::kInstrSize);
+            }
+          }
+        } else {
+          intptr_t value1;
+          intptr_t value2;
+          if (GetValue(arg1, &value1) && GetValue(arg2, &value2)) {
+            cur = reinterpret_cast<byte*>(value1);
+            end = cur + (value2 * Instruction::kInstrSize);
+          }
+        }
+
+        while (cur < end) {
+          prev = cur;
+          cur += dasm.InstructionDecode(buffer, cur);
+          PrintF("  0x%08" V8PRIxPTR "  %s\n", reinterpret_cast<intptr_t>(prev),
+                 buffer.start());
+        }
+      } else if (strcmp(cmd, "gdb") == 0) {
+        PrintF("relinquishing control to gdb\n");
+        v8::base::OS::DebugBreak();
+        PrintF("regaining control from gdb\n");
+      } else if (strcmp(cmd, "break") == 0) {
+        if (argc == 2) {
+          intptr_t value;
+          if (GetValue(arg1, &value)) {
+            if (!SetBreakpoint(reinterpret_cast<Instruction*>(value))) {
+              PrintF("setting breakpoint failed\n");
+            }
+          } else {
+            PrintF("%s unrecognized\n", arg1);
+          }
+        } else {
+          PrintF("break <address>\n");
+        }
+      } else if (strcmp(cmd, "del") == 0) {
+        if (!DeleteBreakpoint(NULL)) {
+          PrintF("deleting breakpoint failed\n");
+        }
+      } else if (strcmp(cmd, "cr") == 0) {
+        PrintF("Condition reg: %08x\n", sim_->condition_reg_);
+      } else if (strcmp(cmd, "lr") == 0) {
+        PrintF("Link reg: %08" V8PRIxPTR "\n", sim_->special_reg_lr_);
+      } else if (strcmp(cmd, "ctr") == 0) {
+        PrintF("Ctr reg: %08" V8PRIxPTR "\n", sim_->special_reg_ctr_);
+      } else if (strcmp(cmd, "xer") == 0) {
+        PrintF("XER: %08x\n", sim_->special_reg_xer_);
+      } else if (strcmp(cmd, "fpscr") == 0) {
+        PrintF("FPSCR: %08x\n", sim_->fp_condition_reg_);
+      } else if (strcmp(cmd, "stop") == 0) {
+        intptr_t value;
+        intptr_t stop_pc =
+            sim_->get_pc() - (Instruction::kInstrSize + kPointerSize);
+        Instruction* stop_instr = reinterpret_cast<Instruction*>(stop_pc);
+        Instruction* msg_address =
+            reinterpret_cast<Instruction*>(stop_pc + Instruction::kInstrSize);
+        if ((argc == 2) && (strcmp(arg1, "unstop") == 0)) {
+          // Remove the current stop.
+          if (sim_->isStopInstruction(stop_instr)) {
+            stop_instr->SetInstructionBits(kNopInstr);
+            msg_address->SetInstructionBits(kNopInstr);
+          } else {
+            PrintF("Not at debugger stop.\n");
+          }
+        } else if (argc == 3) {
+          // Print information about all/the specified breakpoint(s).
+          if (strcmp(arg1, "info") == 0) {
+            if (strcmp(arg2, "all") == 0) {
+              PrintF("Stop information:\n");
+              for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+                sim_->PrintStopInfo(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->PrintStopInfo(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          } else if (strcmp(arg1, "enable") == 0) {
+            // Enable all/the specified breakpoint(s).
+            if (strcmp(arg2, "all") == 0) {
+              for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+                sim_->EnableStop(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->EnableStop(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          } else if (strcmp(arg1, "disable") == 0) {
+            // Disable all/the specified breakpoint(s).
+            if (strcmp(arg2, "all") == 0) {
+              for (uint32_t i = 0; i < sim_->kNumOfWatchedStops; i++) {
+                sim_->DisableStop(i);
+              }
+            } else if (GetValue(arg2, &value)) {
+              sim_->DisableStop(value);
+            } else {
+              PrintF("Unrecognized argument.\n");
+            }
+          }
+        } else {
+          PrintF("Wrong usage. Use help command for more information.\n");
+        }
+      } else if ((strcmp(cmd, "t") == 0) || strcmp(cmd, "trace") == 0) {
+        ::v8::internal::FLAG_trace_sim = !::v8::internal::FLAG_trace_sim;
+        PrintF("Trace of executed instructions is %s\n",
+               ::v8::internal::FLAG_trace_sim ? "on" : "off");
+      } else if ((strcmp(cmd, "h") == 0) || (strcmp(cmd, "help") == 0)) {
+        PrintF("cont\n");
+        PrintF("  continue execution (alias 'c')\n");
+        PrintF("stepi [num instructions]\n");
+        PrintF("  step one/num instruction(s) (alias 'si')\n");
+        PrintF("print <register>\n");
+        PrintF("  print register content (alias 'p')\n");
+        PrintF("  use register name 'all' to display all integer registers\n");
+        PrintF(
+            "  use register name 'alld' to display integer registers "
+            "with decimal values\n");
+        PrintF("  use register name 'rN' to display register number 'N'\n");
+        PrintF("  add argument 'fp' to print register pair double values\n");
+        PrintF(
+            "  use register name 'allf' to display floating-point "
+            "registers\n");
+        PrintF("printobject <register>\n");
+        PrintF("  print an object from a register (alias 'po')\n");
+        PrintF("cr\n");
+        PrintF("  print condition register\n");
+        PrintF("lr\n");
+        PrintF("  print link register\n");
+        PrintF("ctr\n");
+        PrintF("  print ctr register\n");
+        PrintF("xer\n");
+        PrintF("  print XER\n");
+        PrintF("fpscr\n");
+        PrintF("  print FPSCR\n");
+        PrintF("stack [<num words>]\n");
+        PrintF("  dump stack content, default dump 10 words)\n");
+        PrintF("mem <address> [<num words>]\n");
+        PrintF("  dump memory content, default dump 10 words)\n");
+        PrintF("disasm [<instructions>]\n");
+        PrintF("disasm [<address/register>]\n");
+        PrintF("disasm [[<address/register>] <instructions>]\n");
+        PrintF("  disassemble code, default is 10 instructions\n");
+        PrintF("  from pc (alias 'di')\n");
+        PrintF("gdb\n");
+        PrintF("  enter gdb\n");
+        PrintF("break <address>\n");
+        PrintF("  set a break point on the address\n");
+        PrintF("del\n");
+        PrintF("  delete the breakpoint\n");
+        PrintF("trace (alias 't')\n");
+        PrintF("  toogle the tracing of all executed statements\n");
+        PrintF("stop feature:\n");
+        PrintF("  Description:\n");
+        PrintF("    Stops are debug instructions inserted by\n");
+        PrintF("    the Assembler::stop() function.\n");
+        PrintF("    When hitting a stop, the Simulator will\n");
+        PrintF("    stop and and give control to the PPCDebugger.\n");
+        PrintF("    The first %d stop codes are watched:\n",
+               Simulator::kNumOfWatchedStops);
+        PrintF("    - They can be enabled / disabled: the Simulator\n");
+        PrintF("      will / won't stop when hitting them.\n");
+        PrintF("    - The Simulator keeps track of how many times they \n");
+        PrintF("      are met. (See the info command.) Going over a\n");
+        PrintF("      disabled stop still increases its counter. \n");
+        PrintF("  Commands:\n");
+        PrintF("    stop info all/<code> : print infos about number <code>\n");
+        PrintF("      or all stop(s).\n");
+        PrintF("    stop enable/disable all/<code> : enables / disables\n");
+        PrintF("      all or number <code> stop(s)\n");
+        PrintF("    stop unstop\n");
+        PrintF("      ignore the stop instruction at the current location\n");
+        PrintF("      from now on\n");
+      } else {
+        PrintF("Unknown command: %s\n", cmd);
+      }
+    }
+  }
+
+  // Add all the breakpoints back to stop execution and enter the debugger
+  // shell when hit.
+  RedoBreakpoints();
+  // Restore tracing
+  ::v8::internal::FLAG_trace_sim = trace;
+
+#undef COMMAND_SIZE
+#undef ARG_SIZE
+
+#undef STR
+#undef XSTR
+}
+
+
+static bool ICacheMatch(void* one, void* two) {
+  DCHECK((reinterpret_cast<intptr_t>(one) & CachePage::kPageMask) == 0);
+  DCHECK((reinterpret_cast<intptr_t>(two) & CachePage::kPageMask) == 0);
+  return one == two;
+}
+
+
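+// Hashes a page-aligned address for use as a key in the I-cache hash map.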
+static uint32_t ICacheHash(void* key) {
+  return static_cast<uint32_t>(reinterpret_cast<uintptr_t>(key)) >> 2;
+}
+
+
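+// Returns true if [start, start + size] lies entirely within one I-cache
+// page; FlushICache uses this to split flushes at page boundaries.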
+static bool AllOnOnePage(uintptr_t start, int size) {
+  intptr_t start_page = (start & ~CachePage::kPageMask);
+  intptr_t end_page = ((start + size) & ~CachePage::kPageMask);
+  return start_page == end_page;
+}
+
+
+void Simulator::set_last_debugger_input(char* input) {
+  DeleteArray(last_debugger_input_);
+  last_debugger_input_ = input;
+}
+
+
+void Simulator::FlushICache(v8::internal::HashMap* i_cache, void* start_addr,
+                            size_t size) {
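+  // Round the range out to whole cache lines, then invalidate it one page at
+  // a time (validity bytes are tracked per page).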
+  intptr_t start = reinterpret_cast<intptr_t>(start_addr);
+  int intra_line = (start & CachePage::kLineMask);
+  start -= intra_line;
+  size += intra_line;
+  size = ((size - 1) | CachePage::kLineMask) + 1;
+  int offset = (start & CachePage::kPageMask);
+  while (!AllOnOnePage(start, size - 1)) {
+    int bytes_to_flush = CachePage::kPageSize - offset;
+    FlushOnePage(i_cache, start, bytes_to_flush);
+    start += bytes_to_flush;
+    size -= bytes_to_flush;
+    DCHECK_EQ(0, static_cast<int>(start & CachePage::kPageMask));
+    offset = 0;
+  }
+  if (size != 0) {
+    FlushOnePage(i_cache, start, size);
+  }
+}
+
+
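+// Finds the I-cache page entry for |page|, allocating a new CachePage on
+// first use.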
+CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
+  v8::internal::HashMap::Entry* entry =
+      i_cache->Lookup(page, ICacheHash(page), true);
+  if (entry->value == NULL) {
+    CachePage* new_page = new CachePage();
+    entry->value = new_page;
+  }
+  return reinterpret_cast<CachePage*>(entry->value);
+}
+
+
+// Flush from start up to and not including start + size.
+void Simulator::FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+                             int size) {
+  DCHECK(size <= CachePage::kPageSize);
+  DCHECK(AllOnOnePage(start, size - 1));
+  DCHECK((start & CachePage::kLineMask) == 0);
+  DCHECK((size & CachePage::kLineMask) == 0);
+  void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
+  int offset = (start & CachePage::kPageMask);
+  CachePage* cache_page = GetCachePage(i_cache, page);
+  char* valid_bytemap = cache_page->ValidityByte(offset);
+  memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
+}
+
+
+void Simulator::CheckICache(v8::internal::HashMap* i_cache,
+                            Instruction* instr) {
+  intptr_t address = reinterpret_cast<intptr_t>(instr);
+  void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
+  void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
+  int offset = (address & CachePage::kPageMask);
+  CachePage* cache_page = GetCachePage(i_cache, page);
+  char* cache_valid_byte = cache_page->ValidityByte(offset);
+  bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
+  char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
+  if (cache_hit) {
+    // Check that the data in memory matches the contents of the I-cache.
+    CHECK_EQ(0,
+             memcmp(reinterpret_cast<void*>(instr),
+                    cache_page->CachedData(offset), Instruction::kInstrSize));
+  } else {
+    // Cache miss.  Load memory into the cache.
+    memcpy(cached_line, line, CachePage::kLineLength);
+    *cache_valid_byte = CachePage::LINE_VALID;
+  }
+}
+
+
+void Simulator::Initialize(Isolate* isolate) {
+  if (isolate->simulator_initialized()) return;
+  isolate->set_simulator_initialized(true);
+  ::v8::internal::ExternalReference::set_redirector(isolate,
+                                                    &RedirectExternalReference);
+}
+
+
+Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
+  i_cache_ = isolate_->simulator_i_cache();
+  if (i_cache_ == NULL) {
+    i_cache_ = new v8::internal::HashMap(&ICacheMatch);
+    isolate_->set_simulator_i_cache(i_cache_);
+  }
+  Initialize(isolate);
+// Set up simulator support first. Some of this information is needed to
+// set up the architecture state.
+#if V8_TARGET_ARCH_PPC64
+  size_t stack_size = 2 * 1024 * 1024;  // allocate 2MB for stack
+#else
+  size_t stack_size = 1 * 1024 * 1024;  // allocate 1MB for stack
+#endif
+  stack_ = reinterpret_cast<char*>(malloc(stack_size));
+  pc_modified_ = false;
+  icount_ = 0;
+  break_pc_ = NULL;
+  break_instr_ = 0;
+
+  // Set up architecture state.
+  // All registers are initialized to zero to start with.
+  for (int i = 0; i < kNumGPRs; i++) {
+    registers_[i] = 0;
+  }
+  condition_reg_ = 0;
+  fp_condition_reg_ = 0;
+  special_reg_pc_ = 0;
+  special_reg_lr_ = 0;
+  special_reg_ctr_ = 0;
+
+  // Initializing FP registers.
+  for (int i = 0; i < kNumFPRs; i++) {
+    fp_registers_[i] = 0.0;
+  }
+
+  // The sp is initialized to point to the bottom (high address) of the
+  // allocated stack area. To be safe against potential stack underflows we
+  // leave some buffer below.
+  registers_[sp] = reinterpret_cast<intptr_t>(stack_) + stack_size - 64;
+  InitializeCoverage();
+
+  last_debugger_input_ = NULL;
+}
+
+
+Simulator::~Simulator() {}
+
+
+// When the generated code calls an external reference we need to catch that in
+// the simulator.  The external reference will be a function compiled for the
+// host architecture.  We need to call that function instead of trying to
+// execute it with the simulator.  We do that by redirecting the external
+// reference to an svc (Supervisor Call) instruction that is handled by
+// the simulator.  We write the original destination of the jump just at a known
+// offset from the svc instruction so the simulator knows what to call.
+class Redirection {
+ public:
+  Redirection(void* external_function, ExternalReference::Type type)
+      : external_function_(external_function),
+        swi_instruction_(rtCallRedirInstr | kCallRtRedirected),
+        type_(type),
+        next_(NULL) {
+    Isolate* isolate = Isolate::Current();
+    next_ = isolate->simulator_redirection();
+    Simulator::current(isolate)->FlushICache(
+        isolate->simulator_i_cache(),
+        reinterpret_cast<void*>(&swi_instruction_), Instruction::kInstrSize);
+    isolate->set_simulator_redirection(this);
+  }
+
+  void* address_of_swi_instruction() {
+    return reinterpret_cast<void*>(&swi_instruction_);
+  }
+
+  void* external_function() { return external_function_; }
+  ExternalReference::Type type() { return type_; }
+
+  static Redirection* Get(void* external_function,
+                          ExternalReference::Type type) {
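+    // Reuse an existing redirection for this external function if one
+    // exists; otherwise allocate a new one, which links itself into the
+    // isolate's redirection list.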
+    Isolate* isolate = Isolate::Current();
+    Redirection* current = isolate->simulator_redirection();
+    for (; current != NULL; current = current->next_) {
+      if (current->external_function_ == external_function) {
+        DCHECK_EQ(current->type(), type);
+        return current;
+      }
+    }
+    return new Redirection(external_function, type);
+  }
+
+  static Redirection* FromSwiInstruction(Instruction* swi_instruction) {
+    char* addr_of_swi = reinterpret_cast<char*>(swi_instruction);
+    char* addr_of_redirection =
+        addr_of_swi - OFFSET_OF(Redirection, swi_instruction_);
+    return reinterpret_cast<Redirection*>(addr_of_redirection);
+  }
+
+  static void* ReverseRedirection(intptr_t reg) {
+    Redirection* redirection = FromSwiInstruction(
+        reinterpret_cast<Instruction*>(reinterpret_cast<void*>(reg)));
+    return redirection->external_function();
+  }
+
+ private:
+  void* external_function_;
+  uint32_t swi_instruction_;
+  ExternalReference::Type type_;
+  Redirection* next_;
+};
+
+
+void* Simulator::RedirectExternalReference(void* external_function,
+                                           ExternalReference::Type type) {
+  Redirection* redirection = Redirection::Get(external_function, type);
+  return redirection->address_of_swi_instruction();
+}
+
+
+// Get the active Simulator for the current thread.
+Simulator* Simulator::current(Isolate* isolate) {
+  v8::internal::Isolate::PerIsolateThreadData* isolate_data =
+      isolate->FindOrAllocatePerThreadDataForThisThread();
+  DCHECK(isolate_data != NULL);
+
+  Simulator* sim = isolate_data->simulator();
+  if (sim == NULL) {
+    // TODO(146): delete the simulator object when a thread/isolate goes away.
+    sim = new Simulator(isolate);
+    isolate_data->set_simulator(sim);
+  }
+  return sim;
+}
+
+
+// Sets the register in the architecture state.
+void Simulator::set_register(int reg, intptr_t value) {
+  DCHECK((reg >= 0) && (reg < kNumGPRs));
+  registers_[reg] = value;
+}
+
+
+// Get the register from the architecture state.
+intptr_t Simulator::get_register(int reg) const {
+  DCHECK((reg >= 0) && (reg < kNumGPRs));
+  // Redundant bounds check to work around a GCC bug.
+  // See: http://gcc.gnu.org/bugzilla/show_bug.cgi?id=43949
+  if (reg >= kNumGPRs) return 0;
+  return registers_[reg];
+}
+
+
+double Simulator::get_double_from_register_pair(int reg) {
+  DCHECK((reg >= 0) && (reg < kNumGPRs) && ((reg % 2) == 0));
+
+  double dm_val = 0.0;
+#if !V8_TARGET_ARCH_PPC64  // doesn't make sense in 64bit mode
+  // Read the bits from the unsigned integer register_[] array
+  // into the double precision floating point value and return it.
+  char buffer[sizeof(fp_registers_[0])];
+  memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
+  memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
+#endif
+  return (dm_val);
+}
+
+
+// Raw access to the PC register.
+void Simulator::set_pc(intptr_t value) {
+  pc_modified_ = true;
+  special_reg_pc_ = value;
+}
+
+
+bool Simulator::has_bad_pc() const {
+  return ((special_reg_pc_ == bad_lr) || (special_reg_pc_ == end_sim_pc));
+}
+
+
+// Raw access to the PC register without the special adjustment when reading.
+intptr_t Simulator::get_pc() const { return special_reg_pc_; }
+
+
+// Runtime FP routines take:
+// - two double arguments, or
+// - one double argument and zero or one integer arguments.
+// All are constructed here from d1, d2 and r3.
+void Simulator::GetFpArgs(double* x, double* y, intptr_t* z) {
+  *x = get_double_from_d_register(1);
+  *y = get_double_from_d_register(2);
+  *z = get_register(3);
+}
+
+
+// The return value is in d1.
+void Simulator::SetFpResult(const double& result) { fp_registers_[1] = result; }
+
+
+void Simulator::TrashCallerSaveRegisters() {
+// We don't trash the registers with the return value.
+#if 0  // Trashing the volatile registers would be a good idea, but is not yet done.
+  registers_[2] = 0x50Bad4U;
+  registers_[3] = 0x50Bad4U;
+  registers_[12] = 0x50Bad4U;
+#endif
+}
+
+
+uint32_t Simulator::ReadWU(intptr_t addr, Instruction* instr) {
+  uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+  return *ptr;
+}
+
+
+int32_t Simulator::ReadW(intptr_t addr, Instruction* instr) {
+  int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+  return *ptr;
+}
+
+
+void Simulator::WriteW(intptr_t addr, uint32_t value, Instruction* instr) {
+  uint32_t* ptr = reinterpret_cast<uint32_t*>(addr);
+  *ptr = value;
+  return;
+}
+
+
+void Simulator::WriteW(intptr_t addr, int32_t value, Instruction* instr) {
+  int32_t* ptr = reinterpret_cast<int32_t*>(addr);
+  *ptr = value;
+  return;
+}
+
+
+uint16_t Simulator::ReadHU(intptr_t addr, Instruction* instr) {
+  uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+  return *ptr;
+}
+
+
+int16_t Simulator::ReadH(intptr_t addr, Instruction* instr) {
+  int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+  return *ptr;
+}
+
+
+void Simulator::WriteH(intptr_t addr, uint16_t value, Instruction* instr) {
+  uint16_t* ptr = reinterpret_cast<uint16_t*>(addr);
+  *ptr = value;
+  return;
+}
+
+
+void Simulator::WriteH(intptr_t addr, int16_t value, Instruction* instr) {
+  int16_t* ptr = reinterpret_cast<int16_t*>(addr);
+  *ptr = value;
+  return;
+}
+
+
+uint8_t Simulator::ReadBU(intptr_t addr) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  return *ptr;
+}
+
+
+int8_t Simulator::ReadB(intptr_t addr) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  return *ptr;
+}
+
+
+void Simulator::WriteB(intptr_t addr, uint8_t value) {
+  uint8_t* ptr = reinterpret_cast<uint8_t*>(addr);
+  *ptr = value;
+}
+
+
+void Simulator::WriteB(intptr_t addr, int8_t value) {
+  int8_t* ptr = reinterpret_cast<int8_t*>(addr);
+  *ptr = value;
+}
+
+
+intptr_t* Simulator::ReadDW(intptr_t addr) {
+  intptr_t* ptr = reinterpret_cast<intptr_t*>(addr);
+  return ptr;
+}
+
+
+void Simulator::WriteDW(intptr_t addr, int64_t value) {
+  int64_t* ptr = reinterpret_cast<int64_t*>(addr);
+  *ptr = value;
+  return;
+}
+
+
+// Returns the limit of the stack area to enable checking for stack overflows.
+uintptr_t Simulator::StackLimit() const {
+  // Leave a safety margin of 1024 bytes to prevent overrunning the stack when
+  // pushing values.
+  return reinterpret_cast<uintptr_t>(stack_) + 1024;
+}
+
+
+// Unsupported instructions use Format to print an error and stop execution.
+void Simulator::Format(Instruction* instr, const char* format) {
+  PrintF("Simulator found unsupported instruction:\n 0x%08" V8PRIxPTR ": %s\n",
+         reinterpret_cast<intptr_t>(instr), format);
+  UNIMPLEMENTED();
+}
+
+
+// Calculate C flag value for additions.
+bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
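+  // An unsigned 32-bit addition carries out iff the right operand exceeds
+  // what can still be added to the left without wrapping (0xffffffff - left),
+  // adjusted by one when there is a carry-in.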
+  uint32_t uleft = static_cast<uint32_t>(left);
+  uint32_t uright = static_cast<uint32_t>(right);
+  uint32_t urest = 0xffffffffU - uleft;
+
+  return (uright > urest) ||
+         (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
+}
+
+
+// Calculate C flag value for subtractions.
+bool Simulator::BorrowFrom(int32_t left, int32_t right) {
+  uint32_t uleft = static_cast<uint32_t>(left);
+  uint32_t uright = static_cast<uint32_t>(right);
+
+  return (uright > uleft);
+}
+
+
+// Calculate V flag value for additions and subtractions.
+bool Simulator::OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
+                             bool addition) {
+  bool overflow;
+  if (addition) {
+    // operands have the same sign
+    overflow = ((left >= 0 && right >= 0) || (left < 0 && right < 0))
+               // and operands and result have different sign
+               &&
+               ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+  } else {
+    // operands have different signs
+    overflow = ((left < 0 && right >= 0) || (left >= 0 && right < 0))
+               // and first operand and result have different signs
+               &&
+               ((left < 0 && alu_out >= 0) || (left >= 0 && alu_out < 0));
+  }
+  return overflow;
+}
+
+
+#if !V8_TARGET_ARCH_PPC64
+// Calls into the V8 runtime are based on this very simple interface.
+// Note: To be able to return two values from some calls the code in runtime.cc
+// uses the ObjectPair which is essentially two 32-bit values stuffed into a
+// 64-bit value. With the code below we assume that all runtime calls return
+// 64 bits of result. If they don't, the r4 result register contains a bogus
+// value, which is fine because it is caller-saved.
+typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+                                        intptr_t arg2, intptr_t arg3,
+                                        intptr_t arg4, intptr_t arg5);
+#else
+// For 64-bit, we need to be more explicit.
+typedef intptr_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1,
+                                         intptr_t arg2, intptr_t arg3,
+                                         intptr_t arg4, intptr_t arg5);
+struct ObjectPair {
+  intptr_t x;
+  intptr_t y;
+};
+
+typedef struct ObjectPair (*SimulatorRuntimeObjectPairCall)(
+    intptr_t arg0, intptr_t arg1, intptr_t arg2, intptr_t arg3, intptr_t arg4,
+    intptr_t arg5);
+#endif
+
+// These prototypes handle the four types of FP calls.
+typedef int (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
+typedef double (*SimulatorRuntimeFPCall)(double darg0);
+typedef double (*SimulatorRuntimeFPIntCall)(double darg0, intptr_t arg0);
+
+// This signature supports direct call in to API function native callback
+// (refer to InvocationCallback in v8.h).
+typedef void (*SimulatorRuntimeDirectApiCall)(intptr_t arg0);
+typedef void (*SimulatorRuntimeProfilingApiCall)(intptr_t arg0, void* arg1);
+
+// This signature supports direct call to accessor getter callback.
+typedef void (*SimulatorRuntimeDirectGetterCall)(intptr_t arg0, intptr_t arg1);
+typedef void (*SimulatorRuntimeProfilingGetterCall)(intptr_t arg0,
+                                                    intptr_t arg1, void* arg2);
+
+// Software interrupt instructions are used by the simulator to call into the
+// C-based V8 runtime.
+void Simulator::SoftwareInterrupt(Instruction* instr) {
+  int svc = instr->SvcValue();
+  switch (svc) {
+    case kCallRtRedirected: {
+      // Check if stack is aligned. Error if not aligned is reported below to
+      // include information on the function called.
+      bool stack_aligned =
+          (get_register(sp) & (::v8::internal::FLAG_sim_stack_alignment - 1)) ==
+          0;
+      Redirection* redirection = Redirection::FromSwiInstruction(instr);
+      const int kArgCount = 6;
+      int arg0_regnum = 3;
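+      // Integer arguments arrive in r3..r8. When an ObjectPair is returned
+      // through a hidden result buffer, r3 holds that buffer and the
+      // arguments shift up by one register.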
+#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+      intptr_t result_buffer = 0;
+      if (redirection->type() == ExternalReference::BUILTIN_OBJECTPAIR_CALL) {
+        result_buffer = get_register(r3);
+        arg0_regnum++;
+      }
+#endif
+      intptr_t arg[kArgCount];
+      for (int i = 0; i < kArgCount; i++) {
+        arg[i] = get_register(arg0_regnum + i);
+      }
+      bool fp_call =
+          (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
+          (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
+          (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
+          (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
+      // This is dodgy but it works because the C entry stubs are never moved.
+      // See comment in codegen-arm.cc and bug 1242173.
+      intptr_t saved_lr = special_reg_lr_;
+      intptr_t external =
+          reinterpret_cast<intptr_t>(redirection->external_function());
+      if (fp_call) {
+        double dval0, dval1;  // one or two double parameters
+        intptr_t ival;        // zero or one integer parameters
+        int iresult = 0;      // integer return value
+        double dresult = 0;   // double return value
+        GetFpArgs(&dval0, &dval1, &ival);
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          SimulatorRuntimeCall generic_target =
+              reinterpret_cast<SimulatorRuntimeCall>(external);
+          switch (redirection->type()) {
+            case ExternalReference::BUILTIN_FP_FP_CALL:
+            case ExternalReference::BUILTIN_COMPARE_CALL:
+              PrintF("Call to host function at %p with args %f, %f",
+                     FUNCTION_ADDR(generic_target), dval0, dval1);
+              break;
+            case ExternalReference::BUILTIN_FP_CALL:
+              PrintF("Call to host function at %p with arg %f",
+                     FUNCTION_ADDR(generic_target), dval0);
+              break;
+            case ExternalReference::BUILTIN_FP_INT_CALL:
+              PrintF("Call to host function at %p with args %f, %" V8PRIdPTR,
+                     FUNCTION_ADDR(generic_target), dval0, ival);
+              break;
+            default:
+              UNREACHABLE();
+              break;
+          }
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   get_register(sp));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        switch (redirection->type()) {
+          case ExternalReference::BUILTIN_COMPARE_CALL: {
+            SimulatorRuntimeCompareCall target =
+                reinterpret_cast<SimulatorRuntimeCompareCall>(external);
+            iresult = target(dval0, dval1);
+            set_register(r3, iresult);
+            break;
+          }
+          case ExternalReference::BUILTIN_FP_FP_CALL: {
+            SimulatorRuntimeFPFPCall target =
+                reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
+            dresult = target(dval0, dval1);
+            SetFpResult(dresult);
+            break;
+          }
+          case ExternalReference::BUILTIN_FP_CALL: {
+            SimulatorRuntimeFPCall target =
+                reinterpret_cast<SimulatorRuntimeFPCall>(external);
+            dresult = target(dval0);
+            SetFpResult(dresult);
+            break;
+          }
+          case ExternalReference::BUILTIN_FP_INT_CALL: {
+            SimulatorRuntimeFPIntCall target =
+                reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
+            dresult = target(dval0, ival);
+            SetFpResult(dresult);
+            break;
+          }
+          default:
+            UNREACHABLE();
+            break;
+        }
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          switch (redirection->type()) {
+            case ExternalReference::BUILTIN_COMPARE_CALL:
+              PrintF("Returned %08x\n", iresult);
+              break;
+            case ExternalReference::BUILTIN_FP_FP_CALL:
+            case ExternalReference::BUILTIN_FP_CALL:
+            case ExternalReference::BUILTIN_FP_INT_CALL:
+              PrintF("Returned %f\n", dresult);
+              break;
+            default:
+              UNREACHABLE();
+              break;
+          }
+        }
+      } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
+        // See callers of MacroAssembler::CallApiFunctionAndReturn for
+        // explanation of register usage.
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          PrintF("Call to host function at %p args %08" V8PRIxPTR,
+                 reinterpret_cast<void*>(external), arg[0]);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   get_register(sp));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        SimulatorRuntimeDirectApiCall target =
+            reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
+        target(arg[0]);
+      } else if (redirection->type() == ExternalReference::PROFILING_API_CALL) {
+        // See callers of MacroAssembler::CallApiFunctionAndReturn for
+        // explanation of register usage.
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          PrintF("Call to host function at %p args %08" V8PRIxPTR
+                 " %08" V8PRIxPTR,
+                 reinterpret_cast<void*>(external), arg[0], arg[1]);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   get_register(sp));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        SimulatorRuntimeProfilingApiCall target =
+            reinterpret_cast<SimulatorRuntimeProfilingApiCall>(external);
+        target(arg[0], Redirection::ReverseRedirection(arg[1]));
+      } else if (redirection->type() == ExternalReference::DIRECT_GETTER_CALL) {
+        // See callers of MacroAssembler::CallApiFunctionAndReturn for
+        // explanation of register usage.
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          PrintF("Call to host function at %p args %08" V8PRIxPTR
+                 " %08" V8PRIxPTR,
+                 reinterpret_cast<void*>(external), arg[0], arg[1]);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   get_register(sp));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        SimulatorRuntimeDirectGetterCall target =
+            reinterpret_cast<SimulatorRuntimeDirectGetterCall>(external);
+#if !ABI_PASSES_HANDLES_IN_REGS
+        arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+#endif
+        target(arg[0], arg[1]);
+      } else if (redirection->type() ==
+                 ExternalReference::PROFILING_GETTER_CALL) {
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          PrintF("Call to host function at %p args %08" V8PRIxPTR
+                 " %08" V8PRIxPTR " %08" V8PRIxPTR,
+                 reinterpret_cast<void*>(external), arg[0], arg[1], arg[2]);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   get_register(sp));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+        SimulatorRuntimeProfilingGetterCall target =
+            reinterpret_cast<SimulatorRuntimeProfilingGetterCall>(external);
+#if !ABI_PASSES_HANDLES_IN_REGS
+        arg[0] = *(reinterpret_cast<intptr_t*>(arg[0]));
+#endif
+        target(arg[0], arg[1], Redirection::ReverseRedirection(arg[2]));
+      } else {
+        // builtin call.
+        if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
+          SimulatorRuntimeCall target =
+              reinterpret_cast<SimulatorRuntimeCall>(external);
+          PrintF(
+              "Call to host function at %p,\n"
+              "\t\t\t\targs %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR
+              ", %08" V8PRIxPTR ", %08" V8PRIxPTR ", %08" V8PRIxPTR,
+              FUNCTION_ADDR(target), arg[0], arg[1], arg[2], arg[3], arg[4],
+              arg[5]);
+          if (!stack_aligned) {
+            PrintF(" with unaligned stack %08" V8PRIxPTR "\n",
+                   get_register(sp));
+          }
+          PrintF("\n");
+        }
+        CHECK(stack_aligned);
+#if !V8_TARGET_ARCH_PPC64
+        DCHECK(redirection->type() == ExternalReference::BUILTIN_CALL);
+        SimulatorRuntimeCall target =
+            reinterpret_cast<SimulatorRuntimeCall>(external);
+        int64_t result = target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+        int32_t lo_res = static_cast<int32_t>(result);
+        int32_t hi_res = static_cast<int32_t>(result >> 32);
+#if V8_TARGET_BIG_ENDIAN
+        if (::v8::internal::FLAG_trace_sim) {
+          PrintF("Returned %08x\n", hi_res);
+        }
+        set_register(r3, hi_res);
+        set_register(r4, lo_res);
+#else
+        if (::v8::internal::FLAG_trace_sim) {
+          PrintF("Returned %08x\n", lo_res);
+        }
+        set_register(r3, lo_res);
+        set_register(r4, hi_res);
+#endif
+#else
+        if (redirection->type() == ExternalReference::BUILTIN_CALL) {
+          SimulatorRuntimeCall target =
+              reinterpret_cast<SimulatorRuntimeCall>(external);
+          intptr_t result =
+              target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+          if (::v8::internal::FLAG_trace_sim) {
+            PrintF("Returned %08" V8PRIxPTR "\n", result);
+          }
+          set_register(r3, result);
+        } else {
+          DCHECK(redirection->type() ==
+                 ExternalReference::BUILTIN_OBJECTPAIR_CALL);
+          SimulatorRuntimeObjectPairCall target =
+              reinterpret_cast<SimulatorRuntimeObjectPairCall>(external);
+          struct ObjectPair result =
+              target(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5]);
+          if (::v8::internal::FLAG_trace_sim) {
+            PrintF("Returned %08" V8PRIxPTR ", %08" V8PRIxPTR "\n", result.x,
+                   result.y);
+          }
+#if ABI_RETURNS_OBJECT_PAIRS_IN_REGS
+          set_register(r3, result.x);
+          set_register(r4, result.y);
+#else
+          memcpy(reinterpret_cast<void*>(result_buffer), &result,
+                 sizeof(struct ObjectPair));
+#endif
+        }
+#endif
+      }
+      set_pc(saved_lr);
+      break;
+    }
+    case kBreakpoint: {
+      PPCDebugger dbg(this);
+      dbg.Debug();
+      break;
+    }
+    case kInfo: {
+      PPCDebugger dbg(this);
+      dbg.Info(instr);
+      break;
+    }
+    // stop uses all codes greater than or equal to 1 << 23.
+    default: {
+      if (svc >= (1 << 23)) {
+        uint32_t code = svc & kStopCodeMask;
+        if (isWatchedStop(code)) {
+          IncreaseStopCounter(code);
+        }
+        // Stop if it is enabled; otherwise continue execution by jumping
+        // over the stop instruction and the message address.
+        if (isEnabledStop(code)) {
+          PPCDebugger dbg(this);
+          dbg.Stop(instr);
+        } else {
+          set_pc(get_pc() + Instruction::kInstrSize + kPointerSize);
+        }
+      } else {
+        // This is not a valid svc code.
+        UNREACHABLE();
+        break;
+      }
+    }
+  }
+}
+
+
+// Stop helper functions.
+bool Simulator::isStopInstruction(Instruction* instr) {
+  return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
+}
+
+
+bool Simulator::isWatchedStop(uint32_t code) {
+  DCHECK(code <= kMaxStopCode);
+  return code < kNumOfWatchedStops;
+}
+
+
+bool Simulator::isEnabledStop(uint32_t code) {
+  DCHECK(code <= kMaxStopCode);
+  // Unwatched stops are always enabled.
+  return !isWatchedStop(code) ||
+         !(watched_stops_[code].count & kStopDisabledBit);
+}
+
+
+void Simulator::EnableStop(uint32_t code) {
+  DCHECK(isWatchedStop(code));
+  if (!isEnabledStop(code)) {
+    watched_stops_[code].count &= ~kStopDisabledBit;
+  }
+}
+
+
+void Simulator::DisableStop(uint32_t code) {
+  DCHECK(isWatchedStop(code));
+  if (isEnabledStop(code)) {
+    watched_stops_[code].count |= kStopDisabledBit;
+  }
+}
+
+
+void Simulator::IncreaseStopCounter(uint32_t code) {
+  DCHECK(code <= kMaxStopCode);
+  DCHECK(isWatchedStop(code));
+  if ((watched_stops_[code].count & ~(1 << 31)) == 0x7fffffff) {
+    PrintF(
+        "Stop counter for code %i has overflowed.\n"
+        "Enabling this code and reseting the counter to 0.\n",
+        code);
+    watched_stops_[code].count = 0;
+    EnableStop(code);
+  } else {
+    watched_stops_[code].count++;
+  }
+}
+
+
+// Print a stop status.
+void Simulator::PrintStopInfo(uint32_t code) {
+  DCHECK(code <= kMaxStopCode);
+  if (!isWatchedStop(code)) {
+    PrintF("Stop not watched.");
+  } else {
+    const char* state = isEnabledStop(code) ? "Enabled" : "Disabled";
+    int32_t count = watched_stops_[code].count & ~kStopDisabledBit;
+    // Don't print the state of unused breakpoints.
+    if (count != 0) {
+      if (watched_stops_[code].desc) {
+        PrintF("stop %i - 0x%x: \t%s, \tcounter = %i, \t%s\n", code, code,
+               state, count, watched_stops_[code].desc);
+      } else {
+        PrintF("stop %i - 0x%x: \t%s, \tcounter = %i\n", code, code, state,
+               count);
+      }
+    }
+  }
+}
+
+
+void Simulator::SetCR0(intptr_t result, bool setSO) {
+  int bf = 0;
+  if (result < 0) {
+    bf |= 0x80000000;
+  }
+  if (result > 0) {
+    bf |= 0x40000000;
+  }
+  if (result == 0) {
+    bf |= 0x20000000;
+  }
+  if (setSO) {
+    bf |= 0x10000000;
+  }
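+  // At this point bf holds the CR-field-0 encoding; e.g. (illustrative)
+  // result = -7 gives bf = 0x80000000 (LT), and result = 0 with setSO
+  // gives bf = 0x30000000 (EQ | SO).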
+  condition_reg_ = (condition_reg_ & ~0xF0000000) | bf;
+}
+
+
+void Simulator::ExecuteBranchConditional(Instruction* instr) {
+  int bo = instr->Bits(25, 21) << 21;
+  int offset = (instr->Bits(15, 2) << 18) >> 16;
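+  // The 14-bit BD field is moved up to bit 31 and arithmetically shifted
+  // back down, yielding a sign-extended byte offset of BD * 4; e.g.
+  // (illustrative) a BD field of 0x3FFF decodes to an offset of -4.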
+  int condition_bit = instr->Bits(20, 16);
+  int condition_mask = 0x80000000 >> condition_bit;
+  switch (bo) {
+    case DCBNZF:  // Decrement CTR; branch if CTR != 0 and condition false
+    case DCBEZF:  // Decrement CTR; branch if CTR == 0 and condition false
+      UNIMPLEMENTED();
+    case BF: {  // Branch if condition false
+      if (!(condition_reg_ & condition_mask)) {
+        if (instr->Bit(0) == 1) {  // LK flag set
+          special_reg_lr_ = get_pc() + 4;
+        }
+        set_pc(get_pc() + offset);
+      }
+      break;
+    }
+    case DCBNZT:  // Decrement CTR; branch if CTR != 0 and condition true
+    case DCBEZT:  // Decrement CTR; branch if CTR == 0 and condition true
+      UNIMPLEMENTED();
+    case BT: {  // Branch if condition true
+      if (condition_reg_ & condition_mask) {
+        if (instr->Bit(0) == 1) {  // LK flag set
+          special_reg_lr_ = get_pc() + 4;
+        }
+        set_pc(get_pc() + offset);
+      }
+      break;
+    }
+    case DCBNZ:  // Decrement CTR; branch if CTR != 0
+    case DCBEZ:  // Decrement CTR; branch if CTR == 0
+      special_reg_ctr_ -= 1;
+      if ((special_reg_ctr_ == 0) == (bo == DCBEZ)) {
+        if (instr->Bit(0) == 1) {  // LK flag set
+          special_reg_lr_ = get_pc() + 4;
+        }
+        set_pc(get_pc() + offset);
+      }
+      break;
+    case BA: {                   // Branch always
+      if (instr->Bit(0) == 1) {  // LK flag set
+        special_reg_lr_ = get_pc() + 4;
+      }
+      set_pc(get_pc() + offset);
+      break;
+    }
+    default:
+      UNIMPLEMENTED();  // Invalid encoding
+  }
+}
+
+
+// Handle execution based on instruction types.
+void Simulator::ExecuteExt1(Instruction* instr) {
+  switch (instr->Bits(10, 1) << 1) {
+    case MCRF:
+      UNIMPLEMENTED();  // Not used by V8.
+    case BCLRX: {
+      // need to check BO flag
+      intptr_t old_pc = get_pc();
+      set_pc(special_reg_lr_);
+      if (instr->Bit(0) == 1) {  // LK flag set
+        special_reg_lr_ = old_pc + 4;
+      }
+      break;
+    }
+    case BCCTRX: {
+      // need to check BO flag
+      intptr_t old_pc = get_pc();
+      set_pc(special_reg_ctr_);
+      if (instr->Bit(0) == 1) {  // LK flag set
+        special_reg_lr_ = old_pc + 4;
+      }
+      break;
+    }
+    case CRNOR:
+    case RFI:
+    case CRANDC:
+      UNIMPLEMENTED();
+    case ISYNC: {
+      // todo - simulate isync
+      break;
+    }
+    case CRXOR: {
+      int bt = instr->Bits(25, 21);
+      int ba = instr->Bits(20, 16);
+      int bb = instr->Bits(15, 11);
+      int ba_val = ((0x80000000 >> ba) & condition_reg_) == 0 ? 0 : 1;
+      int bb_val = ((0x80000000 >> bb) & condition_reg_) == 0 ? 0 : 1;
+      int bt_val = ba_val ^ bb_val;
+      bt_val = bt_val << (31 - bt);  // shift bit to correct destination
+      condition_reg_ &= ~(0x80000000 >> bt);
+      condition_reg_ |= bt_val;
+      break;
+    }
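+    // CR bit indexing example for CRXOR/CREQV (illustrative): bt = 2 shifts
+    // the computed bit to position 31 - 2 = 29, matching the clear mask
+    // 0x80000000 >> 2 = 0x20000000.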
+    case CREQV: {
+      int bt = instr->Bits(25, 21);
+      int ba = instr->Bits(20, 16);
+      int bb = instr->Bits(15, 11);
+      int ba_val = ((0x80000000 >> ba) & condition_reg_) == 0 ? 0 : 1;
+      int bb_val = ((0x80000000 >> bb) & condition_reg_) == 0 ? 0 : 1;
+      int bt_val = 1 - (ba_val ^ bb_val);
+      bt_val = bt_val << (31 - bt);  // shift bit to correct destination
+      condition_reg_ &= ~(0x80000000 >> bt);
+      condition_reg_ |= bt_val;
+      break;
+    }
+    case CRNAND:
+    case CRAND:
+    case CRORC:
+    case CROR:
+    default: {
+      UNIMPLEMENTED();  // Not used by V8.
+    }
+  }
+}
+
+
+bool Simulator::ExecuteExt2_10bit(Instruction* instr) {
+  bool found = true;
+
+  int opcode = instr->Bits(10, 1) << 1;
+  switch (opcode) {
+    case SRWX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      uint32_t rs_val = get_register(rs);
+      uintptr_t rb_val = get_register(rb);
+      intptr_t result = rs_val >> (rb_val & 0x3f);
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case SRDX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      uintptr_t rs_val = get_register(rs);
+      uintptr_t rb_val = get_register(rb);
+      intptr_t result = rs_val >> (rb_val & 0x7f);
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      break;
+    }
+#endif
+    case SRAW: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      int32_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      intptr_t result = rs_val >> (rb_val & 0x3f);
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case SRAD: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      intptr_t result = rs_val >> (rb_val & 0x7f);
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      break;
+    }
+#endif
+    case SRAWIX: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      int sh = instr->Bits(15, 11);
+      int32_t rs_val = get_register(rs);
+      intptr_t result = rs_val >> sh;
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      break;
+    }
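+    // e.g. (illustrative): rs_val = -8 with sh = 2 gives result = -2; the
+    // arithmetic shift preserves the sign bit.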
+#if V8_TARGET_ARCH_PPC64
+    case EXTSW: {
+      const int shift = kBitsPerPointer - 32;
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      intptr_t rs_val = get_register(rs);
+      intptr_t ra_val = (rs_val << shift) >> shift;
+      set_register(ra, ra_val);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(ra_val);
+      }
+      break;
+    }
+#endif
+    case EXTSH: {
+      const int shift = kBitsPerPointer - 16;
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      intptr_t rs_val = get_register(rs);
+      intptr_t ra_val = (rs_val << shift) >> shift;
+      set_register(ra, ra_val);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(ra_val);
+      }
+      break;
+    }
+    case EXTSB: {
+      const int shift = kBitsPerPointer - 8;
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      intptr_t rs_val = get_register(rs);
+      intptr_t ra_val = (rs_val << shift) >> shift;
+      set_register(ra, ra_val);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(ra_val);
+      }
+      break;
+    }
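+    // The (val << shift) >> shift idiom in EXTSW/EXTSH/EXTSB sign-extends
+    // the low bits; e.g. (illustrative, 32-bit) 0xFF << 24 >> 24 arithmetic
+    // gives 0xFFFFFFFF, i.e. -1.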
+    case LFSUX:
+    case LFSX: {
+      int frt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      intptr_t rb_val = get_register(rb);
+      int32_t val = ReadW(ra_val + rb_val, instr);
+      float* fptr = reinterpret_cast<float*>(&val);
+      set_d_register_from_double(frt, static_cast<double>(*fptr));
+      if (opcode == LFSUX) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+    case LFDUX:
+    case LFDX: {
+      int frt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      intptr_t rb_val = get_register(rb);
+      double* dptr = reinterpret_cast<double*>(ReadDW(ra_val + rb_val));
+      set_d_register_from_double(frt, *dptr);
+      if (opcode == LFDUX) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+    case STFSUX:
+    case STFSX: {
+      int frs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      intptr_t rb_val = get_register(rb);
+      float frs_val = static_cast<float>(get_double_from_d_register(frs));
+      int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
+      WriteW(ra_val + rb_val, *p, instr);
+      if (opcode == STFSUX) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+    case STFDUX:
+    case STFDX: {
+      int frs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      intptr_t rb_val = get_register(rb);
+      double frs_val = get_double_from_d_register(frs);
+      int64_t* p = reinterpret_cast<int64_t*>(&frs_val);
+      WriteDW(ra_val + rb_val, *p);
+      if (opcode == STFDUX) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+    case SYNC: {
+      // todo - simulate sync
+      break;
+    }
+    case ICBI: {
+      // todo - simulate icbi
+      break;
+    }
+    default: {
+      found = false;
+      break;
+    }
+  }
+
+  if (found) return found;
+
+  found = true;
+  opcode = instr->Bits(10, 2) << 2;
+  switch (opcode) {
+    case SRADIX: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
+      intptr_t rs_val = get_register(rs);
+      intptr_t result = rs_val >> sh;
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      break;
+    }
+    default: {
+      found = false;
+      break;
+    }
+  }
+
+  return found;
+}
+
+
+bool Simulator::ExecuteExt2_9bit_part1(Instruction* instr) {
+  bool found = true;
+
+  int opcode = instr->Bits(9, 1) << 1;
+  switch (opcode) {
+    case TW: {
+      // used for call redirection in simulation mode
+      SoftwareInterrupt(instr);
+      break;
+    }
+    case CMP: {
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      int cr = instr->Bits(25, 23);
+      uint32_t bf = 0;
+#if V8_TARGET_ARCH_PPC64
+      int L = instr->Bit(21);
+      if (L) {
+#endif
+        intptr_t ra_val = get_register(ra);
+        intptr_t rb_val = get_register(rb);
+        if (ra_val < rb_val) {
+          bf |= 0x80000000;
+        }
+        if (ra_val > rb_val) {
+          bf |= 0x40000000;
+        }
+        if (ra_val == rb_val) {
+          bf |= 0x20000000;
+        }
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        int32_t ra_val = get_register(ra);
+        int32_t rb_val = get_register(rb);
+        if (ra_val < rb_val) {
+          bf |= 0x80000000;
+        }
+        if (ra_val > rb_val) {
+          bf |= 0x40000000;
+        }
+        if (ra_val == rb_val) {
+          bf |= 0x20000000;
+        }
+      }
+#endif
+      uint32_t condition_mask = 0xF0000000U >> (cr * 4);
+      uint32_t condition = bf >> (cr * 4);
+      condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+      break;
+    }
+    case SUBFCX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      // int oe = instr->Bit(10);
+      uintptr_t ra_val = get_register(ra);
+      uintptr_t rb_val = get_register(rb);
+      uintptr_t alu_out = ~ra_val + rb_val + 1;
+      set_register(rt, alu_out);
+      // If the signs of rb_val and alu_out differ, carry = 0.
+      if ((alu_out ^ rb_val) & 0x80000000) {
+        special_reg_xer_ &= ~0xF0000000;
+      } else {
+        special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
+      }
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(alu_out);
+      }
+      // todo - handle OE bit
+      break;
+    }
+    case ADDCX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      // int oe = instr->Bit(10);
+      uintptr_t ra_val = get_register(ra);
+      uintptr_t rb_val = get_register(rb);
+      uintptr_t alu_out = ra_val + rb_val;
+      // Check overflow
+      if (~ra_val < rb_val) {
+        special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
+      } else {
+        special_reg_xer_ &= ~0xF0000000;
+      }
+      set_register(rt, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(static_cast<intptr_t>(alu_out));
+      }
+      // todo - handle OE bit
+      break;
+    }
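+    // Carry-detection example (illustrative, 32-bit): ra_val = 0xFFFFFFFF
+    // and rb_val = 1 give ~ra_val = 0 < 1, so XER[CA] is set as the sum
+    // wraps to 0.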
+    case MULHWX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      int32_t ra_val = (get_register(ra) & 0xFFFFFFFF);
+      int32_t rb_val = (get_register(rb) & 0xFFFFFFFF);
+      int64_t alu_out = (int64_t)ra_val * (int64_t)rb_val;
+      alu_out >>= 32;
+      set_register(rt, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(static_cast<intptr_t>(alu_out));
+      }
+      // todo - handle OE bit
+      break;
+    }
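+    // e.g. (illustrative): ra_val = 0x40000000 and rb_val = 4 give the
+    // 64-bit product 0x100000000, so rt receives the high word, 1.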
+    case NEGX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      intptr_t ra_val = get_register(ra);
+      intptr_t alu_out = 1 + ~ra_val;
+#if V8_TARGET_ARCH_PPC64
+      intptr_t one = 1;  // work-around gcc
+      intptr_t kOverflowVal = (one << 63);
+#else
+      intptr_t kOverflowVal = kMinInt;
+#endif
+      set_register(rt, alu_out);
+      if (instr->Bit(10)) {  // OE bit set
+        if (ra_val == kOverflowVal) {
+          special_reg_xer_ |= 0xC0000000;  // set SO,OV
+        } else {
+          special_reg_xer_ &= ~0x40000000;  // clear OV
+        }
+      }
+      if (instr->Bit(0)) {  // RC bit set
+        bool setSO = (special_reg_xer_ & 0x80000000);
+        SetCR0(alu_out, setSO);
+      }
+      break;
+    }
+    case SLWX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      uint32_t rs_val = get_register(rs);
+      uintptr_t rb_val = get_register(rb);
+      uint32_t result = rs_val << (rb_val & 0x3f);
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case SLDX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      uintptr_t rs_val = get_register(rs);
+      uintptr_t rb_val = get_register(rb);
+      uintptr_t result = rs_val << (rb_val & 0x7f);
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      break;
+    }
+    case MFVSRD: {
+      DCHECK(!instr->Bit(0));
+      int frt = instr->RTValue();
+      int ra = instr->RAValue();
+      double frt_val = get_double_from_d_register(frt);
+      int64_t* p = reinterpret_cast<int64_t*>(&frt_val);
+      set_register(ra, *p);
+      break;
+    }
+    case MFVSRWZ: {
+      DCHECK(!instr->Bit(0));
+      int frt = instr->RTValue();
+      int ra = instr->RAValue();
+      double frt_val = get_double_from_d_register(frt);
+      int64_t* p = reinterpret_cast<int64_t*>(&frt_val);
+      set_register(ra, static_cast<uint32_t>(*p));
+      break;
+    }
+    case MTVSRD: {
+      DCHECK(!instr->Bit(0));
+      int frt = instr->RTValue();
+      int ra = instr->RAValue();
+      int64_t ra_val = get_register(ra);
+      double* p = reinterpret_cast<double*>(&ra_val);
+      set_d_register_from_double(frt, *p);
+      break;
+    }
+    case MTVSRWA: {
+      DCHECK(!instr->Bit(0));
+      int frt = instr->RTValue();
+      int ra = instr->RAValue();
+      int64_t ra_val = static_cast<int32_t>(get_register(ra));
+      double* p = reinterpret_cast<double*>(&ra_val);
+      set_d_register_from_double(frt, *p);
+      break;
+    }
+    case MTVSRWZ: {
+      DCHECK(!instr->Bit(0));
+      int frt = instr->RTValue();
+      int ra = instr->RAValue();
+      uint64_t ra_val = static_cast<uint32_t>(get_register(ra));
+      double* p = reinterpret_cast<double*>(&ra_val);
+      set_d_register_from_double(frt, *p);
+      break;
+    }
+#endif
+    default: {
+      found = false;
+      break;
+    }
+  }
+
+  return found;
+}
+
+
+void Simulator::ExecuteExt2_9bit_part2(Instruction* instr) {
+  int opcode = instr->Bits(9, 1) << 1;
+  switch (opcode) {
+    case CNTLZWX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      uintptr_t rs_val = get_register(rs);
+      uintptr_t count = 0;
+      int n = 0;
+      uintptr_t bit = 0x80000000;
+      for (; n < 32; n++) {
+        if (bit & rs_val) break;
+        count++;
+        bit >>= 1;
+      }
+      set_register(ra, count);
+      if (instr->Bit(0)) {  // RC Bit set
+        int bf = 0;
+        if (count > 0) {
+          bf |= 0x40000000;
+        }
+        if (count == 0) {
+          bf |= 0x20000000;
+        }
+        condition_reg_ = (condition_reg_ & ~0xF0000000) | bf;
+      }
+      break;
+    }
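+    // e.g. (illustrative): rs_val = 0x00010000 has 15 leading zeros, so ra
+    // is set to 15; rs_val = 0 never matches a bit and yields 32.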
+#if V8_TARGET_ARCH_PPC64
+    case CNTLZDX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      uintptr_t rs_val = get_register(rs);
+      uintptr_t count = 0;
+      int n = 0;
+      uintptr_t bit = 0x8000000000000000UL;
+      for (; n < 64; n++) {
+        if (bit & rs_val) break;
+        count++;
+        bit >>= 1;
+      }
+      set_register(ra, count);
+      if (instr->Bit(0)) {  // RC Bit set
+        int bf = 0;
+        if (count > 0) {
+          bf |= 0x40000000;
+        }
+        if (count == 0) {
+          bf |= 0x20000000;
+        }
+        condition_reg_ = (condition_reg_ & ~0xF0000000) | bf;
+      }
+      break;
+    }
+#endif
+    case ANDX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      intptr_t alu_out = rs_val & rb_val;
+      set_register(ra, alu_out);
+      if (instr->Bit(0)) {  // RC Bit set
+        SetCR0(alu_out);
+      }
+      break;
+    }
+    case ANDCX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      intptr_t alu_out = rs_val & ~rb_val;
+      set_register(ra, alu_out);
+      if (instr->Bit(0)) {  // RC Bit set
+        SetCR0(alu_out);
+      }
+      break;
+    }
+    case CMPL: {
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      int cr = instr->Bits(25, 23);
+      uint32_t bf = 0;
+#if V8_TARGET_ARCH_PPC64
+      int L = instr->Bit(21);
+      if (L) {
+#endif
+        uintptr_t ra_val = get_register(ra);
+        uintptr_t rb_val = get_register(rb);
+        if (ra_val < rb_val) {
+          bf |= 0x80000000;
+        }
+        if (ra_val > rb_val) {
+          bf |= 0x40000000;
+        }
+        if (ra_val == rb_val) {
+          bf |= 0x20000000;
+        }
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        uint32_t ra_val = get_register(ra);
+        uint32_t rb_val = get_register(rb);
+        if (ra_val < rb_val) {
+          bf |= 0x80000000;
+        }
+        if (ra_val > rb_val) {
+          bf |= 0x40000000;
+        }
+        if (ra_val == rb_val) {
+          bf |= 0x20000000;
+        }
+      }
+#endif
+      uint32_t condition_mask = 0xF0000000U >> (cr * 4);
+      uint32_t condition = bf >> (cr * 4);
+      condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+      break;
+    }
+    case SUBFX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      // int oe = instr->Bit(10);
+      intptr_t ra_val = get_register(ra);
+      intptr_t rb_val = get_register(rb);
+      intptr_t alu_out = rb_val - ra_val;
+      // todo - figure out underflow
+      set_register(rt, alu_out);
+      if (instr->Bit(0)) {  // RC Bit set
+        SetCR0(alu_out);
+      }
+      // todo - handle OE bit
+      break;
+    }
+    case ADDZEX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      intptr_t ra_val = get_register(ra);
+      if (special_reg_xer_ & 0x20000000) {
+        ra_val += 1;
+      }
+      set_register(rt, ra_val);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(ra_val);
+      }
+      // todo - handle OE bit
+      break;
+    }
+    case NORX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      intptr_t alu_out = ~(rs_val | rb_val);
+      set_register(ra, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(alu_out);
+      }
+      break;
+    }
+    case MULLW: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      int32_t ra_val = (get_register(ra) & 0xFFFFFFFF);
+      int32_t rb_val = (get_register(rb) & 0xFFFFFFFF);
+      int32_t alu_out = ra_val * rb_val;
+      set_register(rt, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(alu_out);
+      }
+      // todo - handle OE bit
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case MULLD: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      int64_t ra_val = get_register(ra);
+      int64_t rb_val = get_register(rb);
+      int64_t alu_out = ra_val * rb_val;
+      set_register(rt, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(alu_out);
+      }
+      // todo - handle OE bit
+      break;
+    }
+#endif
+    case DIVW: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      int32_t ra_val = get_register(ra);
+      int32_t rb_val = get_register(rb);
+      bool overflow = (ra_val == kMinInt && rb_val == -1);
+      // result is undefined if divisor is zero or if operation
+      // is 0x80000000 / -1.
+      int32_t alu_out = (rb_val == 0 || overflow) ? -1 : ra_val / rb_val;
+      set_register(rt, alu_out);
+      if (instr->Bit(10)) {  // OE bit set
+        if (overflow) {
+          special_reg_xer_ |= 0xC0000000;  // set SO,OV
+        } else {
+          special_reg_xer_ &= ~0x40000000;  // clear OV
+        }
+      }
+      if (instr->Bit(0)) {  // RC bit set
+        bool setSO = (special_reg_xer_ & 0x80000000);
+        SetCR0(alu_out, setSO);
+      }
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case DIVD: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      int64_t ra_val = get_register(ra);
+      int64_t rb_val = get_register(rb);
+      int64_t one = 1;  // work-around gcc
+      int64_t kMinLongLong = (one << 63);
+      // result is undefined if divisor is zero or if operation
+      // is 0x80000000_00000000 / -1.
+      int64_t alu_out =
+          (rb_val == 0 || (ra_val == kMinLongLong && rb_val == -1))
+              ? -1
+              : ra_val / rb_val;
+      set_register(rt, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(alu_out);
+      }
+      // todo - handle OE bit
+      break;
+    }
+#endif
+    case ADDX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      // int oe = instr->Bit(10);
+      intptr_t ra_val = get_register(ra);
+      intptr_t rb_val = get_register(rb);
+      intptr_t alu_out = ra_val + rb_val;
+      set_register(rt, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(alu_out);
+      }
+      // todo - handle OE bit
+      break;
+    }
+    case XORX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      intptr_t alu_out = rs_val ^ rb_val;
+      set_register(ra, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(alu_out);
+      }
+      break;
+    }
+    case ORX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      intptr_t alu_out = rs_val | rb_val;
+      set_register(ra, alu_out);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(alu_out);
+      }
+      break;
+    }
+    case MFSPR: {
+      int rt = instr->RTValue();
+      int spr = instr->Bits(20, 11);
+      if (spr != 256) {
+        UNIMPLEMENTED();  // Only LR supported
+      }
+      set_register(rt, special_reg_lr_);
+      break;
+    }
+    case MTSPR: {
+      int rt = instr->RTValue();
+      intptr_t rt_val = get_register(rt);
+      int spr = instr->Bits(20, 11);
+      if (spr == 256) {
+        special_reg_lr_ = rt_val;
+      } else if (spr == 288) {
+        special_reg_ctr_ = rt_val;
+      } else if (spr == 32) {
+        special_reg_xer_ = rt_val;
+      } else {
+        UNIMPLEMENTED();  // Only LR, CTR, and XER supported
+      }
+      break;
+    }
+    case MFCR: {
+      int rt = instr->RTValue();
+      set_register(rt, condition_reg_);
+      break;
+    }
+    case STWUX:
+    case STWX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int32_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      WriteW(ra_val + rb_val, rs_val, instr);
+      if (opcode == STWUX) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+    case STBUX:
+    case STBX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int8_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      WriteB(ra_val + rb_val, rs_val);
+      if (opcode == STBUX) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+    case STHUX:
+    case STHX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int16_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      WriteH(ra_val + rb_val, rs_val, instr);
+      if (opcode == STHUX) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+    case LWZX:
+    case LWZUX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      intptr_t rb_val = get_register(rb);
+      set_register(rt, ReadWU(ra_val + rb_val, instr));
+      if (opcode == LWZUX) {
+        DCHECK(ra != 0 && ra != rt);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+#if V8_TARGET_ARCH_PPC64
+    case LDX:
+    case LDUX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      intptr_t rb_val = get_register(rb);
+      intptr_t* result = ReadDW(ra_val + rb_val);
+      set_register(rt, *result);
+      if (opcode == LDUX) {
+        DCHECK(ra != 0 && ra != rt);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+    case STDX:
+    case STDUX: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      intptr_t rs_val = get_register(rs);
+      intptr_t rb_val = get_register(rb);
+      WriteDW(ra_val + rb_val, rs_val);
+      if (opcode == STDUX) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+#endif
+    case LBZX:
+    case LBZUX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      intptr_t rb_val = get_register(rb);
+      set_register(rt, ReadBU(ra_val + rb_val) & 0xFF);
+      if (opcode == LBZUX) {
+        DCHECK(ra != 0 && ra != rt);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+    case LHZX:
+    case LHZUX: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int rb = instr->RBValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      intptr_t rb_val = get_register(rb);
+      set_register(rt, ReadHU(ra_val + rb_val, instr) & 0xFFFF);
+      if (opcode == LHZUX) {
+        DCHECK(ra != 0 && ra != rt);
+        set_register(ra, ra_val + rb_val);
+      }
+      break;
+    }
+    case DCBF: {
+      // todo - simulate dcbf
+      break;
+    }
+    default: {
+      PrintF("Unimplemented: %08x\n", instr->InstructionBits());
+      UNIMPLEMENTED();  // Not used by V8.
+    }
+  }
+}
+
+
+void Simulator::ExecuteExt2(Instruction* instr) {
+  // Check the encodings that use bits 10-1 of the opcode field first.
+  if (ExecuteExt2_10bit(instr)) return;
+  // Then fall back to the 9-bit encodings.
+  if (ExecuteExt2_9bit_part1(instr)) return;
+  ExecuteExt2_9bit_part2(instr);
+}
+
+
+void Simulator::ExecuteExt4(Instruction* instr) {
+  switch (instr->Bits(5, 1) << 1) {
+    case FDIV: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = fra_val / frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case FSUB: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = fra_val - frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case FADD: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = fra_val + frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case FSQRT: {
+      int frt = instr->RTValue();
+      int frb = instr->RBValue();
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = std::sqrt(frb_val);
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case FSEL: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      int frc = instr->RCValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      double frc_val = get_double_from_d_register(frc);
+      double frt_val = ((fra_val >= 0.0) ? frc_val : frb_val);
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case FMUL: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frc = instr->RCValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frc_val = get_double_from_d_register(frc);
+      double frt_val = fra_val * frc_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case FMSUB: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      int frc = instr->RCValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      double frc_val = get_double_from_d_register(frc);
+      double frt_val = (fra_val * frc_val) - frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case FMADD: {
+      int frt = instr->RTValue();
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      int frc = instr->RCValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      double frc_val = get_double_from_d_register(frc);
+      double frt_val = (fra_val * frc_val) + frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+  }
+  int opcode = instr->Bits(10, 1) << 1;
+  switch (opcode) {
+    case FCMPU: {
+      int fra = instr->RAValue();
+      int frb = instr->RBValue();
+      double fra_val = get_double_from_d_register(fra);
+      double frb_val = get_double_from_d_register(frb);
+      int cr = instr->Bits(25, 23);
+      int bf = 0;
+      if (fra_val < frb_val) {
+        bf |= 0x80000000;
+      }
+      if (fra_val > frb_val) {
+        bf |= 0x40000000;
+      }
+      if (fra_val == frb_val) {
+        bf |= 0x20000000;
+      }
+      if (std::isunordered(fra_val, frb_val)) {
+        bf |= 0x10000000;
+      }
+      int condition_mask = 0xF0000000 >> (cr * 4);
+      int condition = bf >> (cr * 4);
+      condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+      return;
+    }
+    case FRSP: {
+      int frt = instr->RTValue();
+      int frb = instr->RBValue();
+      double frb_val = get_double_from_d_register(frb);
+      // frsp rounds an 8-byte double-precision value to a 4-byte
+      // single-precision value; the rounding itself is ignored here.
+      set_d_register_from_double(frt, frb_val);
+      if (instr->Bit(0)) {  // RC bit set
+                            //  UNIMPLEMENTED();
+      }
+      return;
+    }
+    case FCFID: {
+      int frt = instr->RTValue();
+      int frb = instr->RBValue();
+      double t_val = get_double_from_d_register(frb);
+      int64_t* frb_val_p = reinterpret_cast<int64_t*>(&t_val);
+      double frt_val = static_cast<double>(*frb_val_p);
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case FCTID: {
+      int frt = instr->RTValue();
+      int frb = instr->RBValue();
+      double frb_val = get_double_from_d_register(frb);
+      int64_t frt_val;
+      int64_t one = 1;  // work-around gcc
+      int64_t kMinLongLong = (one << 63);
+      int64_t kMaxLongLong = kMinLongLong - 1;
+
+      if (frb_val > kMaxLongLong) {
+        frt_val = kMaxLongLong;
+      } else if (frb_val < kMinLongLong) {
+        frt_val = kMinLongLong;
+      } else {
+        switch (fp_condition_reg_ & kFPRoundingModeMask) {
+          case kRoundToZero:
+            frt_val = (int64_t)frb_val;
+            break;
+          case kRoundToPlusInf:
+            frt_val = (int64_t)std::ceil(frb_val);
+            break;
+          case kRoundToMinusInf:
+            frt_val = (int64_t)std::floor(frb_val);
+            break;
+          default:
+            frt_val = (int64_t)frb_val;
+            UNIMPLEMENTED();  // Not used by V8.
+            break;
+        }
+      }
+      double* p = reinterpret_cast<double*>(&frt_val);
+      set_d_register_from_double(frt, *p);
+      return;
+    }
+    case FCTIDZ: {
+      int frt = instr->RTValue();
+      int frb = instr->RBValue();
+      double frb_val = get_double_from_d_register(frb);
+      int64_t frt_val;
+      int64_t one = 1;  // work-around gcc
+      int64_t kMinLongLong = (one << 63);
+      int64_t kMaxLongLong = kMinLongLong - 1;
+
+      if (frb_val > kMaxLongLong) {
+        frt_val = kMaxLongLong;
+      } else if (frb_val < kMinLongLong) {
+        frt_val = kMinLongLong;
+      } else {
+        frt_val = (int64_t)frb_val;
+      }
+      double* p = reinterpret_cast<double*>(&frt_val);
+      set_d_register_from_double(frt, *p);
+      return;
+    }
+    case FCTIW:
+    case FCTIWZ: {
+      int frt = instr->RTValue();
+      int frb = instr->RBValue();
+      double frb_val = get_double_from_d_register(frb);
+      int64_t frt_val;
+      if (frb_val > kMaxInt) {
+        frt_val = kMaxInt;
+      } else if (frb_val < kMinInt) {
+        frt_val = kMinInt;
+      } else {
+        if (opcode == FCTIWZ) {
+          frt_val = (int64_t)frb_val;
+        } else {
+          switch (fp_condition_reg_ & kFPRoundingModeMask) {
+            case kRoundToZero:
+              frt_val = (int64_t)frb_val;
+              break;
+            case kRoundToPlusInf:
+              frt_val = (int64_t)std::ceil(frb_val);
+              break;
+            case kRoundToMinusInf:
+              frt_val = (int64_t)std::floor(frb_val);
+              break;
+            case kRoundToNearest:
+              frt_val = (int64_t)lround(frb_val);
+
+              // Round to even if exactly halfway (lround rounds halfway
+              // cases away from zero).
+              if (std::fabs(static_cast<double>(frt_val) - frb_val) == 0.5 &&
+                  (frt_val % 2)) {
+                frt_val += ((frt_val > 0) ? -1 : 1);
+              }
+
+              break;
+            default:
+              DCHECK(false);
+              frt_val = (int64_t)frb_val;
+              break;
+          }
+        }
+      }
+      double* p = reinterpret_cast<double*>(&frt_val);
+      set_d_register_from_double(frt, *p);
+      return;
+    }
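+    // Round-to-nearest fix-up example (illustrative): frb_val = 2.5 makes
+    // lround return 3 (halfway cases round away from zero); 3 is odd and
+    // exactly 0.5 away, so it is adjusted to the even value 2, while
+    // frb_val = 3.5 rounds to 4 and is left unchanged.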
+    case FNEG: {
+      int frt = instr->RTValue();
+      int frb = instr->RBValue();
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = -frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case FMR: {
+      int frt = instr->RTValue();
+      int frb = instr->RBValue();
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = frb_val;
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case MTFSFI: {
+      int bf = instr->Bits(25, 23);
+      int imm = instr->Bits(15, 12);
+      int fp_condition_mask = 0xF0000000 >> (bf * 4);
+      fp_condition_reg_ &= ~fp_condition_mask;
+      fp_condition_reg_ |= (imm << (28 - (bf * 4)));
+      if (instr->Bit(0)) {  // RC bit set
+        condition_reg_ &= 0xF0FFFFFF;
+        condition_reg_ |= (imm << 23);
+      }
+      return;
+    }
+    case MTFSF: {
+      int frb = instr->RBValue();
+      double frb_dval = get_double_from_d_register(frb);
+      int64_t* p = reinterpret_cast<int64_t*>(&frb_dval);
+      int32_t frb_ival = static_cast<int32_t>((*p) & 0xffffffff);
+      int l = instr->Bits(25, 25);
+      if (l == 1) {
+        fp_condition_reg_ = frb_ival;
+      } else {
+        UNIMPLEMENTED();
+      }
+      if (instr->Bit(0)) {  // RC bit set
+        UNIMPLEMENTED();
+        // int w = instr->Bits(16, 16);
+        // int flm = instr->Bits(24, 17);
+      }
+      return;
+    }
+    case MFFS: {
+      int frt = instr->RTValue();
+      int64_t lval = static_cast<int64_t>(fp_condition_reg_);
+      double* p = reinterpret_cast<double*>(&lval);
+      set_d_register_from_double(frt, *p);
+      return;
+    }
+    case FABS: {
+      int frt = instr->RTValue();
+      int frb = instr->RBValue();
+      double frb_val = get_double_from_d_register(frb);
+      double frt_val = std::fabs(frb_val);
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+    case FRIM: {
+      int frt = instr->RTValue();
+      int frb = instr->RBValue();
+      double frb_val = get_double_from_d_register(frb);
+      int64_t floor_val = (int64_t)frb_val;
+      if (floor_val > frb_val) floor_val--;
+      double frt_val = static_cast<double>(floor_val);
+      set_d_register_from_double(frt, frt_val);
+      return;
+    }
+  }
+  UNIMPLEMENTED();  // Not used by V8.
+}
+
+#if V8_TARGET_ARCH_PPC64
+void Simulator::ExecuteExt5(Instruction* instr) {
+  switch (instr->Bits(4, 2) << 2) {
+    case RLDICL: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      uintptr_t rs_val = get_register(rs);
+      int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
+      int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+      DCHECK(sh >= 0 && sh <= 63);
+      DCHECK(mb >= 0 && mb <= 63);
+      // rotate left
+      uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+      uintptr_t mask = 0xffffffffffffffff >> mb;
+      result &= mask;
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      return;
+    }
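+    // e.g. (illustrative): sh = 16 and mb = 48 rotate rs left by 16 and
+    // keep the low 16 bits of the result, extracting the original top
+    // halfword of rs.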
+    case RLDICR: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      uintptr_t rs_val = get_register(rs);
+      int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
+      int me = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+      DCHECK(sh >= 0 && sh <= 63);
+      DCHECK(me >= 0 && me <= 63);
+      // rotate left
+      uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+      uintptr_t mask = 0xffffffffffffffff << (63 - me);
+      result &= mask;
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      return;
+    }
+    case RLDIC: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      uintptr_t rs_val = get_register(rs);
+      int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
+      int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+      DCHECK(sh >= 0 && sh <= 63);
+      DCHECK(mb >= 0 && mb <= 63);
+      // rotate left
+      uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+      uintptr_t mask = (0xffffffffffffffff >> mb) & (0xffffffffffffffff << sh);
+      result &= mask;
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      return;
+    }
+    case RLDIMI: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      uintptr_t rs_val = get_register(rs);
+      intptr_t ra_val = get_register(ra);
+      int sh = (instr->Bits(15, 11) | (instr->Bit(1) << 5));
+      int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+      int me = 63 - sh;
+      // rotate left
+      uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+      uintptr_t mask = 0;
+      if (mb < me + 1) {
+        uintptr_t bit = 0x8000000000000000 >> mb;
+        for (; mb <= me; mb++) {
+          mask |= bit;
+          bit >>= 1;
+        }
+      } else if (mb == me + 1) {
+        mask = 0xffffffffffffffff;
+      } else {                                           // mb > me+1
+        uintptr_t bit = 0x8000000000000000 >> (me + 1);  // needs to be tested
+        mask = 0xffffffffffffffff;
+        for (; me < mb; me++) {
+          mask ^= bit;
+          bit >>= 1;
+        }
+      }
+      result &= mask;
+      ra_val &= ~mask;
+      result |= ra_val;
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      return;
+    }
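+    // Insert-mask example for RLDIMI (illustrative): sh = 2 gives me = 61;
+    // with mb = 60 the mask covers bits 60..61 (0xC), so two rotated bits
+    // are inserted while the remaining bits of ra are preserved.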
+  }
+  switch (instr->Bits(4, 1) << 1) {
+    case RLDCL: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      int rb = instr->RBValue();
+      uintptr_t rs_val = get_register(rs);
+      uintptr_t rb_val = get_register(rb);
+      int sh = (rb_val & 0x3f);
+      int mb = (instr->Bits(10, 6) | (instr->Bit(5) << 5));
+      DCHECK(sh >= 0 && sh <= 63);
+      DCHECK(mb >= 0 && mb <= 63);
+      // rotate left
+      uintptr_t result = (rs_val << sh) | (rs_val >> (64 - sh));
+      uintptr_t mask = 0xffffffffffffffff >> mb;
+      result &= mask;
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      return;
+    }
+  }
+  UNIMPLEMENTED();  // Not used by V8.
+}
+#endif
+
+
+void Simulator::ExecuteGeneric(Instruction* instr) {
+  int opcode = instr->OpcodeValue() << 26;
+  switch (opcode) {
+    case SUBFIC: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      intptr_t ra_val = get_register(ra);
+      int32_t im_val = instr->Bits(15, 0);
+      im_val = SIGN_EXT_IMM16(im_val);
+      intptr_t alu_out = im_val - ra_val;
+      set_register(rt, alu_out);
+      // todo - handle RC bit
+      break;
+    }
+    case CMPLI: {
+      int ra = instr->RAValue();
+      uint32_t im_val = instr->Bits(15, 0);
+      int cr = instr->Bits(25, 23);
+      uint32_t bf = 0;
+#if V8_TARGET_ARCH_PPC64
+      int L = instr->Bit(21);
+      if (L) {
+#endif
+        uintptr_t ra_val = get_register(ra);
+        if (ra_val < im_val) {
+          bf |= 0x80000000;
+        }
+        if (ra_val > im_val) {
+          bf |= 0x40000000;
+        }
+        if (ra_val == im_val) {
+          bf |= 0x20000000;
+        }
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        uint32_t ra_val = get_register(ra);
+        if (ra_val < im_val) {
+          bf |= 0x80000000;
+        }
+        if (ra_val > im_val) {
+          bf |= 0x40000000;
+        }
+        if (ra_val == im_val) {
+          bf |= 0x20000000;
+        }
+      }
+#endif
+      uint32_t condition_mask = 0xF0000000U >> (cr * 4);
+      uint32_t condition = bf >> (cr * 4);
+      condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+      break;
+    }
+    case CMPI: {
+      int ra = instr->RAValue();
+      int32_t im_val = instr->Bits(15, 0);
+      im_val = SIGN_EXT_IMM16(im_val);
+      int cr = instr->Bits(25, 23);
+      uint32_t bf = 0;
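+      // Same CR computation as CMPLI above, but with signed comparisons.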
+#if V8_TARGET_ARCH_PPC64
+      int L = instr->Bit(21);
+      if (L) {
+#endif
+        intptr_t ra_val = get_register(ra);
+        if (ra_val < im_val) {
+          bf |= 0x80000000;
+        }
+        if (ra_val > im_val) {
+          bf |= 0x40000000;
+        }
+        if (ra_val == im_val) {
+          bf |= 0x20000000;
+        }
+#if V8_TARGET_ARCH_PPC64
+      } else {
+        int32_t ra_val = get_register(ra);
+        if (ra_val < im_val) {
+          bf |= 0x80000000;
+        }
+        if (ra_val > im_val) {
+          bf |= 0x40000000;
+        }
+        if (ra_val == im_val) {
+          bf |= 0x20000000;
+        }
+      }
+#endif
+      uint32_t condition_mask = 0xF0000000U >> (cr * 4);
+      uint32_t condition = bf >> (cr * 4);
+      condition_reg_ = (condition_reg_ & ~condition_mask) | condition;
+      break;
+    }
+    case ADDIC: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      uintptr_t ra_val = get_register(ra);
+      uintptr_t im_val = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      uintptr_t alu_out = ra_val + im_val;
+      // Set XER[CA] when the unsigned addition carries out of the register:
+      // ra_val + im_val wraps exactly when ~ra_val < im_val.
+      if (~ra_val < im_val) {
+        special_reg_xer_ = (special_reg_xer_ & ~0xF0000000) | 0x20000000;
+      } else {
+        special_reg_xer_ &= ~0xF0000000;
+      }
+      set_register(rt, alu_out);
+      break;
+    }
+    case ADDI: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int32_t im_val = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      intptr_t alu_out;
+      if (ra == 0) {
+        alu_out = im_val;
+      } else {
+        intptr_t ra_val = get_register(ra);
+        alu_out = ra_val + im_val;
+      }
+      set_register(rt, alu_out);
+      // todo - handle RC bit
+      break;
+    }
+    case ADDIS: {
+      int rt = instr->RTValue();
+      int ra = instr->RAValue();
+      int32_t im_val = (instr->Bits(15, 0) << 16);
+      intptr_t alu_out;
+      if (ra == 0) {  // treat r0 as zero
+        alu_out = im_val;
+      } else {
+        intptr_t ra_val = get_register(ra);
+        alu_out = ra_val + im_val;
+      }
+      set_register(rt, alu_out);
+      break;
+    }
+    case BCX: {
+      ExecuteBranchConditional(instr);
+      break;
+    }
+    case BX: {
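+      // Sign-extend the 24-bit LI field (bits 25..2) into a byte offset:
+      // << 8 parks the field at the top of the word and the arithmetic
+      // >> 6 brings it back with its two low zero bits intact.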
+      int offset = (instr->Bits(25, 2) << 8) >> 6;
+      if (instr->Bit(0) == 1) {  // LK flag set
+        special_reg_lr_ = get_pc() + 4;
+      }
+      set_pc(get_pc() + offset);
+      // todo - AA flag
+      break;
+    }
+    case EXT1: {
+      ExecuteExt1(instr);
+      break;
+    }
+    case RLWIMIX: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      uint32_t rs_val = get_register(rs);
+      int32_t ra_val = get_register(ra);
+      int sh = instr->Bits(15, 11);
+      int mb = instr->Bits(10, 6);
+      int me = instr->Bits(5, 1);
+      // Rotate left; sh == 0 is special-cased to avoid an undefined shift
+      // by the full register width.
+      uint32_t result =
+          (sh == 0) ? rs_val : (rs_val << sh) | (rs_val >> (32 - sh));
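+      // Build the 32-bit insertion mask covering bits mb..me, including the
+      // wrap-around case where mb > me + 1.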
+      uint32_t mask = 0;
+      if (mb < me + 1) {
+        uint32_t bit = 0x80000000 >> mb;
+        for (; mb <= me; mb++) {
+          mask |= bit;
+          bit >>= 1;
+        }
+      } else if (mb == me + 1) {
+        mask = 0xffffffff;
+      } else {  // mb > me+1: wrap around, clearing only bits me+1..mb-1
+        uint32_t bit = 0x80000000 >> (me + 1);
+        mask = 0xffffffff;
+        for (; me + 1 < mb; me++) {
+          mask ^= bit;
+          bit >>= 1;
+        }
+      }
+      result &= mask;
+      ra_val &= ~mask;
+      result |= ra_val;
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      break;
+    }
+    case RLWINMX:
+    case RLWNMX: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      uint32_t rs_val = get_register(rs);
+      int sh = 0;
+      if (opcode == RLWINMX) {
+        sh = instr->Bits(15, 11);
+      } else {
+        int rb = instr->RBValue();
+        uint32_t rb_val = get_register(rb);
+        sh = (rb_val & 0x1f);
+      }
+      int mb = instr->Bits(10, 6);
+      int me = instr->Bits(5, 1);
+      // Rotate left; sh == 0 is special-cased to avoid an undefined shift
+      // by the full register width.
+      uint32_t result =
+          (sh == 0) ? rs_val : (rs_val << sh) | (rs_val >> (32 - sh));
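+      // Mask construction mirrors RLWIMIX above, including wrap-around.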
+      uint32_t mask = 0;
+      if (mb < me + 1) {
+        uint32_t bit = 0x80000000 >> mb;
+        for (; mb <= me; mb++) {
+          mask |= bit;
+          bit >>= 1;
+        }
+      } else if (mb == me + 1) {
+        mask = 0xffffffff;
+      } else {  // mb > me+1: wrap around, clearing only bits me+1..mb-1
+        uint32_t bit = 0x80000000 >> (me + 1);
+        mask = 0xffffffff;
+        for (; me + 1 < mb; me++) {
+          mask ^= bit;
+          bit >>= 1;
+        }
+      }
+      result &= mask;
+      set_register(ra, result);
+      if (instr->Bit(0)) {  // RC bit set
+        SetCR0(result);
+      }
+      break;
+    }
+    case ORI: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      intptr_t rs_val = get_register(rs);
+      uint32_t im_val = instr->Bits(15, 0);
+      intptr_t alu_out = rs_val | im_val;
+      set_register(ra, alu_out);
+      break;
+    }
+    case ORIS: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      intptr_t rs_val = get_register(rs);
+      uint32_t im_val = instr->Bits(15, 0);
+      intptr_t alu_out = rs_val | (im_val << 16);
+      set_register(ra, alu_out);
+      break;
+    }
+    case XORI: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      intptr_t rs_val = get_register(rs);
+      uint32_t im_val = instr->Bits(15, 0);
+      intptr_t alu_out = rs_val ^ im_val;
+      set_register(ra, alu_out);
+      // todo - set condition based SO bit
+      break;
+    }
+    case XORIS: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      intptr_t rs_val = get_register(rs);
+      uint32_t im_val = instr->Bits(15, 0);
+      intptr_t alu_out = rs_val ^ (im_val << 16);
+      set_register(ra, alu_out);
+      break;
+    }
+    case ANDIx: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      intptr_t rs_val = get_register(rs);
+      uint32_t im_val = instr->Bits(15, 0);
+      intptr_t alu_out = rs_val & im_val;
+      set_register(ra, alu_out);
+      SetCR0(alu_out);
+      break;
+    }
+    case ANDISx: {
+      int rs = instr->RSValue();
+      int ra = instr->RAValue();
+      intptr_t rs_val = get_register(rs);
+      uint32_t im_val = instr->Bits(15, 0);
+      intptr_t alu_out = rs_val & (im_val << 16);
+      set_register(ra, alu_out);
+      SetCR0(alu_out);
+      break;
+    }
+    case EXT2: {
+      ExecuteExt2(instr);
+      break;
+    }
+
+    case LWZU:
+    case LWZ: {
+      int ra = instr->RAValue();
+      int rt = instr->RTValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      set_register(rt, ReadWU(ra_val + offset, instr));
+      if (opcode == LWZU) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+
+    case LBZU:
+    case LBZ: {
+      int ra = instr->RAValue();
+      int rt = instr->RTValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      set_register(rt, ReadB(ra_val + offset) & 0xFF);
+      if (opcode == LBZU) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+
+    case STWU:
+    case STW: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int32_t rs_val = get_register(rs);
+      int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      WriteW(ra_val + offset, rs_val, instr);
+      if (opcode == STWU) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+
+    case STBU:
+    case STB: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int8_t rs_val = get_register(rs);
+      int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      WriteB(ra_val + offset, rs_val);
+      if (opcode == STBU) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+
+    case LHZU:
+    case LHZ: {
+      int ra = instr->RAValue();
+      int rt = instr->RTValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      uintptr_t result = ReadHU(ra_val + offset, instr) & 0xffff;
+      set_register(rt, result);
+      if (opcode == LHZU) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+
+    case LHA:
+    case LHAU: {
+      UNIMPLEMENTED();
+      break;
+    }
+
+    case STHU:
+    case STH: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int16_t rs_val = get_register(rs);
+      int offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      WriteH(ra_val + offset, rs_val, instr);
+      if (opcode == STHU) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+
+    case LMW:
+    case STMW: {
+      UNIMPLEMENTED();
+      break;
+    }
+
+    case LFSU:
+    case LFS: {
+      int frt = instr->RTValue();
+      int ra = instr->RAValue();
+      int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int32_t val = ReadW(ra_val + offset, instr);
+      float* fptr = reinterpret_cast<float*>(&val);
+      set_d_register_from_double(frt, static_cast<double>(*fptr));
+      if (opcode == LFSU) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+
+    case LFDU:
+    case LFD: {
+      int frt = instr->RTValue();
+      int ra = instr->RAValue();
+      int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      double* dptr = reinterpret_cast<double*>(ReadDW(ra_val + offset));
+      set_d_register_from_double(frt, *dptr);
+      if (opcode == LFDU) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+
+    case STFSU:
+    case STFS: {
+      int frs = instr->RSValue();
+      int ra = instr->RAValue();
+      int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      float frs_val = static_cast<float>(get_double_from_d_register(frs));
+      int32_t* p = reinterpret_cast<int32_t*>(&frs_val);
+      WriteW(ra_val + offset, *p, instr);
+      if (opcode == STFSU) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+
+    case STFDU:
+    case STFD: {
+      int frs = instr->RSValue();
+      int ra = instr->RAValue();
+      int32_t offset = SIGN_EXT_IMM16(instr->Bits(15, 0));
+      intptr_t ra_val = ra == 0 ? 0 : get_register(ra);
+      double frs_val = get_double_from_d_register(frs);
+      int64_t* p = reinterpret_cast<int64_t*>(&frs_val);
+      WriteDW(ra_val + offset, *p);
+      if (opcode == STFDU) {
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+
+    case EXT3:
+      UNIMPLEMENTED();
+      break;
+    case EXT4: {
+      ExecuteExt4(instr);
+      break;
+    }
+
+#if V8_TARGET_ARCH_PPC64
+    case EXT5: {
+      ExecuteExt5(instr);
+      break;
+    }
+    case LD: {
+      int ra = instr->RAValue();
+      int rt = instr->RTValue();
+      int64_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
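+      // DS-form: the low two bits of the displacement select the variant
+      // (0 = ld, 1 = ldu, 2 = lwa), so they are masked out of the offset.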
+      switch (instr->Bits(1, 0)) {
+        case 0: {  // ld
+          intptr_t* result = ReadDW(ra_val + offset);
+          set_register(rt, *result);
+          break;
+        }
+        case 1: {  // ldu
+          intptr_t* result = ReadDW(ra_val + offset);
+          set_register(rt, *result);
+          DCHECK(ra != 0);
+          set_register(ra, ra_val + offset);
+          break;
+        }
+        case 2: {  // lwa
+          intptr_t result = ReadW(ra_val + offset, instr);
+          set_register(rt, result);
+          break;
+        }
+      }
+      break;
+    }
+
+    case STD: {
+      int ra = instr->RAValue();
+      int rs = instr->RSValue();
+      int64_t ra_val = ra == 0 ? 0 : get_register(ra);
+      int64_t rs_val = get_register(rs);
+      int offset = SIGN_EXT_IMM16(instr->Bits(15, 0) & ~3);
+      WriteDW(ra_val + offset, rs_val);
+      if (instr->Bit(0) == 1) {  // This is the STDU form
+        DCHECK(ra != 0);
+        set_register(ra, ra_val + offset);
+      }
+      break;
+    }
+#endif
+
+    case FAKE_OPCODE: {
+      if (instr->Bits(MARKER_SUBOPCODE_BIT, MARKER_SUBOPCODE_BIT) == 1) {
+        int marker_code = instr->Bits(STUB_MARKER_HIGH_BIT, 0);
+        DCHECK(marker_code < F_NEXT_AVAILABLE_STUB_MARKER);
+        PrintF("Hit stub-marker: %d (EMIT_STUB_MARKER)\n", marker_code);
+      } else {
+        int fake_opcode = instr->Bits(FAKE_OPCODE_HIGH_BIT, 0);
+        if (fake_opcode == fBKPT) {
+          PPCDebugger dbg(this);
+          PrintF("Simulator hit BKPT.\n");
+          dbg.Debug();
+        } else {
+          DCHECK(fake_opcode < fLastFaker);
+          PrintF("Hit ARM opcode: %d(FAKE_OPCODE defined in constant-ppc.h)\n",
+                 fake_opcode);
+          UNIMPLEMENTED();
+        }
+      }
+      break;
+    }
+
+    default: {
+      UNIMPLEMENTED();
+      break;
+    }
+  }
+}  // NOLINT
+
+
+void Simulator::Trace(Instruction* instr) {
+  disasm::NameConverter converter;
+  disasm::Disassembler dasm(converter);
+  // Use a reasonably large buffer.
+  v8::internal::EmbeddedVector<char, 256> buffer;
+  dasm.InstructionDecode(buffer, reinterpret_cast<byte*>(instr));
+  PrintF("%05d  %08" V8PRIxPTR "  %s\n", icount_,
+         reinterpret_cast<intptr_t>(instr), buffer.start());
+}
+
+
+// Executes the current instruction.
+void Simulator::ExecuteInstruction(Instruction* instr) {
+  if (v8::internal::FLAG_check_icache) {
+    CheckICache(isolate_->simulator_i_cache(), instr);
+  }
+  pc_modified_ = false;
+  if (::v8::internal::FLAG_trace_sim) {
+    Trace(instr);
+  }
+  int opcode = instr->OpcodeValue() << 26;
+  if (opcode == TWI) {
+    SoftwareInterrupt(instr);
+  } else {
+    ExecuteGeneric(instr);
+  }
+  if (!pc_modified_) {
+    set_pc(reinterpret_cast<intptr_t>(instr) + Instruction::kInstrSize);
+  }
+}
+
+
+void Simulator::Execute() {
+  // Get the PC to simulate. Cannot use the accessor here as we need the
+  // raw PC value and not the one used as input to arithmetic instructions.
+  intptr_t program_counter = get_pc();
+
+  if (::v8::internal::FLAG_stop_sim_at == 0) {
+    // Fast version of the dispatch loop without checking whether the simulator
+    // should be stopping at a particular executed instruction.
+    while (program_counter != end_sim_pc) {
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+      icount_++;
+      ExecuteInstruction(instr);
+      program_counter = get_pc();
+    }
+  } else {
+    // FLAG_stop_sim_at is at a non-default value. Stop in the debugger when
+    // we reach the particular instruction count.
+    while (program_counter != end_sim_pc) {
+      Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
+      icount_++;
+      if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
+        PPCDebugger dbg(this);
+        dbg.Debug();
+      } else {
+        ExecuteInstruction(instr);
+      }
+      program_counter = get_pc();
+    }
+  }
+}
+
+
+void Simulator::CallInternal(byte* entry) {
+// Prepare to execute the code at entry
+#if ABI_USES_FUNCTION_DESCRIPTORS
+  // entry is the function descriptor
+  set_pc(*(reinterpret_cast<intptr_t*>(entry)));
+#else
+  // entry is the instruction address
+  set_pc(reinterpret_cast<intptr_t>(entry));
+#endif
+
+  // Put down marker for end of simulation. The simulator will stop simulation
+  // when the PC reaches this value. By saving the "end simulation" value into
+  // the LR the simulation stops when returning to this call point.
+  special_reg_lr_ = end_sim_pc;
+
+  // Remember the values of non-volatile registers.
+  intptr_t r2_val = get_register(r2);
+  intptr_t r13_val = get_register(r13);
+  intptr_t r14_val = get_register(r14);
+  intptr_t r15_val = get_register(r15);
+  intptr_t r16_val = get_register(r16);
+  intptr_t r17_val = get_register(r17);
+  intptr_t r18_val = get_register(r18);
+  intptr_t r19_val = get_register(r19);
+  intptr_t r20_val = get_register(r20);
+  intptr_t r21_val = get_register(r21);
+  intptr_t r22_val = get_register(r22);
+  intptr_t r23_val = get_register(r23);
+  intptr_t r24_val = get_register(r24);
+  intptr_t r25_val = get_register(r25);
+  intptr_t r26_val = get_register(r26);
+  intptr_t r27_val = get_register(r27);
+  intptr_t r28_val = get_register(r28);
+  intptr_t r29_val = get_register(r29);
+  intptr_t r30_val = get_register(r30);
+  intptr_t r31_val = get_register(fp);
+
+  // Set up the non-volatile registers with a known value. To be able to check
+  // that they are preserved properly across JS execution.
+  intptr_t callee_saved_value = icount_;
+  set_register(r2, callee_saved_value);
+  set_register(r13, callee_saved_value);
+  set_register(r14, callee_saved_value);
+  set_register(r15, callee_saved_value);
+  set_register(r16, callee_saved_value);
+  set_register(r17, callee_saved_value);
+  set_register(r18, callee_saved_value);
+  set_register(r19, callee_saved_value);
+  set_register(r20, callee_saved_value);
+  set_register(r21, callee_saved_value);
+  set_register(r22, callee_saved_value);
+  set_register(r23, callee_saved_value);
+  set_register(r24, callee_saved_value);
+  set_register(r25, callee_saved_value);
+  set_register(r26, callee_saved_value);
+  set_register(r27, callee_saved_value);
+  set_register(r28, callee_saved_value);
+  set_register(r29, callee_saved_value);
+  set_register(r30, callee_saved_value);
+  set_register(fp, callee_saved_value);
+
+  // Start the simulation
+  Execute();
+
+  // Check that the non-volatile registers have been preserved.
+  CHECK_EQ(callee_saved_value, get_register(r2));
+  CHECK_EQ(callee_saved_value, get_register(r13));
+  CHECK_EQ(callee_saved_value, get_register(r14));
+  CHECK_EQ(callee_saved_value, get_register(r15));
+  CHECK_EQ(callee_saved_value, get_register(r16));
+  CHECK_EQ(callee_saved_value, get_register(r17));
+  CHECK_EQ(callee_saved_value, get_register(r18));
+  CHECK_EQ(callee_saved_value, get_register(r19));
+  CHECK_EQ(callee_saved_value, get_register(r20));
+  CHECK_EQ(callee_saved_value, get_register(r21));
+  CHECK_EQ(callee_saved_value, get_register(r22));
+  CHECK_EQ(callee_saved_value, get_register(r23));
+  CHECK_EQ(callee_saved_value, get_register(r24));
+  CHECK_EQ(callee_saved_value, get_register(r25));
+  CHECK_EQ(callee_saved_value, get_register(r26));
+  CHECK_EQ(callee_saved_value, get_register(r27));
+  CHECK_EQ(callee_saved_value, get_register(r28));
+  CHECK_EQ(callee_saved_value, get_register(r29));
+  CHECK_EQ(callee_saved_value, get_register(r30));
+  CHECK_EQ(callee_saved_value, get_register(fp));
+
+  // Restore non-volatile registers with the original value.
+  set_register(r2, r2_val);
+  set_register(r13, r13_val);
+  set_register(r14, r14_val);
+  set_register(r15, r15_val);
+  set_register(r16, r16_val);
+  set_register(r17, r17_val);
+  set_register(r18, r18_val);
+  set_register(r19, r19_val);
+  set_register(r20, r20_val);
+  set_register(r21, r21_val);
+  set_register(r22, r22_val);
+  set_register(r23, r23_val);
+  set_register(r24, r24_val);
+  set_register(r25, r25_val);
+  set_register(r26, r26_val);
+  set_register(r27, r27_val);
+  set_register(r28, r28_val);
+  set_register(r29, r29_val);
+  set_register(r30, r30_val);
+  set_register(fp, r31_val);
+}
+
+
+intptr_t Simulator::Call(byte* entry, int argument_count, ...) {
+  va_list parameters;
+  va_start(parameters, argument_count);
+  // Set up arguments
+
+  // First eight arguments passed in registers r3-r10.
+  int reg_arg_count = (argument_count > 8) ? 8 : argument_count;
+  int stack_arg_count = argument_count - reg_arg_count;
+  for (int i = 0; i < reg_arg_count; i++) {
+    set_register(i + 3, va_arg(parameters, intptr_t));
+  }
+
+  // Remaining arguments passed on stack.
+  intptr_t original_stack = get_register(sp);
+  // Compute position of stack on entry to generated code.
+  intptr_t entry_stack =
+      (original_stack -
+       (kNumRequiredStackFrameSlots + stack_arg_count) * sizeof(intptr_t));
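+  // Align the stack pointer downward; anding with the negated alignment
+  // clears the low bits (the alignment is a power of two).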
+  if (base::OS::ActivationFrameAlignment() != 0) {
+    entry_stack &= -base::OS::ActivationFrameAlignment();
+  }
+  // Store remaining arguments on stack, from low to high memory.
+  // Skip the reserved frame slots (back chain and LR save) that precede the
+  // parameter area on PPC.
+  intptr_t* stack_argument =
+      reinterpret_cast<intptr_t*>(entry_stack) + kStackFrameExtraParamSlot;
+  for (int i = 0; i < stack_arg_count; i++) {
+    stack_argument[i] = va_arg(parameters, intptr_t);
+  }
+  va_end(parameters);
+  set_register(sp, entry_stack);
+
+  CallInternal(entry);
+
+  // Pop stack passed arguments.
+  CHECK_EQ(entry_stack, get_register(sp));
+  set_register(sp, original_stack);
+
+  intptr_t result = get_register(r3);
+  return result;
+}
+
+
+void Simulator::CallFP(byte* entry, double d0, double d1) {
+  set_d_register_from_double(1, d0);
+  set_d_register_from_double(2, d1);
+  CallInternal(entry);
+}
+
+
+int32_t Simulator::CallFPReturnsInt(byte* entry, double d0, double d1) {
+  CallFP(entry, d0, d1);
+  int32_t result = get_register(r3);
+  return result;
+}
+
+
+double Simulator::CallFPReturnsDouble(byte* entry, double d0, double d1) {
+  CallFP(entry, d0, d1);
+  return get_double_from_d_register(1);
+}
+
+
+uintptr_t Simulator::PushAddress(uintptr_t address) {
+  uintptr_t new_sp = get_register(sp) - sizeof(uintptr_t);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(new_sp);
+  *stack_slot = address;
+  set_register(sp, new_sp);
+  return new_sp;
+}
+
+
+uintptr_t Simulator::PopAddress() {
+  uintptr_t current_sp = get_register(sp);
+  uintptr_t* stack_slot = reinterpret_cast<uintptr_t*>(current_sp);
+  uintptr_t address = *stack_slot;
+  set_register(sp, current_sp + sizeof(uintptr_t));
+  return address;
+}
+}
+}  // namespace v8::internal
+
+#endif  // USE_SIMULATOR
+#endif  // V8_TARGET_ARCH_PPC
diff --git a/deps/v8/src/ppc/simulator-ppc.h b/deps/v8/src/ppc/simulator-ppc.h
new file mode 100644 (file)
index 0000000..98fe9a5
--- /dev/null
@@ -0,0 +1,413 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+// Declares a Simulator for PPC instructions if we are not generating a native
+// PPC binary. This Simulator allows us to run and debug PPC code generation on
+// regular desktop machines.
+// V8 calls into generated code by "calling" the CALL_GENERATED_CODE macro,
+// which will either start execution in the Simulator or forward to the real
+// entry on a PPC hardware platform.
+
+#ifndef V8_PPC_SIMULATOR_PPC_H_
+#define V8_PPC_SIMULATOR_PPC_H_
+
+#include "src/allocation.h"
+
+#if !defined(USE_SIMULATOR)
+// Running without a simulator on a native ppc platform.
+
+namespace v8 {
+namespace internal {
+
+// When running without a simulator we call the entry directly.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
+  (entry(p0, p1, p2, p3, p4))
+
+typedef int (*ppc_regexp_matcher)(String*, int, const byte*, const byte*, int*,
+                                  int, Address, int, void*, Isolate*);
+
+
+// Call the generated regexp code directly. The code at the entry address
+// should act as a function matching the type ppc_regexp_matcher.
+// The ninth argument is a dummy that reserves the space used for
+// the return address added by the ExitFrame in native calls.
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+  (FUNCTION_CAST<ppc_regexp_matcher>(entry)(p0, p1, p2, p3, p4, p5, p6, p7,   \
+                                            NULL, p8))
+
+// The stack limit beyond which we will throw stack overflow errors in
+// generated code. Because generated code on ppc uses the C stack, we
+// just use the C stack limit.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+                                            uintptr_t c_limit) {
+    USE(isolate);
+    return c_limit;
+  }
+
+  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+    return try_catch_address;
+  }
+
+  static inline void UnregisterCTryCatch() {}
+};
+}
+}  // namespace v8::internal
+
+#else  // !defined(USE_SIMULATOR)
+// Running with a simulator.
+
+#include "src/assembler.h"
+#include "src/hashmap.h"
+#include "src/ppc/constants-ppc.h"
+
+namespace v8 {
+namespace internal {
+
+class CachePage {
+ public:
+  static const int LINE_VALID = 0;
+  static const int LINE_INVALID = 1;
+
+  static const int kPageShift = 12;
+  static const int kPageSize = 1 << kPageShift;
+  static const int kPageMask = kPageSize - 1;
+  static const int kLineShift = 2;  // The cache line is only 4 bytes right now.
+  static const int kLineLength = 1 << kLineShift;
+  static const int kLineMask = kLineLength - 1;
+
+  CachePage() { memset(&validity_map_, LINE_INVALID, sizeof(validity_map_)); }
+
+  char* ValidityByte(int offset) {
+    return &validity_map_[offset >> kLineShift];
+  }
+
+  char* CachedData(int offset) { return &data_[offset]; }
+
+ private:
+  char data_[kPageSize];  // The cached data.
+  static const int kValidityMapSize = kPageSize >> kLineShift;
+  char validity_map_[kValidityMapSize];  // One byte per line.
+};
+
+
+class Simulator {
+ public:
+  friend class PPCDebugger;
+  enum Register {
+    no_reg = -1,
+    r0 = 0,
+    sp,
+    r2,
+    r3,
+    r4,
+    r5,
+    r6,
+    r7,
+    r8,
+    r9,
+    r10,
+    r11,
+    r12,
+    r13,
+    r14,
+    r15,
+    r16,
+    r17,
+    r18,
+    r19,
+    r20,
+    r21,
+    r22,
+    r23,
+    r24,
+    r25,
+    r26,
+    r27,
+    r28,
+    r29,
+    r30,
+    fp,
+    kNumGPRs = 32,
+    d0 = 0,
+    d1,
+    d2,
+    d3,
+    d4,
+    d5,
+    d6,
+    d7,
+    d8,
+    d9,
+    d10,
+    d11,
+    d12,
+    d13,
+    d14,
+    d15,
+    d16,
+    d17,
+    d18,
+    d19,
+    d20,
+    d21,
+    d22,
+    d23,
+    d24,
+    d25,
+    d26,
+    d27,
+    d28,
+    d29,
+    d30,
+    d31,
+    kNumFPRs = 32
+  };
+
+  explicit Simulator(Isolate* isolate);
+  ~Simulator();
+
+  // The currently executing Simulator instance. Potentially there can be one
+  // for each native thread.
+  static Simulator* current(v8::internal::Isolate* isolate);
+
+  // Accessors for register state.
+  void set_register(int reg, intptr_t value);
+  intptr_t get_register(int reg) const;
+  double get_double_from_register_pair(int reg);
+  void set_d_register_from_double(int dreg, const double dbl) {
+    DCHECK(dreg >= 0 && dreg < kNumFPRs);
+    fp_registers_[dreg] = dbl;
+  }
+  double get_double_from_d_register(int dreg) { return fp_registers_[dreg]; }
+
+  // Special case of set_register and get_register to access the raw PC value.
+  void set_pc(intptr_t value);
+  intptr_t get_pc() const;
+
+  Address get_sp() {
+    return reinterpret_cast<Address>(static_cast<intptr_t>(get_register(sp)));
+  }
+
+  // Accessor to the internal simulator stack area.
+  uintptr_t StackLimit() const;
+
+  // Executes PPC instructions until the PC reaches end_sim_pc.
+  void Execute();
+
+  // Call on program start.
+  static void Initialize(Isolate* isolate);
+
+  // V8 generally calls into generated JS code with 5 parameters and into
+  // generated RegExp code with 7 parameters. This is a convenience function,
+  // which sets up the simulator state and grabs the result on return.
+  intptr_t Call(byte* entry, int argument_count, ...);
+  // Alternative: call a 2-argument double function.
+  void CallFP(byte* entry, double d0, double d1);
+  int32_t CallFPReturnsInt(byte* entry, double d0, double d1);
+  double CallFPReturnsDouble(byte* entry, double d0, double d1);
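+  // A typical (hypothetical) call into generated code through the simulator,
+  // mirroring what the CALL_GENERATED_CODE macro below expands to:
+  //   Simulator* sim = Simulator::current(isolate);
+  //   intptr_t result = sim->Call(entry, 5, p0, p1, p2, p3, p4);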
+
+  // Push an address onto the JS stack.
+  uintptr_t PushAddress(uintptr_t address);
+
+  // Pop an address from the JS stack.
+  uintptr_t PopAddress();
+
+  // Debugger input.
+  void set_last_debugger_input(char* input);
+  char* last_debugger_input() { return last_debugger_input_; }
+
+  // ICache checking.
+  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
+                          size_t size);
+
+  // Returns true if pc register contains one of the 'special_values' defined
+  // below (bad_lr, end_sim_pc).
+  bool has_bad_pc() const;
+
+ private:
+  enum special_values {
+    // Known bad pc value to ensure that the simulator does not execute
+    // without being properly setup.
+    bad_lr = -1,
+    // A pc value used to signal the simulator to stop execution.  Generally
+    // the lr is set to this value on transition from native C code to
+    // simulated execution, so that the simulator can "return" to the native
+    // C code.
+    end_sim_pc = -2
+  };
+
+  // Unsupported instructions use Format to print an error and stop execution.
+  void Format(Instruction* instr, const char* format);
+
+  // Helper functions to set the conditional flags in the architecture state.
+  bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
+  bool BorrowFrom(int32_t left, int32_t right);
+  bool OverflowFrom(int32_t alu_out, int32_t left, int32_t right,
+                    bool addition);
+
+  // Helper functions to decode common "addressing" modes
+  int32_t GetShiftRm(Instruction* instr, bool* carry_out);
+  int32_t GetImm(Instruction* instr, bool* carry_out);
+  void ProcessPUW(Instruction* instr, int num_regs, int operand_size,
+                  intptr_t* start_address, intptr_t* end_address);
+  void HandleRList(Instruction* instr, bool load);
+  void HandleVList(Instruction* inst);
+  void SoftwareInterrupt(Instruction* instr);
+
+  // Stop helper functions.
+  inline bool isStopInstruction(Instruction* instr);
+  inline bool isWatchedStop(uint32_t bkpt_code);
+  inline bool isEnabledStop(uint32_t bkpt_code);
+  inline void EnableStop(uint32_t bkpt_code);
+  inline void DisableStop(uint32_t bkpt_code);
+  inline void IncreaseStopCounter(uint32_t bkpt_code);
+  void PrintStopInfo(uint32_t code);
+
+  // Read and write memory.
+  inline uint8_t ReadBU(intptr_t addr);
+  inline int8_t ReadB(intptr_t addr);
+  inline void WriteB(intptr_t addr, uint8_t value);
+  inline void WriteB(intptr_t addr, int8_t value);
+
+  inline uint16_t ReadHU(intptr_t addr, Instruction* instr);
+  inline int16_t ReadH(intptr_t addr, Instruction* instr);
+  // Note: Overloaded on the sign of the value.
+  inline void WriteH(intptr_t addr, uint16_t value, Instruction* instr);
+  inline void WriteH(intptr_t addr, int16_t value, Instruction* instr);
+
+  inline uint32_t ReadWU(intptr_t addr, Instruction* instr);
+  inline int32_t ReadW(intptr_t addr, Instruction* instr);
+  inline void WriteW(intptr_t addr, uint32_t value, Instruction* instr);
+  inline void WriteW(intptr_t addr, int32_t value, Instruction* instr);
+
+  intptr_t* ReadDW(intptr_t addr);
+  void WriteDW(intptr_t addr, int64_t value);
+
+  void Trace(Instruction* instr);
+  void SetCR0(intptr_t result, bool setSO = false);
+  void ExecuteBranchConditional(Instruction* instr);
+  void ExecuteExt1(Instruction* instr);
+  bool ExecuteExt2_10bit(Instruction* instr);
+  bool ExecuteExt2_9bit_part1(Instruction* instr);
+  void ExecuteExt2_9bit_part2(Instruction* instr);
+  void ExecuteExt2(Instruction* instr);
+  void ExecuteExt4(Instruction* instr);
+#if V8_TARGET_ARCH_PPC64
+  void ExecuteExt5(Instruction* instr);
+#endif
+  void ExecuteGeneric(Instruction* instr);
+
+  // Executes one instruction.
+  void ExecuteInstruction(Instruction* instr);
+
+  // ICache.
+  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
+  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
+                           int size);
+  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+
+  // Runtime call support.
+  static void* RedirectExternalReference(
+      void* external_function, v8::internal::ExternalReference::Type type);
+
+  // Handle arguments and return value for runtime FP functions.
+  void GetFpArgs(double* x, double* y, intptr_t* z);
+  void SetFpResult(const double& result);
+  void TrashCallerSaveRegisters();
+
+  void CallInternal(byte* entry);
+
+  // Architecture state.
+  intptr_t registers_[kNumGPRs];
+  int32_t condition_reg_;
+  int32_t fp_condition_reg_;
+  intptr_t special_reg_lr_;
+  intptr_t special_reg_pc_;
+  intptr_t special_reg_ctr_;
+  int32_t special_reg_xer_;
+
+  double fp_registers_[kNumFPRs];
+
+  // Simulator support.
+  char* stack_;
+  bool pc_modified_;
+  int icount_;
+
+  // Debugger input.
+  char* last_debugger_input_;
+
+  // Icache simulation
+  v8::internal::HashMap* i_cache_;
+
+  // Registered breakpoints.
+  Instruction* break_pc_;
+  Instr break_instr_;
+
+  v8::internal::Isolate* isolate_;
+
+  // A stop is watched if its code is less than kNumOfWatchedStops.
+  // Only watched stops support enabling/disabling and the counter feature.
+  static const uint32_t kNumOfWatchedStops = 256;
+
+  // Breakpoint is disabled if bit 31 is set.
+  static const uint32_t kStopDisabledBit = 1 << 31;
+
+  // A stop is enabled, meaning the simulator will stop when meeting the
+  // instruction, if bit 31 of watched_stops_[code].count is unset.
+  // The value watched_stops_[code].count & ~(1 << 31) indicates how many times
+  // the breakpoint was hit or gone through.
+  struct StopCountAndDesc {
+    uint32_t count;
+    char* desc;
+  };
+  StopCountAndDesc watched_stops_[kNumOfWatchedStops];
+};
+
+
+// When running with the simulator transition into simulated execution at this
+// point.
+#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4)                    \
+  reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+      FUNCTION_ADDR(entry), 5, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2,  \
+      (intptr_t)p3, (intptr_t)p4))
+
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7, p8) \
+  Simulator::current(Isolate::Current())                                      \
+      ->Call(entry, 10, (intptr_t)p0, (intptr_t)p1, (intptr_t)p2,             \
+             (intptr_t)p3, (intptr_t)p4, (intptr_t)p5, (intptr_t)p6,          \
+             (intptr_t)p7, (intptr_t)NULL, (intptr_t)p8)
+
+
+// The simulator has its own stack. Thus it has a different stack limit from
+// the C-based native code. Setting the c_limit to indicate a very small
+// stack won't cause stack overflow errors, since the simulator ignores the
+// input. This is unlikely to be an issue in practice, though it might cause
+// testing trouble down the line.
+class SimulatorStack : public v8::internal::AllStatic {
+ public:
+  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
+                                            uintptr_t c_limit) {
+    return Simulator::current(isolate)->StackLimit();
+  }
+
+  static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
+    Simulator* sim = Simulator::current(Isolate::Current());
+    return sim->PushAddress(try_catch_address);
+  }
+
+  static inline void UnregisterCTryCatch() {
+    Simulator::current(Isolate::Current())->PopAddress();
+  }
+};
+}
+}  // namespace v8::internal
+
+#endif  // !defined(USE_SIMULATOR)
+#endif  // V8_PPC_SIMULATOR_PPC_H_
diff --git a/deps/v8/src/preparser.cc b/deps/v8/src/preparser.cc
index 9fd6e23..b552676 100644 (file)
@@ -105,7 +105,7 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
     StrictMode strict_mode, bool is_generator, ParserRecorder* log) {
   log_ = log;
   // Lazy functions always have trivial outer scopes (no with/catch scopes).
-  PreParserScope top_scope(scope_, GLOBAL_SCOPE);
+  PreParserScope top_scope(scope_, SCRIPT_SCOPE);
   PreParserFactory top_factory(NULL);
   FunctionState top_state(&function_state_, &scope_, &top_scope, &top_factory);
   scope_->SetStrictMode(strict_mode);
@@ -125,13 +125,21 @@ PreParser::PreParseResult PreParser::PreParseLazyFunction(
     DCHECK_EQ(Token::RBRACE, scanner()->peek());
     if (scope_->strict_mode() == STRICT) {
       int end_pos = scanner()->location().end_pos;
-      CheckOctalLiteral(start_position, end_pos, &ok);
+      CheckStrictOctalLiteral(start_position, end_pos, &ok);
     }
   }
   return kPreParseSuccess;
 }
 
 
+PreParserExpression PreParserTraits::ParseClassLiteral(
+    PreParserIdentifier name, Scanner::Location class_name_location,
+    bool name_is_strict_reserved, int pos, bool* ok) {
+  return pre_parser_->ParseClassLiteral(name, class_name_location,
+                                        name_is_strict_reserved, pos, ok);
+}
+
+
 // Preparsing checks a JavaScript program and emits preparse-data that helps
 // a later parsing to be faster.
 // See preparser-data.h for the data.
@@ -342,6 +350,12 @@ PreParser::Statement PreParser::ParseFunctionDeclaration(bool* ok) {
 
 PreParser::Statement PreParser::ParseClassDeclaration(bool* ok) {
   Expect(Token::CLASS, CHECK_OK);
+  if (!allow_harmony_sloppy() && strict_mode() == SLOPPY) {
+    ReportMessage("sloppy_lexical");
+    *ok = false;
+    return Statement::Default();
+  }
+
   int pos = position();
   bool is_strict_reserved = false;
   Identifier name =
@@ -501,6 +515,13 @@ PreParser::Statement PreParser::ParseExpressionOrLabelledStatement(bool* ok) {
     // accept "native function" in the preparser.
   }
   // Parsed expression statement.
+  // Detect attempts at 'let' declarations in sloppy mode.
+  if (peek() == Token::IDENTIFIER && strict_mode() == SLOPPY &&
+      expr.IsIdentifier() && expr.AsIdentifier().IsLet()) {
+    ReportMessage("sloppy_lexical", NULL);
+    *ok = false;
+    return Statement::Default();
+  }
   ExpectSemicolon(CHECK_OK);
   return Statement::ExpressionStatement(expr);
 }
@@ -680,6 +701,7 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
 
   Expect(Token::FOR, CHECK_OK);
   Expect(Token::LPAREN, CHECK_OK);
+  bool is_let_identifier_expression = false;
   if (peek() != Token::SEMICOLON) {
     if (peek() == Token::VAR || peek() == Token::CONST ||
         (peek() == Token::LET && strict_mode() == STRICT)) {
@@ -701,6 +723,8 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
       }
     } else {
       Expression lhs = ParseExpression(false, CHECK_OK);
+      is_let_identifier_expression =
+          lhs.IsIdentifier() && lhs.AsIdentifier().IsLet();
       if (CheckInOrOf(lhs.IsIdentifier())) {
         ParseExpression(true, CHECK_OK);
         Expect(Token::RPAREN, CHECK_OK);
@@ -712,6 +736,13 @@ PreParser::Statement PreParser::ParseForStatement(bool* ok) {
   }
 
   // Parsed initializer at this point.
+  // Detect attempts at 'let' declarations in sloppy mode.
+  if (peek() == Token::IDENTIFIER && strict_mode() == SLOPPY &&
+      is_let_identifier_expression) {
+    ReportMessage("sloppy_lexical", NULL);
+    *ok = false;
+    return Statement::Default();
+  }
   Expect(Token::SEMICOLON, CHECK_OK);
 
   if (peek() != Token::SEMICOLON) {
@@ -863,7 +894,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
 
   // See Parser::ParseFunctionLiteral for more information about lazy parsing
   // and lazy compilation.
-  bool is_lazily_parsed = (outer_scope_type == GLOBAL_SCOPE && allow_lazy() &&
+  bool is_lazily_parsed = (outer_scope_type == SCRIPT_SCOPE && allow_lazy() &&
                            !parenthesized_function_);
   parenthesized_function_ = false;
 
@@ -906,7 +937,7 @@ PreParser::Expression PreParser::ParseFunctionLiteral(
     }
 
     int end_position = scanner()->location().end_pos;
-    CheckOctalLiteral(start_position, end_position, CHECK_OK);
+    CheckStrictOctalLiteral(start_position, end_position, CHECK_OK);
   }
 
   return Expression::Default();
@@ -928,11 +959,52 @@ void PreParser::ParseLazyFunctionLiteralBody(bool* ok) {
 }
 
 
+PreParserExpression PreParser::ParseClassLiteral(
+    PreParserIdentifier name, Scanner::Location class_name_location,
+    bool name_is_strict_reserved, int pos, bool* ok) {
+  // All parts of a ClassDeclaration and ClassExpression are strict code.
+  if (name_is_strict_reserved) {
+    ReportMessageAt(class_name_location, "unexpected_strict_reserved");
+    *ok = false;
+    return EmptyExpression();
+  }
+  if (IsEvalOrArguments(name)) {
+    ReportMessageAt(class_name_location, "strict_eval_arguments");
+    *ok = false;
+    return EmptyExpression();
+  }
+
+  PreParserScope scope = NewScope(scope_, BLOCK_SCOPE);
+  BlockState block_state(&scope_, &scope);
+  scope_->SetStrictMode(STRICT);
+  scope_->SetScopeName(name);
+
+  if (Check(Token::EXTENDS)) {
+    ParseLeftHandSideExpression(CHECK_OK);
+  }
+
+  bool has_seen_constructor = false;
+
+  Expect(Token::LBRACE, CHECK_OK);
+  while (peek() != Token::RBRACE) {
+    if (Check(Token::SEMICOLON)) continue;
+    const bool in_class = true;
+    const bool is_static = false;
+    ParsePropertyDefinition(NULL, in_class, is_static, &has_seen_constructor,
+                            CHECK_OK);
+  }
+
+  Expect(Token::RBRACE, CHECK_OK);
+
+  return Expression::Default();
+}
+
+
 PreParser::Expression PreParser::ParseV8Intrinsic(bool* ok) {
   // CallRuntime ::
   //   '%' Identifier Arguments
   Expect(Token::MOD, CHECK_OK);
-  if (!allow_natives_syntax()) {
+  if (!allow_natives()) {
     *ok = false;
     return Expression::Default();
   }
diff --git a/deps/v8/src/preparser.h b/deps/v8/src/preparser.h
index ddf9cec..18004a5 100644 (file)
@@ -83,42 +83,63 @@ class ParserBase : public Traits {
         scanner_(scanner),
         stack_overflow_(false),
         allow_lazy_(false),
-        allow_natives_syntax_(false),
-        allow_arrow_functions_(false),
+        allow_natives_(false),
+        allow_harmony_arrow_functions_(false),
         allow_harmony_object_literals_(false),
+        allow_harmony_sloppy_(false),
         zone_(zone) {}
 
   // Getters that indicate whether certain syntactical constructs are
   // allowed to be parsed by this instance of the parser.
   bool allow_lazy() const { return allow_lazy_; }
-  bool allow_natives_syntax() const { return allow_natives_syntax_; }
-  bool allow_arrow_functions() const { return allow_arrow_functions_; }
-  bool allow_modules() const { return scanner()->HarmonyModules(); }
+  bool allow_natives() const { return allow_natives_; }
+  bool allow_harmony_arrow_functions() const {
+    return allow_harmony_arrow_functions_;
+  }
+  bool allow_harmony_modules() const { return scanner()->HarmonyModules(); }
   bool allow_harmony_scoping() const { return scanner()->HarmonyScoping(); }
   bool allow_harmony_numeric_literals() const {
     return scanner()->HarmonyNumericLiterals();
   }
-  bool allow_classes() const { return scanner()->HarmonyClasses(); }
+  bool allow_harmony_classes() const { return scanner()->HarmonyClasses(); }
   bool allow_harmony_object_literals() const {
     return allow_harmony_object_literals_;
   }
+  bool allow_harmony_templates() const { return scanner()->HarmonyTemplates(); }
+  bool allow_harmony_sloppy() const { return allow_harmony_sloppy_; }
+  bool allow_harmony_unicode() const { return scanner()->HarmonyUnicode(); }
 
   // Setters that determine whether certain syntactical constructs are
   // allowed to be parsed by this instance of the parser.
   void set_allow_lazy(bool allow) { allow_lazy_ = allow; }
-  void set_allow_natives_syntax(bool allow) { allow_natives_syntax_ = allow; }
-  void set_allow_arrow_functions(bool allow) { allow_arrow_functions_ = allow; }
-  void set_allow_modules(bool allow) { scanner()->SetHarmonyModules(allow); }
+  void set_allow_natives(bool allow) { allow_natives_ = allow; }
+  void set_allow_harmony_arrow_functions(bool allow) {
+    allow_harmony_arrow_functions_ = allow;
+  }
+  void set_allow_harmony_modules(bool allow) {
+    scanner()->SetHarmonyModules(allow);
+  }
   void set_allow_harmony_scoping(bool allow) {
     scanner()->SetHarmonyScoping(allow);
   }
   void set_allow_harmony_numeric_literals(bool allow) {
     scanner()->SetHarmonyNumericLiterals(allow);
   }
-  void set_allow_classes(bool allow) { scanner()->SetHarmonyClasses(allow); }
+  void set_allow_harmony_classes(bool allow) {
+    scanner()->SetHarmonyClasses(allow);
+  }
   void set_allow_harmony_object_literals(bool allow) {
     allow_harmony_object_literals_ = allow;
   }
+  void set_allow_harmony_templates(bool allow) {
+    scanner()->SetHarmonyTemplates(allow);
+  }
+  void set_allow_harmony_sloppy(bool allow) {
+    allow_harmony_sloppy_ = allow;
+  }
+  void set_allow_harmony_unicode(bool allow) {
+    scanner()->SetHarmonyUnicode(allow);
+  }
 
  protected:
   enum AllowEvalOrArgumentsAsIdentifier {
@@ -364,17 +385,26 @@ class ParserBase : public Traits {
   }
 
   // Checks whether an octal literal was last seen between beg_pos and end_pos.
-  // If so, reports an error. Only called for strict mode.
-  void CheckOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+  // If so, reports an error. Only called for strict mode and template strings.
+  void CheckOctalLiteral(int beg_pos, int end_pos, const char* error,
+                         bool* ok) {
     Scanner::Location octal = scanner()->octal_position();
     if (octal.IsValid() && beg_pos <= octal.beg_pos &&
         octal.end_pos <= end_pos) {
-      ReportMessageAt(octal, "strict_octal_literal");
+      ReportMessageAt(octal, error);
       scanner()->clear_octal_position();
       *ok = false;
     }
   }
 
+  inline void CheckStrictOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+    CheckOctalLiteral(beg_pos, end_pos, "strict_octal_literal", ok);
+  }
+
+  inline void CheckTemplateOctalLiteral(int beg_pos, int end_pos, bool* ok) {
+    CheckOctalLiteral(beg_pos, end_pos, "template_octal_literal", ok);
+  }
+
   // Validates strict mode for function parameter lists. This has to be
   // done after parsing the function, since the function can declare
   // itself strict.
@@ -490,10 +520,8 @@ class ParserBase : public Traits {
                                                 bool* ok);
   ExpressionT ParseArrowFunctionLiteral(int start_pos, ExpressionT params_ast,
                                         bool* ok);
-  ExpressionT ParseClassLiteral(IdentifierT name,
-                                Scanner::Location function_name_location,
-                                bool name_is_strict_reserved, int pos,
-                                bool* ok);
+  ExpressionT ParseTemplateLiteral(ExpressionT tag, int start, bool* ok);
+  void AddTemplateExpression(ExpressionT);
 
   // Checks if the expression is a valid reference expression (e.g., on the
   // left-hand side of assignments). Although ruled out by ECMA as early errors,
@@ -575,9 +603,10 @@ class ParserBase : public Traits {
   bool stack_overflow_;
 
   bool allow_lazy_;
-  bool allow_natives_syntax_;
-  bool allow_arrow_functions_;
+  bool allow_natives_;
+  bool allow_harmony_arrow_functions_;
   bool allow_harmony_object_literals_;
+  bool allow_harmony_sloppy_;
 
   typename Traits::Type::Zone* zone_;  // Only used by Parser.
 };
@@ -736,6 +765,12 @@ class PreParserExpression {
                                ExpressionTypeField::encode(kCallExpression));
   }
 
+  static PreParserExpression NoTemplateTag() {
+    return PreParserExpression(TypeField::encode(kExpression) |
+                               ExpressionTypeField::encode(
+                                  kNoTemplateTagExpression));
+  }
+
   bool IsIdentifier() const {
     return TypeField::decode(code_) == kIdentifierExpression;
   }
@@ -789,6 +824,11 @@ class PreParserExpression {
   bool IsFunctionLiteral() const { return false; }
   bool IsCallNew() const { return false; }
 
+  bool IsNoTemplateTag() const {
+    return TypeField::decode(code_) == kExpression &&
+           ExpressionTypeField::decode(code_) == kNoTemplateTagExpression;
+  }
+
   PreParserExpression AsFunctionLiteral() { return *this; }
 
   bool IsBinaryOperation() const {
@@ -815,8 +855,6 @@ class PreParserExpression {
 
   int position() const { return RelocInfo::kNoPosition; }
   void set_function_token_position(int position) {}
-  void set_ast_properties(int* ast_properties) {}
-  void set_dont_optimize_reason(BailoutReason dont_optimize_reason) {}
 
  private:
   enum Type {
@@ -837,7 +875,8 @@ class PreParserExpression {
     kThisPropertyExpression,
     kPropertyExpression,
     kCallExpression,
-    kSuperExpression
+    kSuperExpression,
+    kNoTemplateTagExpression
   };
 
   explicit PreParserExpression(uint32_t expression_code)
@@ -965,6 +1004,8 @@ class PreParserScope {
   bool IsDeclared(const PreParserIdentifier& identifier) const { return false; }
   void DeclareParameter(const PreParserIdentifier& identifier, VariableMode) {}
   void RecordArgumentsUsage() {}
+  void RecordSuperPropertyUsage() {}
+  void RecordSuperConstructorCallUsage() {}
   void RecordThisUsage() {}
 
   // Allow scope->Foo() to work.
@@ -1091,18 +1132,10 @@ class PreParserFactory {
       int position) {
     return PreParserExpression::Default();
   }
-  PreParserExpression NewClassLiteral(PreParserIdentifier name,
-                                      PreParserExpression extends,
-                                      PreParserExpression constructor,
-                                      PreParserExpressionList properties,
-                                      int start_position, int end_position) {
-    return PreParserExpression::Default();
-  }
 
   // Return the object itself as AstVisitor and implement the needed
   // dummy method right in this class.
   PreParserFactory* visitor() { return this; }
-  BailoutReason dont_optimize_reason() { return kNoReason; }
   int* ast_properties() {
     static int dummy = 42;
     return &dummy;
@@ -1318,10 +1351,9 @@ class PreParserTraits {
     return PreParserExpression::Super();
   }
 
-  static PreParserExpression ClassExpression(
-      PreParserIdentifier name, PreParserExpression extends,
-      PreParserExpression constructor, PreParserExpressionList properties,
-      int start_position, int end_position, PreParserFactory* factory) {
+  static PreParserExpression DefaultConstructor(bool call_super,
+                                                PreParserScope* scope, int pos,
+                                                int end_pos) {
     return PreParserExpression::Default();
   }
 
@@ -1379,6 +1411,29 @@ class PreParserTraits {
     return 0;
   }
 
+  struct TemplateLiteralState {};
+
+  TemplateLiteralState OpenTemplateLiteral(int pos) {
+    return TemplateLiteralState();
+  }
+  void AddTemplateSpan(TemplateLiteralState*, bool) {}
+  void AddTemplateExpression(TemplateLiteralState*, PreParserExpression) {}
+  PreParserExpression CloseTemplateLiteral(TemplateLiteralState*, int,
+                                           PreParserExpression tag) {
+    if (IsTaggedTemplate(tag)) {
+      // Emulate generation of array literals for the tag callsite:
+      // the first is the array of cooked strings, the second the raw strings.
+      MaterializeTemplateCallsiteLiterals();
+    }
+    return EmptyExpression();
+  }
+  inline void MaterializeTemplateCallsiteLiterals();
+  PreParserExpression NoTemplateTag() {
+    return PreParserExpression::NoTemplateTag();
+  }
+  static bool IsTaggedTemplate(const PreParserExpression tag) {
+    return !tag.IsNoTemplateTag();
+  }
   static AstValueFactory* ast_value_factory() { return NULL; }
 
   void CheckConflictingVarDeclarations(PreParserScope scope, bool* ok) {}
@@ -1391,6 +1446,11 @@ class PreParserTraits {
       int function_token_position, FunctionLiteral::FunctionType type,
       FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
 
+  PreParserExpression ParseClassLiteral(PreParserIdentifier name,
+                                        Scanner::Location class_name_location,
+                                        bool name_is_strict_reserved, int pos,
+                                        bool* ok);
+
  private:
   PreParser* pre_parser_;
 };
@@ -1427,8 +1487,8 @@ class PreParser : public ParserBase<PreParserTraits> {
   // success (even if parsing failed, the pre-parse data successfully
   // captured the syntax error), and false if a stack-overflow happened
   // during parsing.
-  PreParseResult PreParseProgram() {
-    PreParserScope scope(scope_, GLOBAL_SCOPE);
+  PreParseResult PreParseProgram(int* materialized_literals = 0) {
+    PreParserScope scope(scope_, SCRIPT_SCOPE);
     PreParserFactory factory(NULL);
     FunctionState top_scope(&function_state_, &scope_, &scope, &factory);
     bool ok = true;
@@ -1438,7 +1498,11 @@ class PreParser : public ParserBase<PreParserTraits> {
     if (!ok) {
       ReportUnexpectedToken(scanner()->current_token());
     } else if (scope_->strict_mode() == STRICT) {
-      CheckOctalLiteral(start_position, scanner()->location().end_pos, &ok);
+      CheckStrictOctalLiteral(start_position, scanner()->location().end_pos,
+                              &ok);
+    }
+    if (materialized_literals) {
+      *materialized_literals = function_state_->materialized_literal_count();
     }
     return kPreParseSuccess;
   }
@@ -1528,10 +1592,21 @@ class PreParser : public ParserBase<PreParserTraits> {
       FunctionLiteral::ArityRestriction arity_restriction, bool* ok);
   void ParseLazyFunctionLiteralBody(bool* ok);
 
+  PreParserExpression ParseClassLiteral(PreParserIdentifier name,
+                                        Scanner::Location class_name_location,
+                                        bool name_is_strict_reserved, int pos,
+                                        bool* ok);
+
   bool CheckInOrOf(bool accept_OF);
 };
 
 
+void PreParserTraits::MaterializeTemplateCallsiteLiterals() {
+  pre_parser_->function_state_->NextMaterializedLiteralIndex();
+  pre_parser_->function_state_->NextMaterializedLiteralIndex();
+}
+
+
 PreParserStatementList PreParser::ParseEagerFunctionBody(
     PreParserIdentifier function_name, int pos, Variable* fvar,
     Token::Value fvar_init_op, bool is_generator, bool* ok) {
@@ -1603,6 +1678,10 @@ void ParserBase<Traits>::ReportUnexpectedToken(Token::Value token) {
     case Token::FUTURE_STRICT_RESERVED_WORD:
       return ReportMessageAt(source_location, strict_mode() == SLOPPY
           ? "unexpected_token_identifier" : "unexpected_strict_reserved");
+    case Token::TEMPLATE_SPAN:
+    case Token::TEMPLATE_TAIL:
+      return Traits::ReportMessageAt(source_location,
+          "unexpected_template_string");
     default:
       const char* name = Token::String(token);
       DCHECK(name != NULL);
@@ -1749,6 +1828,7 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
   //   RegExpLiteral
   //   ClassLiteral
   //   '(' Expression ')'
+  //   TemplateLiteral
 
   int pos = peek_position();
   ExpressionT result = this->EmptyExpression();
@@ -1804,7 +1884,7 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
 
     case Token::LPAREN:
       Consume(Token::LPAREN);
-      if (allow_arrow_functions() && peek() == Token::RPAREN) {
+      if (allow_harmony_arrow_functions() && peek() == Token::RPAREN) {
         // Arrow functions are the only expression type constructions
         // for which an empty parameter list "()" is valid input.
         Consume(Token::RPAREN);
@@ -1822,6 +1902,11 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
 
     case Token::CLASS: {
       Consume(Token::CLASS);
+      if (!allow_harmony_sloppy() && strict_mode() == SLOPPY) {
+        ReportMessage("sloppy_lexical", NULL);
+        *ok = false;
+        break;
+      }
       int class_token_position = position();
       IdentifierT name = this->EmptyIdentifier();
       bool is_strict_reserved_name = false;
@@ -1837,8 +1922,14 @@ ParserBase<Traits>::ParsePrimaryExpression(bool* ok) {
       break;
     }
 
+    case Token::TEMPLATE_SPAN:
+    case Token::TEMPLATE_TAIL:
+      result =
+          this->ParseTemplateLiteral(Traits::NoTemplateTag(), pos, CHECK_OK);
+      break;
+
     case Token::MOD:
-      if (allow_natives_syntax() || extension_ != NULL) {
+      if (allow_natives() || extension_ != NULL) {
         result = this->ParseV8Intrinsic(CHECK_OK);
         break;
       }
@@ -2166,7 +2257,7 @@ ParserBase<Traits>::ParseAssignmentExpression(bool accept_IN, bool* ok) {
   ExpressionT expression =
       this->ParseConditionalExpression(accept_IN, CHECK_OK);
 
-  if (allow_arrow_functions() && peek() == Token::ARROW) {
+  if (allow_harmony_arrow_functions() && peek() == Token::ARROW) {
     checkpoint.Restore();
     expression = this->ParseArrowFunctionLiteral(lhs_location.beg_pos,
                                                  expression, CHECK_OK);
@@ -2460,6 +2551,23 @@ ParserBase<Traits>::ParseLeftHandSideExpression(bool* ok) {
         break;
       }
 
+      case Token::TEMPLATE_SPAN:
+      case Token::TEMPLATE_TAIL: {
+        int pos;
+        if (scanner()->current_token() == Token::IDENTIFIER) {
+          pos = position();
+        } else {
+          pos = peek_position();
+          if (result->IsFunctionLiteral() && mode() == PARSE_EAGERLY) {
+            // If the tag function looks like an IIFE, call set_parenthesized()
+            // to force eager compilation.
+            result->AsFunctionLiteral()->set_parenthesized();
+          }
+        }
+        result = ParseTemplateLiteral(result, pos, CHECK_OK);
+        break;
+      }
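
In call position, a template literal acts as the argument list of the preceding tag expression; a sketch of the two cases this block distinguishes (a plain tag versus a parenthesized, IIFE-looking tag that is forced to compile eagerly):

var r = String.raw`a\n${1}`;                  // member-expression tag: "a\\n1"
var t = (function (s) { return s[0]; })`hi`;  // IIFE-looking tag: "hi"
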
+
       case Token::PERIOD: {
         Consume(Token::PERIOD);
         int pos = position();
@@ -2562,8 +2670,11 @@ ParserBase<Traits>::ParseMemberExpression(bool* ok) {
     int beg_pos = position();
     Consume(Token::SUPER);
     Token::Value next = peek();
-    if (next == Token::PERIOD || next == Token::LBRACK ||
-        next == Token::LPAREN) {
+    if (next == Token::PERIOD || next == Token::LBRACK) {
+      scope_->RecordSuperPropertyUsage();
+      result = this->SuperReference(scope_, factory());
+    } else if (next == Token::LPAREN) {
+      scope_->RecordSuperConstructorCallUsage();
       result = this->SuperReference(scope_, factory());
     } else {
       ReportMessageAt(Scanner::Location(beg_pos, position()),
@@ -2625,8 +2736,6 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
                                        bool* ok) {
   typename Traits::Type::ScopePtr scope = this->NewScope(scope_, ARROW_SCOPE);
   typename Traits::Type::StatementList body;
-  typename Traits::Type::AstProperties ast_properties;
-  BailoutReason dont_optimize_reason = kNoReason;
   int num_parameters = -1;
   int materialized_literal_count = -1;
   int expected_property_count = -1;
@@ -2703,14 +2812,12 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
 
     // Validate strict mode.
     if (strict_mode() == STRICT) {
-      CheckOctalLiteral(start_pos, scanner()->location().end_pos, CHECK_OK);
+      CheckStrictOctalLiteral(start_pos, scanner()->location().end_pos,
+                              CHECK_OK);
     }
 
     if (allow_harmony_scoping() && strict_mode() == STRICT)
       this->CheckConflictingVarDeclarations(scope, CHECK_OK);
-
-    ast_properties = *factory()->visitor()->ast_properties();
-    dont_optimize_reason = factory()->visitor()->dont_optimize_reason();
   }
 
   FunctionLiteralT function_literal = factory()->NewFunctionLiteral(
@@ -2722,8 +2829,6 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
       start_pos);
 
   function_literal->set_function_token_position(start_pos);
-  function_literal->set_ast_properties(&ast_properties);
-  function_literal->set_dont_optimize_reason(dont_optimize_reason);
 
   if (fni_ != NULL) this->InferFunctionName(fni_, function_literal);
 
@@ -2731,76 +2836,90 @@ typename ParserBase<Traits>::ExpressionT ParserBase<
 }
 
 
-template <class Traits>
-typename ParserBase<Traits>::ExpressionT ParserBase<Traits>::ParseClassLiteral(
-    IdentifierT name, Scanner::Location class_name_location,
-    bool name_is_strict_reserved, int pos, bool* ok) {
-  // All parts of a ClassDeclaration or a ClassExpression are strict code.
-  if (name_is_strict_reserved) {
-    ReportMessageAt(class_name_location, "unexpected_strict_reserved");
-    *ok = false;
-    return this->EmptyExpression();
-  }
-  if (this->IsEvalOrArguments(name)) {
-    ReportMessageAt(class_name_location, "strict_eval_arguments");
-    *ok = false;
-    return this->EmptyExpression();
-  }
-
-  ExpressionT extends = this->EmptyExpression();
-  if (Check(Token::EXTENDS)) {
-    typename Traits::Type::ScopePtr scope = this->NewScope(scope_, BLOCK_SCOPE);
-    BlockState block_state(&scope_, Traits::Type::ptr_to_scope(scope));
-    scope_->SetStrictMode(STRICT);
-    extends = this->ParseLeftHandSideExpression(CHECK_OK);
+template <typename Traits>
+typename ParserBase<Traits>::ExpressionT
+ParserBase<Traits>::ParseTemplateLiteral(ExpressionT tag, int start, bool* ok) {
+  // A TemplateLiteral is made up of 0 or more TEMPLATE_SPAN tokens (literal
+  // text followed by a substitution expression), finalized by a single
+  // TEMPLATE_TAIL.
+  //
+  // In terms of draft language, TEMPLATE_SPAN may be either the TemplateHead or
+  // TemplateMiddle productions, while TEMPLATE_TAIL is either TemplateTail, or
+  // NoSubstitutionTemplate.
+  //
+  // When parsing a TemplateLiteral, we must have scanned either an initial
+  // TEMPLATE_SPAN, or a TEMPLATE_TAIL.
+  CHECK(peek() == Token::TEMPLATE_SPAN || peek() == Token::TEMPLATE_TAIL);
+
+  // If we reach a TEMPLATE_TAIL first, we are parsing a NoSubstitutionTemplate.
+  // In this case we may simply consume the token and build a template with a
+  // single TEMPLATE_SPAN and no expressions.
+  if (peek() == Token::TEMPLATE_TAIL) {
+    Consume(Token::TEMPLATE_TAIL);
+    int pos = position();
+    CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
+    typename Traits::TemplateLiteralState ts = Traits::OpenTemplateLiteral(pos);
+    Traits::AddTemplateSpan(&ts, true);
+    return Traits::CloseTemplateLiteral(&ts, start, tag);
   }
 
-  // TODO(arv): Implement scopes and name binding in class body only.
-  typename Traits::Type::ScopePtr scope = this->NewScope(scope_, BLOCK_SCOPE);
-  BlockState block_state(&scope_, Traits::Type::ptr_to_scope(scope));
-  scope_->SetStrictMode(STRICT);
-  scope_->SetScopeName(name);
-
-  typename Traits::Type::PropertyList properties =
-      this->NewPropertyList(4, zone_);
-  ExpressionT constructor = this->EmptyExpression();
-  bool has_seen_constructor = false;
+  Consume(Token::TEMPLATE_SPAN);
+  int pos = position();
+  typename Traits::TemplateLiteralState ts = Traits::OpenTemplateLiteral(pos);
+  Traits::AddTemplateSpan(&ts, false);
+  Token::Value next;
+
+  // If we open with a TEMPLATE_SPAN, we must scan the subsequent expression,
+  // and repeat if the following token is a TEMPLATE_SPAN as well (in this
+  // case, representing a TemplateMiddle).
+
+  do {
+    next = peek();
+    if (!next) {
+      ReportMessageAt(Scanner::Location(start, peek_position()),
+                      "unterminated_template");
+      *ok = false;
+      return Traits::EmptyExpression();
+    }
 
-  Expect(Token::LBRACE, CHECK_OK);
-  while (peek() != Token::RBRACE) {
-    if (Check(Token::SEMICOLON)) continue;
-    if (fni_ != NULL) fni_->Enter();
-    const bool in_class = true;
-    const bool is_static = false;
-    bool old_has_seen_constructor = has_seen_constructor;
-    ObjectLiteralPropertyT property = this->ParsePropertyDefinition(
-        NULL, in_class, is_static, &has_seen_constructor, CHECK_OK);
+    int expr_pos = peek_position();
+    ExpressionT expression = this->ParseExpression(true, CHECK_OK);
+    Traits::AddTemplateExpression(&ts, expression);
 
-    if (has_seen_constructor != old_has_seen_constructor) {
-      constructor = this->GetPropertyValue(property);
-    } else {
-      properties->Add(property, zone());
+    if (peek() != Token::RBRACE) {
+      ReportMessageAt(Scanner::Location(expr_pos, peek_position()),
+                      "unterminated_template_expr");
+      *ok = false;
+      return Traits::EmptyExpression();
     }
 
-    if (fni_ != NULL) {
-      fni_->Infer();
-      fni_->Leave();
+    // If we didn't die parsing that expression, our next token should be a
+    // TEMPLATE_SPAN or TEMPLATE_TAIL.
+    next = scanner()->ScanTemplateContinuation();
+    Next();
+
+    if (!next) {
+      ReportMessageAt(Scanner::Location(start, position()),
+                      "unterminated_template");
+      *ok = false;
+      return Traits::EmptyExpression();
     }
-  }
 
-  int end_pos = peek_position();
-  Expect(Token::RBRACE, CHECK_OK);
+    Traits::AddTemplateSpan(&ts, next == Token::TEMPLATE_TAIL);
+  } while (next == Token::TEMPLATE_SPAN);
 
-  return this->ClassExpression(name, extends, constructor, properties, pos,
-                               end_pos + 1, factory());
+  DCHECK_EQ(next, Token::TEMPLATE_TAIL);
+  CheckTemplateOctalLiteral(pos, peek_position(), CHECK_OK);
+  // Once we've reached a TEMPLATE_TAIL, we can close the TemplateLiteral.
+  return Traits::CloseTemplateLiteral(&ts, start, tag);
 }
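
Concretely, the token sequences this routine consumes, as a sketch:

var x = 1, y = 2;
var s = `a${x}b${y}c`;  // TEMPLATE_SPAN "a", TEMPLATE_SPAN "b", TEMPLATE_TAIL "c"
var p = `plain`;        // single TEMPLATE_TAIL: a NoSubstitutionTemplate
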
 
 
 template <typename Traits>
-typename ParserBase<Traits>::ExpressionT
-ParserBase<Traits>::CheckAndRewriteReferenceExpression(
-    ExpressionT expression,
-    Scanner::Location location, const char* message, bool* ok) {
+typename ParserBase<Traits>::ExpressionT ParserBase<
+    Traits>::CheckAndRewriteReferenceExpression(ExpressionT expression,
+                                                Scanner::Location location,
+                                                const char* message, bool* ok) {
   if (strict_mode() == STRICT && this->IsIdentifier(expression) &&
       this->IsEvalOrArguments(this->AsIdentifier(expression))) {
     this->ReportMessageAt(location, "strict_eval_arguments", false);
index d300d9a..bf01520 100644 (file)
@@ -30,7 +30,7 @@ class PrettyPrinter: public AstVisitor {
   static void PrintOut(Zone* zone, AstNode* node);
 
   // Individual nodes
-#define DECLARE_VISIT(type) virtual void Visit##type(type* node);
+#define DECLARE_VISIT(type) void Visit##type(type* node) OVERRIDE;
   AST_NODE_LIST(DECLARE_VISIT)
 #undef DECLARE_VISIT
 
index 443a3b8..c096296 100644 (file)
@@ -67,6 +67,13 @@ var lastMicrotaskId = 0;
     return promise;
   }
 
+  function PromiseCreateAndSet(status, value) {
+    var promise = new $Promise(promiseRaw);
+    // If debug is active, notify about the newly created promise first.
+    if (DEBUG_IS_ACTIVE) PromiseSet(promise, 0, UNDEFINED);
+    return PromiseSet(promise, status, value);
+  }
+
   function PromiseInit(promise) {
     return PromiseSet(
         promise, 0, UNDEFINED, new InternalArray, new InternalArray)
@@ -74,7 +81,8 @@ var lastMicrotaskId = 0;
 
   function PromiseDone(promise, status, value, promiseQueue) {
     if (GET_PRIVATE(promise, promiseStatus) === 0) {
-      PromiseEnqueue(value, GET_PRIVATE(promise, promiseQueue), status);
+      var tasks = GET_PRIVATE(promise, promiseQueue);
+      if (tasks.length) PromiseEnqueue(value, tasks, status);
       PromiseSet(promise, status, value);
     }
   }
@@ -103,6 +111,7 @@ var lastMicrotaskId = 0;
   function PromiseHandle(value, handler, deferred) {
     try {
       %DebugPushPromise(deferred.promise);
+      DEBUG_PREPARE_STEP_IN_IF_STEPPING(handler);
       var result = handler(value);
       if (result === deferred.promise)
         throw MakeTypeError('promise_cyclic', [result]);
@@ -195,7 +204,7 @@ var lastMicrotaskId = 0;
   function PromiseResolved(x) {
     if (this === $Promise) {
       // Optimized case, avoid extra closure.
-      return PromiseSet(new $Promise(promiseRaw), +1, x);
+      return PromiseCreateAndSet(+1, x);
     } else {
       return new this(function(resolve, reject) { resolve(x) });
     }
@@ -205,7 +214,7 @@ var lastMicrotaskId = 0;
     var promise;
     if (this === $Promise) {
       // Optimized case, avoid extra closure.
-      promise = PromiseSet(new $Promise(promiseRaw), -1, r);
+      promise = PromiseCreateAndSet(-1, r);
       // The debug event for this would always be an uncaught promise reject,
       // which is usually simply noise. Do not trigger that debug event.
       %PromiseRejectEvent(promise, r, false);
@@ -270,8 +279,15 @@ var lastMicrotaskId = 0;
       this,
       function(x) {
         x = PromiseCoerce(constructor, x);
-        return x === that ? onReject(MakeTypeError('promise_cyclic', [x])) :
-               IsPromise(x) ? x.then(onResolve, onReject) : onResolve(x);
+        if (x === that) {
+          DEBUG_PREPARE_STEP_IN_IF_STEPPING(onReject);
+          return onReject(MakeTypeError('promise_cyclic', [x]));
+        } else if (IsPromise(x)) {
+          return x.then(onResolve, onReject);
+        } else {
+          DEBUG_PREPARE_STEP_IN_IF_STEPPING(onResolve);
+          return onResolve(x);
+        }
       },
       onReject,
       PromiseChain
index 61feced..6140e0d 100644 (file)
@@ -41,16 +41,24 @@ typedef TypeImpl<ZoneTypeConfig> Type;
 class TypeInfo;
 
 // Type of properties.
+// Order of kinds is significant.
+// Must fit in the BitField PropertyDetails::KindField.
+enum PropertyKind { DATA = 0, ACCESSOR = 1 };
+
+
+// Order of locations is significant.
+// Must fit in the BitField PropertyDetails::LocationField.
+enum PropertyLocation { IN_OBJECT = 0, IN_DESCRIPTOR = 1 };
+
+
 // Order of properties is significant.
 // Must fit in the BitField PropertyDetails::TypeField.
 // A copy of this is in mirror-debugger.js.
 enum PropertyType {
-  // Only in slow mode.
-  NORMAL = 0,
-  // Only in fast mode.
-  FIELD = 1,
-  CONSTANT = 2,
-  CALLBACKS = 3
+  FIELD = (IN_OBJECT << 1) | DATA,
+  CONSTANT = (IN_DESCRIPTOR << 1) | DATA,
+  ACCESSOR_FIELD = (IN_OBJECT << 1) | ACCESSOR,
+  CALLBACKS = (IN_DESCRIPTOR << 1) | ACCESSOR
 };
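
The new encoding packs the kind into bit 0 and the location into bit 1, so every legacy PropertyType value is simply (location << 1) | kind; note that FIELD is now 0, which is why the old NORMAL (formerly 0) is replaced by FIELD elsewhere in this patch. Checking the arithmetic:

var DATA = 0, ACCESSOR = 1;            // PropertyKind
var IN_OBJECT = 0, IN_DESCRIPTOR = 1;  // PropertyLocation
console.log((IN_OBJECT << 1) | DATA);          // 0: FIELD
console.log((IN_DESCRIPTOR << 1) | DATA);      // 2: CONSTANT
console.log((IN_OBJECT << 1) | ACCESSOR);      // 1: ACCESSOR_FIELD
console.log((IN_DESCRIPTOR << 1) | ACCESSOR);  // 3: CALLBACKS
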
 
 
@@ -229,6 +237,9 @@ class PropertyDetails BASE_EMBEDDED {
     return Representation::FromKind(static_cast<Representation::Kind>(bits));
   }
 
+  PropertyKind kind() const { return KindField::decode(value_); }
+  PropertyLocation location() const { return LocationField::decode(value_); }
+
   PropertyType type() const { return TypeField::decode(value_); }
 
   PropertyAttributes attributes() const {
@@ -240,13 +251,12 @@ class PropertyDetails BASE_EMBEDDED {
   }
 
   Representation representation() const {
-    DCHECK(type() != NORMAL);
     return DecodeRepresentation(RepresentationField::decode(value_));
   }
 
-  int field_index() const {
-    return FieldIndexField::decode(value_);
-  }
+  int field_index() const { return FieldIndexField::decode(value_); }
+
+  inline int field_width_in_words() const;
 
   inline PropertyDetails AsDeleted() const;
 
@@ -257,11 +267,12 @@ class PropertyDetails BASE_EMBEDDED {
   bool IsReadOnly() const { return (attributes() & READ_ONLY) != 0; }
   bool IsConfigurable() const { return (attributes() & DONT_DELETE) == 0; }
   bool IsDontEnum() const { return (attributes() & DONT_ENUM) != 0; }
-  bool IsDeleted() const { return DeletedField::decode(value_) != 0;}
+  bool IsDeleted() const { return DeletedField::decode(value_) != 0; }
 
   // Bit fields in value_ (type, shift, size). Must be public so the
   // constants can be embedded in generated code.
-  class TypeField : public BitField<PropertyType, 0, 2> {};
+  class KindField : public BitField<PropertyKind, 0, 1> {};
+  class LocationField : public BitField<PropertyLocation, 1, 1> {};
   class AttributesField : public BitField<PropertyAttributes, 2, 3> {};
 
   // Bit fields for normalized objects.
@@ -275,11 +286,24 @@ class PropertyDetails BASE_EMBEDDED {
   class FieldIndexField
       : public BitField<uint32_t, 9 + kDescriptorIndexBitCount,
                         kDescriptorIndexBitCount> {};  // NOLINT
-  // All bits for fast objects must fix in a smi.
-  STATIC_ASSERT(9 + kDescriptorIndexBitCount + kDescriptorIndexBitCount <= 31);
+
+  // NOTE: TypeField overlaps with KindField and LocationField.
+  class TypeField : public BitField<PropertyType, 0, 2> {};
+  STATIC_ASSERT(KindField::kNext == LocationField::kShift);
+  STATIC_ASSERT(TypeField::kShift == KindField::kShift);
+  STATIC_ASSERT(TypeField::kNext == LocationField::kNext);
+
+  // All bits for both fast and slow objects must fit in a smi.
+  STATIC_ASSERT(DictionaryStorageField::kNext <= 31);
+  STATIC_ASSERT(FieldIndexField::kNext <= 31);
 
   static const int kInitialIndex = 1;
 
+#ifdef OBJECT_PRINT
+  // For our gdb macros, we should perhaps change these in the future.
+  void Print(bool dictionary_mode);
+#endif
+
  private:
   PropertyDetails(int value, int pointer) {
     value_ = DescriptorPointer::update(value, pointer);
index e9e4b64..d00998c 100644 (file)
@@ -42,32 +42,65 @@ std::ostream& operator<<(std::ostream& os,
 }
 
 
+struct FastPropertyDetails {
+  explicit FastPropertyDetails(const PropertyDetails& v) : details(v) {}
+  const PropertyDetails details;
+};
+
+
+// Outputs PropertyDetails for a dictionary-mode property.
 std::ostream& operator<<(std::ostream& os, const PropertyDetails& details) {
   os << "(";
-  switch (details.type()) {
-    case NORMAL:
-      os << "normal: dictionary_index: " << details.dictionary_index();
-      break;
-    case CONSTANT:
-      os << "constant: p: " << details.pointer();
-      break;
-    case FIELD:
-      os << "field: " << details.representation().Mnemonic()
-         << ", field_index: " << details.field_index()
-         << ", p: " << details.pointer();
-      break;
-    case CALLBACKS:
-      os << "callbacks: p: " << details.pointer();
-      break;
+  if (details.location() == IN_DESCRIPTOR) {
+    os << "immutable ";
   }
-  os << ", attrs: " << details.attributes() << ")";
-  return os;
+  os << (details.kind() == DATA ? "data" : "accessor");
+  return os << ", dictionary_index: " << details.dictionary_index()
+            << ", attrs: " << details.attributes() << ")";
+}
+
+
+// Outputs PropertyDetails for a fast-mode property held in a descriptor array.
+std::ostream& operator<<(std::ostream& os,
+                         const FastPropertyDetails& details_fast) {
+  const PropertyDetails& details = details_fast.details;
+  os << "(";
+  if (details.location() == IN_DESCRIPTOR) {
+    os << "immutable ";
+  }
+  os << (details.kind() == DATA ? "data" : "accessor");
+  if (details.location() == IN_OBJECT) {
+    os << ": " << details.representation().Mnemonic()
+       << ", field_index: " << details.field_index();
+  }
+  return os << ", p: " << details.pointer()
+            << ", attrs: " << details.attributes() << ")";
+}
+
+
+#ifdef OBJECT_PRINT
+void PropertyDetails::Print(bool dictionary_mode) {
+  OFStream os(stdout);
+  if (dictionary_mode) {
+    os << *this;
+  } else {
+    os << FastPropertyDetails(*this);
+  }
+  os << "\n" << std::flush;
 }
+#endif
 
 
 std::ostream& operator<<(std::ostream& os, const Descriptor& d) {
-  return os << "Descriptor " << Brief(*d.GetKey()) << " @ "
-            << Brief(*d.GetValue()) << " " << d.GetDetails();
+  Object* value = *d.GetValue();
+  os << "Descriptor " << Brief(*d.GetKey()) << " @ " << Brief(value) << " ";
+  if (value->IsAccessorPair()) {
+    AccessorPair* pair = AccessorPair::cast(value);
+    os << "(get: " << Brief(pair->getter())
+       << ", set: " << Brief(pair->setter()) << ") ";
+  }
+  os << FastPropertyDetails(d.GetDetails());
+  return os;
 }
 
 } }  // namespace v8::internal
index 48b7501..a9d8b09 100644 (file)
@@ -119,7 +119,7 @@ class LookupResult FINAL BASE_EMBEDDED {
         lookup_type_(NOT_FOUND),
         holder_(NULL),
         transition_(NULL),
-        details_(NONE, NORMAL, Representation::None()) {
+        details_(NONE, FIELD, Representation::None()) {
     isolate->set_top_lookup_result(this);
   }
 
@@ -148,7 +148,7 @@ class LookupResult FINAL BASE_EMBEDDED {
 
   void NotFound() {
     lookup_type_ = NOT_FOUND;
-    details_ = PropertyDetails(NONE, NORMAL, Representation::None());
+    details_ = PropertyDetails(NONE, FIELD, 0);
     holder_ = NULL;
     transition_ = NULL;
   }
@@ -160,7 +160,6 @@ class LookupResult FINAL BASE_EMBEDDED {
 
   // Property callbacks do not include transitions to callbacks.
   bool IsPropertyCallbacks() const {
-    DCHECK(!(details_.type() == CALLBACKS && !IsFound()));
     return !IsTransition() && details_.type() == CALLBACKS;
   }
 
@@ -170,12 +169,10 @@ class LookupResult FINAL BASE_EMBEDDED {
   }
 
   bool IsField() const {
-    DCHECK(!(details_.type() == FIELD && !IsFound()));
     return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == FIELD;
   }
 
   bool IsConstant() const {
-    DCHECK(!(details_.type() == CONSTANT && !IsFound()));
     return lookup_type_ == DESCRIPTOR_TYPE && details_.type() == CONSTANT;
   }
 
index 0f3dbb6..e1eac76 100644 (file)
@@ -30,46 +30,7 @@ function DoConstructRegExp(object, pattern, flags) {
   pattern = IS_UNDEFINED(pattern) ? '' : ToString(pattern);
   flags = IS_UNDEFINED(flags) ? '' : ToString(flags);
 
-  var global = false;
-  var ignoreCase = false;
-  var multiline = false;
-  var sticky = false;
-  for (var i = 0; i < flags.length; i++) {
-    var c = %_CallFunction(flags, i, StringCharAt);
-    switch (c) {
-      case 'g':
-        if (global) {
-          throw MakeSyntaxError("invalid_regexp_flags", [flags]);
-        }
-        global = true;
-        break;
-      case 'i':
-        if (ignoreCase) {
-          throw MakeSyntaxError("invalid_regexp_flags", [flags]);
-        }
-        ignoreCase = true;
-        break;
-      case 'm':
-        if (multiline) {
-          throw MakeSyntaxError("invalid_regexp_flags", [flags]);
-        }
-        multiline = true;
-        break;
-      case 'y':
-        if (!harmony_regexps || sticky) {
-          throw MakeSyntaxError("invalid_regexp_flags", [flags]);
-        }
-        sticky = true;
-        break;
-      default:
-        throw MakeSyntaxError("invalid_regexp_flags", [flags]);
-    }
-  }
-
-  %RegExpInitializeObject(object, pattern, global, ignoreCase, multiline, sticky);
-
-  // Call internal function to compile the pattern.
-  %RegExpCompile(object, pattern, flags);
+  %RegExpInitializeAndCompile(object, pattern, flags);
 }
 
 
index 351c4bd..c52051a 100644 (file)
@@ -29,9 +29,7 @@ class Processor: public AstVisitor {
   void Process(ZoneList<Statement*>* statements);
   bool result_assigned() const { return result_assigned_; }
 
-  AstNodeFactory<AstNullVisitor>* factory() {
-    return &factory_;
-  }
+  AstNodeFactory* factory() { return &factory_; }
 
  private:
   Variable* result_;
@@ -49,7 +47,7 @@ class Processor: public AstVisitor {
   bool is_set_;
   bool in_try_;
 
-  AstNodeFactory<AstNullVisitor> factory_;
+  AstNodeFactory factory_;
 
   Expression* SetResult(Expression* value) {
     result_assigned_ = true;
@@ -229,7 +227,7 @@ bool Rewriter::Rewrite(CompilationInfo* info) {
   DCHECK(function != NULL);
   Scope* scope = function->scope();
   DCHECK(scope != NULL);
-  if (!scope->is_global_scope() && !scope->is_eval_scope()) return true;
+  if (!scope->is_script_scope() && !scope->is_eval_scope()) return true;
 
   ZoneList<Statement*>* body = function->body();
   if (!body->is_empty()) {
index f786846..6c99714 100644 (file)
@@ -106,23 +106,7 @@ void RuntimeProfiler::Optimize(JSFunction* function, const char* reason) {
     PrintF("]\n");
   }
 
-
-  if (isolate_->concurrent_recompilation_enabled() &&
-      !isolate_->bootstrapper()->IsActive()) {
-    if (isolate_->concurrent_osr_enabled() &&
-        isolate_->optimizing_compiler_thread()->IsQueuedForOSR(function)) {
-      // Do not attempt regular recompilation if we already queued this for OSR.
-      // TODO(yangguo): This is necessary so that we don't install optimized
-      // code on a function that is already optimized, since OSR and regular
-      // recompilation race.  This goes away as soon as OSR becomes one-shot.
-      return;
-    }
-    DCHECK(!function->IsInOptimizationQueue());
-    function->MarkForConcurrentOptimization();
-  } else {
-    // The next call to the function will trigger optimization.
-    function->MarkForOptimization();
-  }
+  function->AttemptConcurrentOptimization();
 }
 
 
@@ -163,7 +147,7 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
 void RuntimeProfiler::OptimizeNow() {
   HandleScope scope(isolate_);
 
-  if (isolate_->DebuggerHasBreakPoints()) return;
+  if (!isolate_->use_crankshaft() || isolate_->DebuggerHasBreakPoints()) return;
 
   DisallowHeapAllocation no_gc;
 
index 4d15d20..79824e4 100644 (file)
@@ -6,9 +6,7 @@
 
 // CAUTION: Some of the functions specified in this file are called
 // directly from compiled code. These are the functions with names in
-// ALL CAPS. The compiled code passes the first argument in 'this' and
-// it does not push the function onto the stack. This means that you
-// cannot use contexts in all these functions.
+// ALL CAPS. The compiled code passes the first argument in 'this'.
 
 
 /* -----------------------------------
@@ -324,8 +322,13 @@ function IN(x) {
   if (!IS_SPEC_OBJECT(x)) {
     throw %MakeTypeError('invalid_in_operator_use', [this, x]);
   }
-  return %_IsNonNegativeSmi(this) ?
-    %HasElement(x, this) : %HasProperty(x, %ToName(this));
+  if (%_IsNonNegativeSmi(this)) {
+    if (IS_ARRAY(x) && %_HasFastPackedElements(x)) {
+      return this < x.length;
+    }
+    return %HasElement(x, this);
+  }
+  return %HasProperty(x, %ToName(this));
 }
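
The added fast path reduces a smi key on a packed array to a bounds check; holey arrays still take the %HasElement path, so holes keep answering false:

var packed = [10, 20, 30];
1 in packed;    // true: fast path, 1 < packed.length
5 in packed;    // false: 5 >= packed.length
var holey = [10, , 30];
1 in holey;     // false: the hole is not an own element
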
 
 
@@ -598,6 +601,15 @@ function SameValue(x, y) {
   return x === y;
 }
 
+// ES6, section 7.2.4
+function SameValueZero(x, y) {
+  if (typeof x != typeof y) return false;
+  if (IS_NUMBER(x)) {
+    if (NUMBER_IS_NAN(x) && NUMBER_IS_NAN(y)) return true;
+  }
+  return x === y;
+}
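
SameValueZero differs from SameValue only at zero: +0 and -0 compare equal, while NaN still equals NaN. A standalone sketch of the same predicate:

function sameValueZero(x, y) {
  if (typeof x != typeof y) return false;
  if (typeof x == "number" && x != x && y != y) return true;  // NaN vs NaN
  return x === y;
}
sameValueZero(NaN, NaN);  // true
sameValueZero(0, -0);     // true (SameValue answers false here)
sameValueZero(1, "1");    // false
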
+
 
 /* ---------------------------------
    - - -   U t i l i t i e s   - - -
@@ -614,6 +626,15 @@ function IsPrimitive(x) {
 }
 
 
+// ES6, draft 10-14-14, section 22.1.3.1.1
+function IsConcatSpreadable(O) {
+  if (!IS_SPEC_OBJECT(O)) return false;
+  var spreadable = O[symbolIsConcatSpreadable];
+  if (IS_UNDEFINED(spreadable)) return IS_ARRAY(O);
+  return ToBoolean(spreadable);
+}
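
Under --harmony-arrays this lets objects opt in to being flattened by Array.prototype.concat; assuming the well-known symbol is exposed as Symbol.isConcatSpreadable, the observable effect is:

var arrayLike = { length: 2, 0: "a", 1: "b" };
[].concat(arrayLike);         // [arrayLike]: plain objects are not spread
arrayLike[Symbol.isConcatSpreadable] = true;
[].concat(arrayLike);         // ["a", "b"]: now spread element by element
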
+
+
 // ECMA-262, section 8.6.2.6, page 28.
 function DefaultNumber(x) {
   if (!IS_SYMBOL_WRAPPER(x)) {
index 523d8f5..a017236 100644 (file)
@@ -289,11 +289,11 @@ static uint32_t EstimateElementCount(Handle<JSArray> array) {
 
 
 template <class ExternalArrayClass, class ElementType>
-static void IterateExternalArrayElements(Isolate* isolate,
-                                         Handle<JSObject> receiver,
-                                         bool elements_are_ints,
-                                         bool elements_are_guaranteed_smis,
-                                         ArrayConcatVisitor* visitor) {
+static void IterateTypedArrayElements(Isolate* isolate,
+                                      Handle<JSObject> receiver,
+                                      bool elements_are_ints,
+                                      bool elements_are_guaranteed_smis,
+                                      ArrayConcatVisitor* visitor) {
   Handle<ExternalArrayClass> array(
       ExternalArrayClass::cast(receiver->elements()));
   uint32_t len = static_cast<uint32_t>(array->length());
@@ -439,8 +439,27 @@ static void CollectElementIndices(Handle<JSObject> object, uint32_t range,
 }
 
 
+static bool IterateElementsSlow(Isolate* isolate, Handle<JSObject> receiver,
+                                uint32_t length, ArrayConcatVisitor* visitor) {
+  for (uint32_t i = 0; i < length; ++i) {
+    HandleScope loop_scope(isolate);
+    Maybe<bool> maybe = JSReceiver::HasElement(receiver, i);
+    if (!maybe.has_value) return false;
+    if (maybe.value) {
+      Handle<Object> element_value;
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+          isolate, element_value,
+          Runtime::GetElementOrCharAt(isolate, receiver, i), false);
+      visitor->visit(i, element_value);
+    }
+  }
+  visitor->increase_index_offset(length);
+  return true;
+}
+
+
 /**
- * A helper function that visits elements of a JSArray in numerical
+ * A helper function that visits elements of a JSObject in numerical
  * order.
  *
  * The visitor argument called for each existing element in the array
@@ -449,9 +468,32 @@ static void CollectElementIndices(Handle<JSObject> object, uint32_t range,
  * length.
  * Returns false if any access threw an exception, otherwise true.
  */
-static bool IterateElements(Isolate* isolate, Handle<JSArray> receiver,
+static bool IterateElements(Isolate* isolate, Handle<JSObject> receiver,
                             ArrayConcatVisitor* visitor) {
-  uint32_t length = static_cast<uint32_t>(receiver->length()->Number());
+  uint32_t length = 0;
+
+  if (receiver->IsJSArray()) {
+    Handle<JSArray> array(Handle<JSArray>::cast(receiver));
+    length = static_cast<uint32_t>(array->length()->Number());
+  } else {
+    Handle<Object> val;
+    Handle<Object> key(isolate->heap()->length_string(), isolate);
+    ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, val,
+        Runtime::GetObjectProperty(isolate, receiver, key), false);
+    // TODO(caitp): Support larger element indexes (up to 2^53-1).
+    if (!val->ToUint32(&length)) {
+      ASSIGN_RETURN_ON_EXCEPTION_VALUE(isolate, val,
+          Execution::ToLength(isolate, val), false);
+      val->ToUint32(&length);
+    }
+  }
+
+  if (!(receiver->IsJSArray() || receiver->IsJSTypedArray())) {
+    // For classes which are not known to be safe to access via elements alone,
+    // use the slow case.
+    return IterateElementsSlow(isolate, receiver, length, visitor);
+  }
+
   switch (receiver->GetElementsKind()) {
     case FAST_SMI_ELEMENTS:
     case FAST_ELEMENTS:
@@ -552,55 +594,132 @@ static bool IterateElements(Isolate* isolate, Handle<JSArray> receiver,
       }
       break;
     }
+    case UINT8_CLAMPED_ELEMENTS: {
+      Handle<FixedUint8ClampedArray> pixels(
+          FixedUint8ClampedArray::cast(receiver->elements()));
+      for (uint32_t j = 0; j < length; j++) {
+        Handle<Smi> e(Smi::FromInt(pixels->get_scalar(j)), isolate);
+        visitor->visit(j, e);
+      }
+      break;
+    }
     case EXTERNAL_INT8_ELEMENTS: {
-      IterateExternalArrayElements<ExternalInt8Array, int8_t>(
+      IterateTypedArrayElements<ExternalInt8Array, int8_t>(
           isolate, receiver, true, true, visitor);
       break;
     }
+    case INT8_ELEMENTS: {
+      IterateTypedArrayElements<FixedInt8Array, int8_t>(
+          isolate, receiver, true, true, visitor);
+      break;
+    }
     case EXTERNAL_UINT8_ELEMENTS: {
-      IterateExternalArrayElements<ExternalUint8Array, uint8_t>(
+      IterateTypedArrayElements<ExternalUint8Array, uint8_t>(
           isolate, receiver, true, true, visitor);
       break;
     }
+    case UINT8_ELEMENTS: {
+      IterateTypedArrayElements<FixedUint8Array, uint8_t>(
+          isolate, receiver, true, true, visitor);
+      break;
+    }
     case EXTERNAL_INT16_ELEMENTS: {
-      IterateExternalArrayElements<ExternalInt16Array, int16_t>(
+      IterateTypedArrayElements<ExternalInt16Array, int16_t>(
           isolate, receiver, true, true, visitor);
       break;
     }
+    case INT16_ELEMENTS: {
+      IterateTypedArrayElements<FixedInt16Array, int16_t>(
+          isolate, receiver, true, true, visitor);
+      break;
+    }
     case EXTERNAL_UINT16_ELEMENTS: {
-      IterateExternalArrayElements<ExternalUint16Array, uint16_t>(
+      IterateTypedArrayElements<ExternalUint16Array, uint16_t>(
           isolate, receiver, true, true, visitor);
       break;
     }
+    case UINT16_ELEMENTS: {
+      IterateTypedArrayElements<FixedUint16Array, uint16_t>(
+          isolate, receiver, true, true, visitor);
+      break;
+    }
     case EXTERNAL_INT32_ELEMENTS: {
-      IterateExternalArrayElements<ExternalInt32Array, int32_t>(
+      IterateTypedArrayElements<ExternalInt32Array, int32_t>(
           isolate, receiver, true, false, visitor);
       break;
     }
+    case INT32_ELEMENTS: {
+      IterateTypedArrayElements<FixedInt32Array, int32_t>(
+          isolate, receiver, true, false, visitor);
+      break;
+    }
     case EXTERNAL_UINT32_ELEMENTS: {
-      IterateExternalArrayElements<ExternalUint32Array, uint32_t>(
+      IterateTypedArrayElements<ExternalUint32Array, uint32_t>(
           isolate, receiver, true, false, visitor);
       break;
     }
+    case UINT32_ELEMENTS: {
+      IterateTypedArrayElements<FixedUint32Array, uint32_t>(
+          isolate, receiver, true, false, visitor);
+      break;
+    }
     case EXTERNAL_FLOAT32_ELEMENTS: {
-      IterateExternalArrayElements<ExternalFloat32Array, float>(
+      IterateTypedArrayElements<ExternalFloat32Array, float>(
           isolate, receiver, false, false, visitor);
       break;
     }
+    case FLOAT32_ELEMENTS: {
+      IterateTypedArrayElements<FixedFloat32Array, float>(
+          isolate, receiver, false, false, visitor);
+      break;
+    }
     case EXTERNAL_FLOAT64_ELEMENTS: {
-      IterateExternalArrayElements<ExternalFloat64Array, double>(
+      IterateTypedArrayElements<ExternalFloat64Array, double>(
           isolate, receiver, false, false, visitor);
       break;
     }
-    default:
-      UNREACHABLE();
+    case FLOAT64_ELEMENTS: {
+      IterateTypedArrayElements<FixedFloat64Array, double>(
+          isolate, receiver, false, false, visitor);
+      break;
+    }
+    case SLOPPY_ARGUMENTS_ELEMENTS: {
+      ElementsAccessor* accessor = receiver->GetElementsAccessor();
+      for (uint32_t index = 0; index < length; index++) {
+        HandleScope loop_scope(isolate);
+        if (accessor->HasElement(receiver, receiver, index)) {
+          Handle<Object> element;
+          ASSIGN_RETURN_ON_EXCEPTION_VALUE(
+              isolate, element, accessor->Get(receiver, receiver, index),
+              false);
+          visitor->visit(index, element);
+        }
+      }
       break;
+    }
   }
   visitor->increase_index_offset(length);
   return true;
 }
 
 
+static bool IsConcatSpreadable(Isolate* isolate, Handle<Object> obj) {
+  HandleScope handle_scope(isolate);
+  if (!obj->IsSpecObject()) return false;
+  if (obj->IsJSArray()) return true;
+  if (FLAG_harmony_arrays) {
+    Handle<Symbol> key(isolate->factory()->is_concat_spreadable_symbol());
+    Handle<Object> value;
+    MaybeHandle<Object> maybeValue =
+        i::Runtime::GetObjectProperty(isolate, obj, key);
+    if (maybeValue.ToHandle(&value)) {
+      return value->BooleanValue();
+    }
+  }
+  return false;
+}
+
+
 /**
  * Array::concat implementation.
  * See ECMAScript 262, 15.4.4.4.
@@ -771,9 +890,11 @@ RUNTIME_FUNCTION(Runtime_ArrayConcat) {
 
   for (int i = 0; i < argument_count; i++) {
     Handle<Object> obj(elements->get(i), isolate);
-    if (obj->IsJSArray()) {
-      Handle<JSArray> array = Handle<JSArray>::cast(obj);
-      if (!IterateElements(isolate, array, &visitor)) {
+    bool spreadable = IsConcatSpreadable(isolate, obj);
+    if (isolate->has_pending_exception()) return isolate->heap()->exception();
+    if (spreadable) {
+      Handle<JSObject> object = Handle<JSObject>::cast(obj);
+      if (!IterateElements(isolate, object, &visitor)) {
         return isolate->heap()->exception();
       }
     } else {
index cc4e09b..7c827f0 100644 (file)
@@ -62,7 +62,7 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
   DCHECK(args.length() == 6);
   CONVERT_ARG_HANDLE_CHECKED(Object, name, 0);
   CONVERT_ARG_HANDLE_CHECKED(Object, super_class, 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, constructor, 2);
+  CONVERT_ARG_HANDLE_CHECKED(JSFunction, constructor, 2);
   CONVERT_ARG_HANDLE_CHECKED(Script, script, 3);
   CONVERT_SMI_ARG_CHECKED(start_position, 4);
   CONVERT_SMI_ARG_CHECKED(end_position, 5);
@@ -98,58 +98,54 @@ RUNTIME_FUNCTION(Runtime_DefineClass) {
 
   Handle<Map> map =
       isolate->factory()->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
-  map->set_prototype(*prototype_parent);
+  map->SetPrototype(prototype_parent);
+  map->set_constructor(*constructor);
   Handle<JSObject> prototype = isolate->factory()->NewJSObjectFromMap(map);
 
   Handle<String> name_string = name->IsString()
                                    ? Handle<String>::cast(name)
                                    : isolate->factory()->empty_string();
+  constructor->shared()->set_name(*name_string);
 
-  Handle<JSFunction> ctor;
-  if (constructor->IsSpecFunction()) {
-    ctor = Handle<JSFunction>::cast(constructor);
-    JSFunction::SetPrototype(ctor, prototype);
-    PropertyAttributes attribs =
-        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
-    RETURN_FAILURE_ON_EXCEPTION(
-        isolate,
-        JSObject::SetOwnPropertyIgnoreAttributes(
-            ctor, isolate->factory()->prototype_string(), prototype, attribs));
-  } else {
-    // TODO(arv): This should not use an empty function but a function that
-    // calls super.
-    Handle<Code> code(isolate->builtins()->builtin(Builtins::kEmptyFunction));
-    ctor = isolate->factory()->NewFunction(name_string, code, prototype, true);
-  }
+  JSFunction::SetPrototype(constructor, prototype);
+  PropertyAttributes attribs =
+      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE | READ_ONLY);
+  RETURN_FAILURE_ON_EXCEPTION(
+      isolate, JSObject::SetOwnPropertyIgnoreAttributes(
+                   constructor, isolate->factory()->prototype_string(),
+                   prototype, attribs));
 
+  // TODO(arv): Only do this conditionally.
   Handle<Symbol> home_object_symbol(isolate->heap()->home_object_symbol());
   RETURN_FAILURE_ON_EXCEPTION(
       isolate, JSObject::SetOwnPropertyIgnoreAttributes(
-                   ctor, home_object_symbol, prototype, DONT_ENUM));
+                   constructor, home_object_symbol, prototype, DONT_ENUM));
 
   if (!constructor_parent.is_null()) {
     RETURN_FAILURE_ON_EXCEPTION(
-        isolate, JSObject::SetPrototype(ctor, constructor_parent, false));
+        isolate,
+        JSObject::SetPrototype(constructor, constructor_parent, false));
   }
 
   JSObject::AddProperty(prototype, isolate->factory()->constructor_string(),
-                        ctor, DONT_ENUM);
+                        constructor, DONT_ENUM);
 
   // Install private properties that are used to construct the FunctionToString.
   RETURN_FAILURE_ON_EXCEPTION(
+      isolate, Object::SetProperty(constructor,
+                                   isolate->factory()->class_script_symbol(),
+                                   script, STRICT));
+  RETURN_FAILURE_ON_EXCEPTION(
       isolate,
-      Object::SetProperty(ctor, isolate->factory()->class_script_symbol(),
-                          script, STRICT));
+      Object::SetProperty(
+          constructor, isolate->factory()->class_start_position_symbol(),
+          handle(Smi::FromInt(start_position), isolate), STRICT));
   RETURN_FAILURE_ON_EXCEPTION(
       isolate, Object::SetProperty(
-                   ctor, isolate->factory()->class_start_position_symbol(),
-                   handle(Smi::FromInt(start_position), isolate), STRICT));
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate,
-      Object::SetProperty(ctor, isolate->factory()->class_end_position_symbol(),
-                          handle(Smi::FromInt(end_position), isolate), STRICT));
+                   constructor, isolate->factory()->class_end_position_symbol(),
+                   handle(Smi::FromInt(end_position), isolate), STRICT));
 
-  return *ctor;
+  return *constructor;
 }
 
 
@@ -160,11 +156,6 @@ RUNTIME_FUNCTION(Runtime_DefineClassMethod) {
   CONVERT_ARG_HANDLE_CHECKED(Object, key, 1);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 2);
 
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate, JSObject::SetOwnPropertyIgnoreAttributes(
-                   function, isolate->factory()->home_object_symbol(), object,
-                   DONT_ENUM));
-
   uint32_t index;
   if (key->ToArrayIndex(&index)) {
     RETURN_FAILURE_ON_EXCEPTION(
@@ -198,11 +189,6 @@ RUNTIME_FUNCTION(Runtime_DefineClassGetter) {
                                      Runtime::ToName(isolate, key));
   RETURN_FAILURE_ON_EXCEPTION(
       isolate,
-      JSObject::SetOwnPropertyIgnoreAttributes(
-          getter, isolate->factory()->home_object_symbol(), object, DONT_ENUM));
-
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate,
       JSObject::DefineAccessor(object, name, getter,
                                isolate->factory()->null_value(), NONE));
   return isolate->heap()->undefined_value();
@@ -221,10 +207,6 @@ RUNTIME_FUNCTION(Runtime_DefineClassSetter) {
                                      Runtime::ToName(isolate, key));
   RETURN_FAILURE_ON_EXCEPTION(
       isolate,
-      JSObject::SetOwnPropertyIgnoreAttributes(
-          setter, isolate->factory()->home_object_symbol(), object, DONT_ENUM));
-  RETURN_FAILURE_ON_EXCEPTION(
-      isolate,
       JSObject::DefineAccessor(object, name, isolate->factory()->null_value(),
                                setter, NONE));
   return isolate->heap()->undefined_value();
@@ -455,5 +437,52 @@ RUNTIME_FUNCTION(Runtime_StoreKeyedToSuper_Sloppy) {
 
   return StoreKeyedToSuper(isolate, home_object, receiver, key, value, SLOPPY);
 }
+
+
+RUNTIME_FUNCTION(Runtime_DefaultConstructorSuperCall) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 0);
+
+  // Compute the frame holding the arguments.
+  JavaScriptFrameIterator it(isolate);
+  it.AdvanceToArgumentsFrame();
+  JavaScriptFrame* frame = it.frame();
+
+  Handle<JSFunction> function(frame->function(), isolate);
+  Handle<Object> receiver(frame->receiver(), isolate);
+
+  Handle<Object> proto_function;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, proto_function,
+                                     Runtime::GetPrototype(isolate, function));
+
+  // Get the actual number of provided arguments.
+  const int argc = frame->ComputeParametersCount();
+
+  // Loose upper bound to allow fuzzing. We'll most likely run out of
+  // stack space before hitting this limit.
+  static int kMaxArgc = 1000000;
+  RUNTIME_ASSERT(argc >= 0 && argc <= kMaxArgc);
+
+  // If there are too many arguments, allocate argv via malloc.
+  const int argv_small_size = 10;
+  Handle<Object> argv_small_buffer[argv_small_size];
+  SmartArrayPointer<Handle<Object> > argv_large_buffer;
+  Handle<Object>* argv = argv_small_buffer;
+  if (argc > argv_small_size) {
+    argv = new Handle<Object>[argc];
+    if (argv == NULL) return isolate->StackOverflow();
+    argv_large_buffer = SmartArrayPointer<Handle<Object> >(argv);
+  }
+
+  for (int i = 0; i < argc; ++i) {
+    argv[i] = handle(frame->GetParameter(i), isolate);
+  }
+
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result,
+      Execution::Call(isolate, proto_function, receiver, argc, argv, false));
+  return *result;
+}
 }
 }  // namespace v8::internal
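
Runtime_DefaultConstructorSuperCall implements the implicit constructor of a derived class, forwarding every argument to the base constructor; in source terms the behavior is roughly:

class Base {
  constructor(x, y) { this.sum = x + y; }
}
class Derived extends Base {}  // implicit: constructor(...args) { super(...args); }
new Derived(1, 2).sum;         // 3: the arguments were forwarded to Base
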
index 45ac41c..abdd056 100644 (file)
@@ -115,6 +115,22 @@ RUNTIME_FUNCTION(Runtime_SetIteratorNext) {
 }
 
 
+// The array returned contains the following information:
+// 0: HasMore flag
+// 1: Iteration index
+// 2: Iteration kind
+RUNTIME_FUNCTION(Runtime_SetIteratorDetails) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSSetIterator, holder, 0);
+  Handle<FixedArray> details = isolate->factory()->NewFixedArray(4);
+  details->set(0, isolate->heap()->ToBoolean(holder->HasMore()));
+  details->set(1, holder->index());
+  details->set(2, holder->kind());
+  return *isolate->factory()->NewJSArrayWithElements(details);
+}
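
A hedged sketch of how a debug mirror might consume this (requires --allow-natives-syntax; note the backing FixedArray has four slots while only the three documented ones are written, so the last stays undefined):

var set = new Set([1, 2, 3]);
var it = set.values();
it.next();
var details = %SetIteratorDetails(it);
// details[0]: HasMore flag, details[1]: iteration index,
// details[2]: iteration kind, details[3]: left undefined
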
+
+
 RUNTIME_FUNCTION(Runtime_MapInitialize) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
@@ -225,25 +241,47 @@ RUNTIME_FUNCTION(Runtime_MapIteratorClone) {
 }
 
 
-RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
+// The array returned contains the following information:
+// 0: HasMore flag
+// 1: Iteration index
+// 2: Iteration kind
+RUNTIME_FUNCTION(Runtime_MapIteratorDetails) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSMapIterator, holder, 0);
+  Handle<FixedArray> details = isolate->factory()->NewFixedArray(4);
+  details->set(0, isolate->heap()->ToBoolean(holder->HasMore()));
+  details->set(1, holder->index());
+  details->set(2, holder->kind());
+  return *isolate->factory()->NewJSArrayWithElements(details);
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetWeakMapEntries) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
+  CONVERT_NUMBER_CHECKED(int, max_entries, Int32, args[1]);
+  RUNTIME_ASSERT(max_entries >= 0);
+
   Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
+  if (max_entries == 0 || max_entries > table->NumberOfElements()) {
+    max_entries = table->NumberOfElements();
+  }
   Handle<FixedArray> entries =
-      isolate->factory()->NewFixedArray(table->NumberOfElements() * 2);
+      isolate->factory()->NewFixedArray(max_entries * 2);
   {
     DisallowHeapAllocation no_gc;
-    int number_of_non_hole_elements = 0;
-    for (int i = 0; i < table->Capacity(); i++) {
+    int count = 0;
+    for (int i = 0; count / 2 < max_entries && i < table->Capacity(); i++) {
       Handle<Object> key(table->KeyAt(i), isolate);
       if (table->IsKey(*key)) {
-        entries->set(number_of_non_hole_elements++, *key);
+        entries->set(count++, *key);
         Object* value = table->Lookup(key);
-        entries->set(number_of_non_hole_elements++, value);
+        entries->set(count++, value);
       }
     }
-    DCHECK_EQ(table->NumberOfElements() * 2, number_of_non_hole_elements);
+    DCHECK_EQ(max_entries * 2, count);
   }
   return *isolate->factory()->NewJSArrayWithElements(entries);
 }
@@ -346,21 +384,28 @@ RUNTIME_FUNCTION(Runtime_WeakCollectionSet) {
 
 RUNTIME_FUNCTION(Runtime_GetWeakSetValues) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
+  DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSWeakCollection, holder, 0);
+  CONVERT_NUMBER_CHECKED(int, max_values, Int32, args[1]);
+  RUNTIME_ASSERT(max_values >= 0);
+
   Handle<ObjectHashTable> table(ObjectHashTable::cast(holder->table()));
-  Handle<FixedArray> values =
-      isolate->factory()->NewFixedArray(table->NumberOfElements());
+  if (max_values == 0 || max_values > table->NumberOfElements()) {
+    max_values = table->NumberOfElements();
+  }
+  Handle<FixedArray> values = isolate->factory()->NewFixedArray(max_values);
+  // Recompute max_values because GC could have removed elements from the table.
+  if (max_values > table->NumberOfElements()) {
+    max_values = table->NumberOfElements();
+  }
   {
     DisallowHeapAllocation no_gc;
-    int number_of_non_hole_elements = 0;
-    for (int i = 0; i < table->Capacity(); i++) {
+    int count = 0;
+    for (int i = 0; count < max_values && i < table->Capacity(); i++) {
       Handle<Object> key(table->KeyAt(i), isolate);
-      if (table->IsKey(*key)) {
-        values->set(number_of_non_hole_elements++, *key);
-      }
+      if (table->IsKey(*key)) values->set(count++, *key);
     }
-    DCHECK_EQ(table->NumberOfElements(), number_of_non_hole_elements);
+    DCHECK_EQ(max_values, count);
   }
   return *isolate->factory()->NewJSArrayWithElements(values);
 }
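
Both weak-collection helpers now take a cap so the debugger can preview large collections without copying every entry; a sketch under --allow-natives-syntax:

var ws = new WeakSet();
var a = {}, b = {}, c = {};
ws.add(a); ws.add(b); ws.add(c);
%GetWeakSetValues(ws, 2);  // at most two values are returned
%GetWeakSetValues(ws, 0);  // 0 means no limit: all values
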
index 2e806fa..ebd0c13 100644 (file)
@@ -47,10 +47,10 @@ RUNTIME_FUNCTION(Runtime_CompileOptimized) {
   DCHECK(args.length() == 2);
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(concurrent, 1);
+  DCHECK(isolate->use_crankshaft());
 
   Handle<Code> unoptimized(function->shared()->code());
-  if (!isolate->use_crankshaft() ||
-      function->shared()->optimization_disabled() ||
+  if (function->shared()->optimization_disabled() ||
       isolate->DebuggerHasBreakPoints()) {
     // If the function is not optimizable or debugger is active continue
     // using the code from the full compiler.
@@ -176,7 +176,7 @@ static bool IsSuitableForOnStackReplacement(Isolate* isolate,
                                             Handle<JSFunction> function,
                                             Handle<Code> current_code) {
   // Keep track of whether we've succeeded in optimizing.
-  if (!isolate->use_crankshaft() || !current_code->optimizable()) return false;
+  if (!current_code->optimizable()) return false;
   // If we are trying to do OSR when there are already optimized
   // activations of the function, it means (a) the function is directly or
   // indirectly recursive and (b) an optimized invocation has been
@@ -347,9 +347,10 @@ bool CodeGenerationFromStringsAllowed(Isolate* isolate,
 
 RUNTIME_FUNCTION(Runtime_CompileString) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 2);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 0);
   CONVERT_BOOLEAN_ARG_CHECKED(function_literal_only, 1);
+  CONVERT_SMI_ARG_CHECKED(source_offset, 2);
 
   // Extract native context.
   Handle<Context> context(isolate->native_context());
@@ -375,6 +376,14 @@ RUNTIME_FUNCTION(Runtime_CompileString) {
       isolate, fun,
       Compiler::GetFunctionFromEval(source, outer_info, context, SLOPPY,
                                     restriction, RelocInfo::kNoPosition));
+  if (function_literal_only) {
+    // The actual body is wrapped, which shifts line numbers.
+    Handle<Script> script(Script::cast(fun->shared()->script()), isolate);
+    if (script->line_offset() == 0) {
+      int line_num = Script::GetLineNumber(script, source_offset);
+      script->set_line_offset(Smi::FromInt(-line_num));
+    }
+  }
   return *fun;
 }
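
The compensation matters because function_literal_only sources are compiled wrapped in a synthetic function literal (the Function-constructor path), so uncorrected positions would point at the wrapper rather than the user's body string; a hedged illustration:

var f = new Function("a", "\n\nthrow new Error('boom');");
try {
  f();
} catch (e) {
  // With the negative line_offset applied, e.stack reports line numbers
  // relative to the body string instead of the synthetic wrapper.
}
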
 
index 95ac77b..12c5a0d 100644 (file)
@@ -136,8 +136,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
         isolate, element_or_char,
         Runtime::GetElementOrCharAt(isolate, obj, index));
     details->set(0, *element_or_char);
-    details->set(1,
-                 PropertyDetails(NONE, NORMAL, Representation::None()).AsSmi());
+    details->set(1, PropertyDetails(NONE, FIELD, 0).AsSmi());
     return *isolate->factory()->NewJSArrayWithElements(details);
   }
 
@@ -159,7 +158,7 @@ RUNTIME_FUNCTION(Runtime_DebugGetPropertyDetails) {
   details->set(0, *value);
   // TODO(verwaest): Get rid of this random way of handling interceptors.
   PropertyDetails d = it.state() == LookupIterator::INTERCEPTOR
-                          ? PropertyDetails(NONE, NORMAL, 0)
+                          ? PropertyDetails(NONE, FIELD, 0)
                           : it.property_details();
   details->set(1, d.AsSmi());
   details->set(
@@ -214,7 +213,7 @@ RUNTIME_FUNCTION(Runtime_DebugPropertyIndexFromDetails) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 1);
   CONVERT_PROPERTY_DETAILS_CHECKED(details, 0);
-  // TODO(verwaest): Depends on the type of details.
+  // TODO(verwaest): Works only for dictionary mode holders.
   return Smi::FromInt(details.dictionary_index());
 }
 
@@ -801,6 +800,29 @@ MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalContext(
 }
 
 
+MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeScriptScope(
+    Handle<GlobalObject> global) {
+  Isolate* isolate = global->GetIsolate();
+  Handle<ScriptContextTable> script_contexts(
+      global->native_context()->script_context_table());
+
+  Handle<JSObject> script_scope =
+      isolate->factory()->NewJSObject(isolate->object_function());
+
+  for (int context_index = 0; context_index < script_contexts->used();
+       context_index++) {
+    Handle<Context> context =
+        ScriptContextTable::GetContext(script_contexts, context_index);
+    Handle<ScopeInfo> scope_info(ScopeInfo::cast(context->extension()));
+    if (!ScopeInfo::CopyContextLocalsToScopeObject(scope_info, context,
+                                                   script_scope)) {
+      return MaybeHandle<JSObject>();
+    }
+  }
+  return script_scope;
+}
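
Script scopes hold top-level lexical bindings, which (unlike var bindings) are not properties of the global object; that is why the debugger must materialize them into a separate object. A sketch of the distinction, assuming top-level let is enabled:

var onGlobal = 1;  // classic var: becomes a property of the global object
let inScript = 2;  // lexical: lives in a script context instead
this.onGlobal;     // 1
this.inScript;     // undefined: not on the global object
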
+
+
 MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeLocalScope(
     Isolate* isolate, JavaScriptFrame* frame, int inlined_jsframe_index) {
   FrameInspector frame_inspector(frame, inlined_jsframe_index, isolate);
@@ -986,6 +1008,35 @@ static bool SetClosureVariableValue(Isolate* isolate, Handle<Context> context,
 }
 
 
+static bool SetBlockContextVariableValue(Handle<Context> block_context,
+                                         Handle<String> variable_name,
+                                         Handle<Object> new_value) {
+  DCHECK(block_context->IsBlockContext());
+  Handle<ScopeInfo> scope_info(ScopeInfo::cast(block_context->extension()));
+
+  return SetContextLocalValue(block_context->GetIsolate(), scope_info,
+                              block_context, variable_name, new_value);
+}
+
+
+static bool SetScriptVariableValue(Handle<Context> context,
+                                   Handle<String> variable_name,
+                                   Handle<Object> new_value) {
+  Handle<ScriptContextTable> script_contexts(
+      context->global_object()->native_context()->script_context_table());
+  ScriptContextTable::LookupResult lookup_result;
+  if (ScriptContextTable::Lookup(script_contexts, variable_name,
+                                 &lookup_result)) {
+    Handle<Context> script_context = ScriptContextTable::GetContext(
+        script_contexts, lookup_result.context_index);
+    script_context->set(lookup_result.slot_index, *new_value);
+    return true;
+  }
+
+  return false;
+}
+
+
 // Create a plain JSObject which materializes the scope for the specified
 // catch context.
 MUST_USE_RESULT static MaybeHandle<JSObject> MaterializeCatchScope(
@@ -1073,6 +1124,7 @@ class ScopeIterator {
     ScopeTypeClosure,
     ScopeTypeCatch,
     ScopeTypeBlock,
+    ScopeTypeScript,
     ScopeTypeModule
   };
 
@@ -1084,6 +1136,7 @@ class ScopeIterator {
         function_(frame->function()),
         context_(Context::cast(frame->context())),
         nested_scope_chain_(4),
+        seen_script_scope_(false),
         failed_(false) {
     // Catch the case when the debugger stops in an internal function.
     Handle<SharedFunctionInfo> shared_info(function_->shared());
@@ -1147,7 +1200,7 @@ class ScopeIterator {
           scope_info->scope_type() != ARROW_SCOPE) {
         // Global or eval code.
         CompilationInfoWithZone info(script);
-        if (scope_info->scope_type() == GLOBAL_SCOPE) {
+        if (scope_info->scope_type() == SCRIPT_SCOPE) {
           info.MarkAsGlobal();
         } else {
           DCHECK(scope_info->scope_type() == EVAL_SCOPE);
@@ -1175,6 +1228,7 @@ class ScopeIterator {
         inlined_jsframe_index_(0),
         function_(function),
         context_(function->context()),
+        seen_script_scope_(false),
         failed_(false) {
     if (function->IsBuiltin()) {
       context_ = Handle<Context>();
@@ -1199,8 +1253,16 @@ class ScopeIterator {
       context_ = Handle<Context>();
       return;
     }
+    if (scope_type == ScopeTypeScript) seen_script_scope_ = true;
     if (nested_scope_chain_.is_empty()) {
-      context_ = Handle<Context>(context_->previous(), isolate_);
+      if (scope_type == ScopeTypeScript) {
+        if (context_->IsScriptContext()) {
+          context_ = Handle<Context>(context_->previous(), isolate_);
+        }
+        CHECK(context_->IsNativeContext());
+      } else {
+        context_ = Handle<Context>(context_->previous(), isolate_);
+      }
     } else {
       if (nested_scope_chain_.last()->HasContext()) {
         DCHECK(context_->previous() != NULL);
@@ -1223,9 +1285,9 @@ class ScopeIterator {
         case MODULE_SCOPE:
           DCHECK(context_->IsModuleContext());
           return ScopeTypeModule;
-        case GLOBAL_SCOPE:
-          DCHECK(context_->IsNativeContext());
-          return ScopeTypeGlobal;
+        case SCRIPT_SCOPE:
+          DCHECK(context_->IsScriptContext() || context_->IsNativeContext());
+          return ScopeTypeScript;
         case WITH_SCOPE:
           DCHECK(context_->IsWithContext());
           return ScopeTypeWith;
@@ -1241,7 +1303,9 @@ class ScopeIterator {
     }
     if (context_->IsNativeContext()) {
       DCHECK(context_->global_object()->IsGlobalObject());
-      return ScopeTypeGlobal;
+      // If we are at the native context and have not yet seen script scope,
+      // fake it.
+      return seen_script_scope_ ? ScopeTypeGlobal : ScopeTypeScript;
     }
     if (context_->IsFunctionContext()) {
       return ScopeTypeClosure;
@@ -1255,6 +1319,9 @@ class ScopeIterator {
     if (context_->IsModuleContext()) {
       return ScopeTypeModule;
     }
+    if (context_->IsScriptContext()) {
+      return ScopeTypeScript;
+    }
     DCHECK(context_->IsWithContext());
     return ScopeTypeWith;
   }
@@ -1265,6 +1332,9 @@ class ScopeIterator {
     switch (Type()) {
       case ScopeIterator::ScopeTypeGlobal:
         return Handle<JSObject>(CurrentContext()->global_object());
+      case ScopeIterator::ScopeTypeScript:
+        return MaterializeScriptScope(
+            Handle<GlobalObject>(CurrentContext()->global_object()));
       case ScopeIterator::ScopeTypeLocal:
         // Materialize the content of the local scope into a JSObject.
         DCHECK(nested_scope_chain_.length() == 1);
@@ -1303,9 +1373,12 @@ class ScopeIterator {
       case ScopeIterator::ScopeTypeClosure:
         return SetClosureVariableValue(isolate_, CurrentContext(),
                                        variable_name, new_value);
+      case ScopeIterator::ScopeTypeScript:
+        return SetScriptVariableValue(CurrentContext(), variable_name,
+                                      new_value);
       case ScopeIterator::ScopeTypeBlock:
-        // TODO(2399): should we implement it?
-        break;
+        return SetBlockContextVariableValue(CurrentContext(), variable_name,
+                                            new_value);
       case ScopeIterator::ScopeTypeModule:
         // TODO(2399): should we implement it?
         break;
@@ -1329,7 +1402,8 @@ class ScopeIterator {
   // be an actual context.
   Handle<Context> CurrentContext() {
     DCHECK(!failed_);
-    if (Type() == ScopeTypeGlobal || nested_scope_chain_.is_empty()) {
+    if (Type() == ScopeTypeGlobal || Type() == ScopeTypeScript ||
+        nested_scope_chain_.is_empty()) {
       return context_;
     } else if (nested_scope_chain_.last()->HasContext()) {
       return context_;
@@ -1386,6 +1460,15 @@ class ScopeIterator {
         }
         break;
 
+      case ScopeIterator::ScopeTypeScript:
+        os << "Script:\n";
+        CurrentContext()
+            ->global_object()
+            ->native_context()
+            ->script_context_table()
+            ->Print(os);
+        break;
+
       default:
         UNREACHABLE();
     }
@@ -1400,6 +1483,7 @@ class ScopeIterator {
   Handle<JSFunction> function_;
   Handle<Context> context_;
   List<Handle<ScopeInfo> > nested_scope_chain_;
+  bool seen_script_scope_;
   bool failed_;
 
   void RetrieveScopeChain(Scope* scope,
@@ -2083,8 +2167,9 @@ static MaybeHandle<Object> DebugEvaluate(Isolate* isolate,
 static Handle<JSObject> NewJSObjectWithNullProto(Isolate* isolate) {
   Handle<JSObject> result =
       isolate->factory()->NewJSObject(isolate->object_function());
-  Handle<Map> new_map = Map::Copy(Handle<Map>(result->map()));
-  new_map->set_prototype(*isolate->factory()->null_value());
+  Handle<Map> new_map =
+      Map::Copy(Handle<Map>(result->map()), "ObjectWithNullProto");
+  new_map->SetPrototype(isolate->factory()->null_value());
   JSObject::MigrateToMap(result, new_map);
   return result;
 }
@@ -2163,6 +2248,7 @@ RUNTIME_FUNCTION(Runtime_DebugEvaluate) {
   // We iterate to find the function's context. If the function has no
   // context-allocated variables, we iterate until we hit the outer context.
   while (!function_context->IsFunctionContext() &&
+         !function_context->IsScriptContext() &&
          !function_context.is_identical_to(outer_context)) {
     inner_context = function_context;
     function_context = Handle<Context>(function_context->previous(), isolate);
@@ -2645,7 +2731,9 @@ RUNTIME_FUNCTION(Runtime_GetScript) {
 // to a built-in function such as Array.forEach.
 RUNTIME_FUNCTION(Runtime_DebugCallbackSupportsStepping) {
   DCHECK(args.length() == 1);
-  if (!isolate->debug()->is_active() || !isolate->debug()->StepInActive()) {
+  Debug* debug = isolate->debug();
+  if (!debug->is_active() || !debug->IsStepping() ||
+      debug->last_step_action() != StepIn) {
     return isolate->heap()->false_value();
   }
   CONVERT_ARG_CHECKED(Object, callback, 0);
index 9c2add7..ff07acd 100644 (file)
@@ -135,16 +135,14 @@ RUNTIME_FUNCTION(Runtime_ResumeJSGeneratorObject) {
 }
 
 
-RUNTIME_FUNCTION(Runtime_ThrowGeneratorStateError) {
+RUNTIME_FUNCTION(Runtime_GeneratorClose) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(JSGeneratorObject, generator, 0);
-  int continuation = generator->continuation();
-  const char* message = continuation == JSGeneratorObject::kGeneratorClosed
-                            ? "generator_finished"
-                            : "generator_running";
-  Vector<Handle<Object> > argv = HandleVector<Object>(NULL, 0);
-  THROW_NEW_ERROR_RETURN_FAILURE(isolate, NewError(message, argv));
+
+  generator->set_continuation(JSGeneratorObject::kGeneratorClosed);
+
+  return isolate->heap()->undefined_value();
 }
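
The hunk above turns generator closing from an exception ("generator_finished"/"generator_running") into a plain state transition. As a rough sketch of the new contract, under the stated assumption that the continuation field encodes the generator state:

    // Minimal sketch, not V8 code: closing a generator is now a state write
    // that returns normally, rather than a thrown error.
    enum class GeneratorState { kSuspended, kRunning, kClosed };

    struct Generator {
      GeneratorState continuation = GeneratorState::kSuspended;
    };

    void GeneratorClose(Generator* g) {
      g->continuation = GeneratorState::kClosed;
    }
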
 
 
index 23b5b19..8bbe0ee 100644 (file)
@@ -17,53 +17,22 @@ namespace internal {
 static Handle<Map> ComputeObjectLiteralMap(
     Handle<Context> context, Handle<FixedArray> constant_properties,
     bool* is_result_from_cache) {
-  Isolate* isolate = context->GetIsolate();
   int properties_length = constant_properties->length();
   int number_of_properties = properties_length / 2;
-  // Check that there are only internal strings and array indices among keys.
-  int number_of_string_keys = 0;
+
   for (int p = 0; p != properties_length; p += 2) {
     Object* key = constant_properties->get(p);
     uint32_t element_index = 0;
-    if (key->IsInternalizedString()) {
-      number_of_string_keys++;
-    } else if (key->ToArrayIndex(&element_index)) {
+    if (key->ToArrayIndex(&element_index)) {
       // An index key does not require space in the property backing store.
       number_of_properties--;
-    } else {
-      // Bail out as a non-internalized-string non-index key makes caching
-      // impossible.
-      // DCHECK to make sure that the if condition after the loop is false.
-      DCHECK(number_of_string_keys != number_of_properties);
-      break;
-    }
-  }
-  // If we only have internalized strings and array indices among keys then we
-  // can use the map cache in the native context.
-  const int kMaxKeys = 10;
-  if ((number_of_string_keys == number_of_properties) &&
-      (number_of_string_keys < kMaxKeys)) {
-    // Create the fixed array with the key.
-    Handle<FixedArray> keys =
-        isolate->factory()->NewFixedArray(number_of_string_keys);
-    if (number_of_string_keys > 0) {
-      int index = 0;
-      for (int p = 0; p < properties_length; p += 2) {
-        Object* key = constant_properties->get(p);
-        if (key->IsInternalizedString()) {
-          keys->set(index++, key);
-        }
-      }
-      DCHECK(index == number_of_string_keys);
     }
-    *is_result_from_cache = true;
-    return isolate->factory()->ObjectLiteralMapFromCache(context, keys);
   }
-  *is_result_from_cache = false;
-  return Map::Create(isolate, number_of_properties);
+  Isolate* isolate = context->GetIsolate();
+  return isolate->factory()->ObjectLiteralMapFromCache(
+      context, number_of_properties, is_result_from_cache);
 }
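
The simplified ComputeObjectLiteralMap above only needs to decide which keys are array indices, since those are stored as elements rather than named properties. A self-contained sketch of that counting rule, with a hand-rolled approximation of key->ToArrayIndex:

    #include <cstdint>
    #include <string>
    #include <vector>

    // Approximates ToArrayIndex: a canonical decimal string in [0, 2^32 - 2].
    bool IsArrayIndex(const std::string& key, uint32_t* index) {
      if (key.empty() || key.size() > 10) return false;
      if (key.size() > 1 && key[0] == '0') return false;  // No leading zeros.
      uint64_t value = 0;
      for (char c : key) {
        if (c < '0' || c > '9') return false;
        value = value * 10 + (c - '0');
      }
      if (value > 4294967294ull) return false;
      *index = static_cast<uint32_t>(value);
      return true;
    }

    // Keys sit at even positions of (key0, value0, key1, value1, ...); an
    // index key does not consume a slot in the property backing store.
    int CountNamedProperties(const std::vector<std::string>& keys_and_values) {
      int number_of_properties = static_cast<int>(keys_and_values.size()) / 2;
      for (size_t p = 0; p < keys_and_values.size(); p += 2) {
        uint32_t element_index;
        if (IsArrayIndex(keys_and_values[p], &element_index)) {
          number_of_properties--;
        }
      }
      return number_of_properties;
    }
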
 
-
 MUST_USE_RESULT static MaybeHandle<Object> CreateLiteralBoilerplate(
     Isolate* isolate, Handle<FixedArray> literals,
     Handle<FixedArray> constant_properties);
@@ -109,7 +78,7 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
   if (should_normalize) {
     // TODO(verwaest): We might not want to ever normalize here.
     JSObject::NormalizeProperties(boilerplate, KEEP_INOBJECT_PROPERTIES,
-                                  length / 2);
+                                  length / 2, "Boilerplate");
   }
   // TODO(verwaest): Support tracking representations in the boilerplate.
   for (int index = 0; index < length; index += 2) {
@@ -166,9 +135,9 @@ MUST_USE_RESULT static MaybeHandle<Object> CreateObjectLiteralBoilerplate(
   // constant function properties.
   if (should_transform && !has_function_literal) {
     JSObject::MigrateSlowToFast(boilerplate,
-                                boilerplate->map()->unused_property_fields());
+                                boilerplate->map()->unused_property_fields(),
+                                "FastLiteral");
   }
-
   return boilerplate;
 }
 
index 74cb8cb..407f237 100644 (file)
@@ -242,10 +242,8 @@ MaybeHandle<Object> Runtime::DefineObjectProperty(Handle<JSObject> js_object,
 }
 
 
-RUNTIME_FUNCTION(Runtime_GetPrototype) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
+MaybeHandle<Object> Runtime::GetPrototype(Isolate* isolate,
+                                          Handle<Object> obj) {
   // We don't expect access checks to be needed on JSProxy objects.
   DCHECK(!obj->IsAccessCheckNeeded() || obj->IsJSObject());
   PrototypeIterator iter(isolate, obj, PrototypeIterator::START_AT_RECEIVER);
@@ -257,15 +255,26 @@ RUNTIME_FUNCTION(Runtime_GetPrototype) {
       isolate->ReportFailedAccessCheck(
           Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter)),
           v8::ACCESS_GET);
-      RETURN_FAILURE_IF_SCHEDULED_EXCEPTION(isolate);
-      return isolate->heap()->undefined_value();
+      RETURN_EXCEPTION_IF_SCHEDULED_EXCEPTION(isolate, Object);
+      return isolate->factory()->undefined_value();
     }
     iter.AdvanceIgnoringProxies();
     if (PrototypeIterator::GetCurrent(iter)->IsJSProxy()) {
-      return *PrototypeIterator::GetCurrent(iter);
+      return PrototypeIterator::GetCurrent(iter);
     }
   } while (!iter.IsAtEnd(PrototypeIterator::END_AT_NON_HIDDEN));
-  return *PrototypeIterator::GetCurrent(iter);
+  return PrototypeIterator::GetCurrent(iter);
+}
+
+
+RUNTIME_FUNCTION(Runtime_GetPrototype) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(Object, obj, 0);
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
+                                     Runtime::GetPrototype(isolate, obj));
+  return *result;
 }
 
 
@@ -469,7 +478,7 @@ RUNTIME_FUNCTION(Runtime_DisableAccessChecks) {
   bool needs_access_checks = old_map->is_access_check_needed();
   if (needs_access_checks) {
     // Copy the map so it won't interfere with the constructor's initial map.
-    Handle<Map> new_map = Map::Copy(old_map);
+    Handle<Map> new_map = Map::Copy(old_map, "DisableAccessChecks");
     new_map->set_is_access_check_needed(false);
     JSObject::MigrateToMap(Handle<JSObject>::cast(object), new_map);
   }
@@ -484,7 +493,7 @@ RUNTIME_FUNCTION(Runtime_EnableAccessChecks) {
   Handle<Map> old_map(object->map());
   RUNTIME_ASSERT(!old_map->is_access_check_needed());
   // Copy the map so it won't interfere with the constructor's initial map.
-  Handle<Map> new_map = Map::Copy(old_map);
+  Handle<Map> new_map = Map::Copy(old_map, "EnableAccessChecks");
   new_map->set_is_access_check_needed(true);
   JSObject::MigrateToMap(object, new_map);
   return isolate->heap()->undefined_value();
@@ -499,7 +508,8 @@ RUNTIME_FUNCTION(Runtime_OptimizeObjectForAddingMultipleProperties) {
   // Conservative upper limit to prevent fuzz tests from going OOM.
   RUNTIME_ASSERT(properties <= 100000);
   if (object->HasFastProperties() && !object->IsJSGlobalProxy()) {
-    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties);
+    JSObject::NormalizeProperties(object, KEEP_INOBJECT_PROPERTIES, properties,
+                                  "OptimizeForAdding");
   }
   return *object;
 }
@@ -520,6 +530,21 @@ RUNTIME_FUNCTION(Runtime_ObjectFreeze) {
 }
 
 
+RUNTIME_FUNCTION(Runtime_ObjectSeal) {
+  HandleScope scope(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_HANDLE_CHECKED(JSObject, object, 0);
+
+  // %ObjectSeal is a fast path and these cases are handled elsewhere.
+  RUNTIME_ASSERT(!object->HasSloppyArgumentsElements() &&
+                 !object->map()->is_observed() && !object->IsJSProxy());
+
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result, JSObject::Seal(object));
+  return *result;
+}
+
+
 RUNTIME_FUNCTION(Runtime_GetProperty) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
@@ -607,7 +632,7 @@ RUNTIME_FUNCTION(Runtime_KeyedGetProperty) {
         NameDictionary* dictionary = receiver->property_dictionary();
         int entry = dictionary->FindEntry(key);
         if ((entry != NameDictionary::kNotFound) &&
-            (dictionary->DetailsAt(entry).type() == NORMAL)) {
+            (dictionary->DetailsAt(entry).type() == FIELD)) {
           Object* value = dictionary->ValueAt(entry);
           if (!receiver->IsGlobalObject()) return value;
           value = PropertyCell::cast(value)->value();
@@ -790,7 +815,12 @@ RUNTIME_FUNCTION(Runtime_HasOwnProperty) {
     // Fast case: either the key is a real named property or it is not
     // an array index and there are no interceptors or hidden
     // prototypes.
-    Maybe<bool> maybe = JSObject::HasRealNamedProperty(js_obj, key);
+    Maybe<bool> maybe;
+    if (key_is_array_index) {
+      maybe = JSObject::HasOwnElement(js_obj, index);
+    } else {
+      maybe = JSObject::HasRealNamedProperty(js_obj, key);
+    }
     if (!maybe.has_value) return isolate->heap()->exception();
     DCHECK(!isolate->has_pending_exception());
     if (maybe.value) {
@@ -980,31 +1010,27 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
       Handle<JSObject> jsproto =
           Handle<JSObject>::cast(PrototypeIterator::GetCurrent(iter));
       jsproto->GetOwnPropertyNames(*names, next_copy_index, filter);
-      if (i > 0) {
-        // Names from hidden prototypes may already have been added
-        // for inherited function template instances. Count the duplicates
-        // and stub them out; the final copy pass at the end ignores holes.
-        for (int j = next_copy_index;
-             j < next_copy_index + own_property_count[i]; j++) {
-          Object* name_from_hidden_proto = names->get(j);
+      // Names from hidden prototypes may already have been added
+      // for inherited function template instances. Count the duplicates
+      // and stub them out; the final copy pass at the end ignores holes.
+      for (int j = next_copy_index; j < next_copy_index + own_property_count[i];
+           j++) {
+        Object* name_from_hidden_proto = names->get(j);
+        if (isolate->IsInternallyUsedPropertyName(name_from_hidden_proto)) {
+          hidden_strings++;
+        } else {
           for (int k = 0; k < next_copy_index; k++) {
-            if (names->get(k) != isolate->heap()->hidden_string()) {
-              Object* name = names->get(k);
-              if (name_from_hidden_proto == name) {
-                names->set(j, isolate->heap()->hidden_string());
-                hidden_strings++;
-                break;
-              }
+            Object* name = names->get(k);
+            if (name_from_hidden_proto == name) {
+              names->set(j, isolate->heap()->hidden_string());
+              hidden_strings++;
+              break;
             }
           }
         }
       }
       next_copy_index += own_property_count[i];
 
-      // Hidden properties only show up if the filter does not skip strings.
-      if ((filter & STRING) == 0 && JSObject::HasHiddenProperties(jsproto)) {
-        hidden_strings++;
-      }
       iter.Advance();
     }
   }
@@ -1017,7 +1043,7 @@ RUNTIME_FUNCTION(Runtime_GetOwnPropertyNames) {
     int dest_pos = 0;
     for (int i = 0; i < total_property_count; i++) {
       Object* name = old_names->get(i);
-      if (name == isolate->heap()->hidden_string()) {
+      if (isolate->IsInternallyUsedPropertyName(name)) {
         hidden_strings--;
         continue;
       }
@@ -1152,7 +1178,8 @@ RUNTIME_FUNCTION(Runtime_ToFastProperties) {
   DCHECK(args.length() == 1);
   CONVERT_ARG_HANDLE_CHECKED(Object, object, 0);
   if (object->IsJSObject() && !object->IsGlobalObject()) {
-    JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0);
+    JSObject::MigrateSlowToFast(Handle<JSObject>::cast(object), 0,
+                                "RuntimeToFastProperties");
   }
   return *object;
 }
@@ -1357,16 +1384,6 @@ RUNTIME_FUNCTION(Runtime_GlobalProxy) {
 }
 
 
-RUNTIME_FUNCTION(Runtime_IsAttachedGlobal) {
-  SealHandleScope shs(isolate);
-  DCHECK(args.length() == 1);
-  CONVERT_ARG_CHECKED(Object, global, 0);
-  if (!global->IsJSGlobalObject()) return isolate->heap()->false_value();
-  return isolate->heap()->ToBoolean(
-      !JSGlobalObject::cast(global)->IsDetached());
-}
-
-
 RUNTIME_FUNCTION(Runtime_LookupAccessor) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 3);
@@ -1398,9 +1415,8 @@ RUNTIME_FUNCTION(Runtime_LoadMutableDouble) {
     RUNTIME_ASSERT(field_index.outobject_array_index() <
                    object->properties()->length());
   }
-  Handle<Object> raw_value(object->RawFastPropertyAt(field_index), isolate);
-  RUNTIME_ASSERT(raw_value->IsMutableHeapNumber());
-  return *Object::WrapForRead(isolate, raw_value, Representation::Double());
+  return *JSObject::FastPropertyAt(object, Representation::Double(),
+                                   field_index);
 }
 
 
@@ -1453,10 +1469,8 @@ RUNTIME_FUNCTION(Runtime_DefineAccessorPropertyUnchecked) {
   RUNTIME_ASSERT((unchecked & ~(READ_ONLY | DONT_ENUM | DONT_DELETE)) == 0);
   PropertyAttributes attr = static_cast<PropertyAttributes>(unchecked);
 
-  bool fast = obj->HasFastProperties();
   RETURN_FAILURE_ON_EXCEPTION(
       isolate, JSObject::DefineAccessor(obj, name, getter, setter, attr));
-  if (fast) JSObject::MigrateSlowToFast(obj, 0);
   return isolate->heap()->undefined_value();
 }
 
@@ -1516,6 +1530,15 @@ RUNTIME_FUNCTION(Runtime_GetDataProperty) {
 }
 
 
+RUNTIME_FUNCTION(Runtime_HasFastPackedElements) {
+  SealHandleScope shs(isolate);
+  DCHECK(args.length() == 1);
+  CONVERT_ARG_CHECKED(HeapObject, obj, 0);
+  return isolate->heap()->ToBoolean(
+      IsFastPackedElementsKind(obj->map()->elements_kind()));
+}
+
+
 RUNTIME_FUNCTION(RuntimeReference_ValueOf) {
   SealHandleScope shs(isolate);
   DCHECK(args.length() == 1);
index 4579136..211922c 100644 (file)
@@ -5,6 +5,7 @@
 #include "src/v8.h"
 
 #include "src/arguments.h"
+#include "src/debug.h"
 #include "src/runtime/runtime-utils.h"
 
 namespace v8 {
@@ -63,6 +64,18 @@ RUNTIME_FUNCTION(Runtime_DeliverObservationChangeRecords) {
   // we make a call inside a verbose TryCatch.
   catcher.SetVerbose(true);
   Handle<Object> argv[] = {argument};
+
+  // Allow stepping into the observer callback.
+  Debug* debug = isolate->debug();
+  if (debug->is_active() && debug->IsStepping() &&
+      debug->last_step_action() == StepIn) {
+    // A previous StepIn may have activated a StepOut if it was at the frame
+    // exit. In that case, to be able to step into the callback again, we
+    // need to clear the step out first.
+    debug->ClearStepOut();
+    debug->FloodWithOneShot(callback);
+  }
+
   USE(Execution::Call(isolate, callback, isolate->factory()->undefined_value(),
                       arraysize(argv), argv));
   if (isolate->has_pending_exception()) {
index b613f8a..be9adff 100644 (file)
@@ -8,7 +8,7 @@
 #include "src/jsregexp-inl.h"
 #include "src/jsregexp.h"
 #include "src/runtime/runtime-utils.h"
-#include "src/runtime/string-builder.h"
+#include "src/string-builder.h"
 #include "src/string-search.h"
 
 namespace v8 {
@@ -759,19 +759,6 @@ RUNTIME_FUNCTION(Runtime_StringSplit) {
 }
 
 
-RUNTIME_FUNCTION(Runtime_RegExpCompile) {
-  HandleScope scope(isolate);
-  DCHECK(args.length() == 3);
-  CONVERT_ARG_HANDLE_CHECKED(JSRegExp, re, 0);
-  CONVERT_ARG_HANDLE_CHECKED(String, pattern, 1);
-  CONVERT_ARG_HANDLE_CHECKED(String, flags, 2);
-  Handle<Object> result;
-  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(isolate, result,
-                                     RegExpImpl::Compile(re, pattern, flags));
-  return *result;
-}
-
-
 RUNTIME_FUNCTION(Runtime_RegExpExecRT) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 4);
@@ -813,72 +800,110 @@ RUNTIME_FUNCTION(Runtime_RegExpConstructResult) {
 }
 
 
-RUNTIME_FUNCTION(Runtime_RegExpInitializeObject) {
+static JSRegExp::Flags RegExpFlagsFromString(Handle<String> flags,
+                                             bool* success) {
+  uint32_t value = JSRegExp::NONE;
+  int length = flags->length();
+  // A longer flags string cannot be valid.
+  if (length > 4) return JSRegExp::Flags(0);
+  for (int i = 0; i < length; i++) {
+    uint32_t flag = JSRegExp::NONE;
+    switch (flags->Get(i)) {
+      case 'g':
+        flag = JSRegExp::GLOBAL;
+        break;
+      case 'i':
+        flag = JSRegExp::IGNORE_CASE;
+        break;
+      case 'm':
+        flag = JSRegExp::MULTILINE;
+        break;
+      case 'y':
+        if (!FLAG_harmony_regexps) return JSRegExp::Flags(0);
+        flag = JSRegExp::STICKY;
+        break;
+      default:
+        return JSRegExp::Flags(0);
+    }
+    // Duplicate flag.
+    if (value & flag) return JSRegExp::Flags(0);
+    value |= flag;
+  }
+  *success = true;
+  return JSRegExp::Flags(value);
+}
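
RegExpFlagsFromString above folds parsing and validation into a single pass: strings longer than four characters, unknown characters, and duplicates (detected via the accumulated bit mask) all fail. A standalone sketch of the same scheme, ignoring the --harmony-regexps gate on 'y':

    #include <cstdint>
    #include <string>

    enum RegExpFlag : uint32_t {
      kNone = 0,
      kGlobal = 1 << 0,
      kIgnoreCase = 1 << 1,
      kMultiline = 1 << 2,
      kSticky = 1 << 3
    };

    // Returns false for an over-long string or an unknown or duplicated flag.
    bool ParseRegExpFlags(const std::string& flags, uint32_t* out) {
      if (flags.size() > 4) return false;  // A longer string cannot be valid.
      uint32_t value = kNone;
      for (char c : flags) {
        uint32_t flag;
        switch (c) {
          case 'g': flag = kGlobal; break;
          case 'i': flag = kIgnoreCase; break;
          case 'm': flag = kMultiline; break;
          case 'y': flag = kSticky; break;
          default: return false;
        }
        if (value & flag) return false;  // Duplicate flag.
        value |= flag;
      }
      *out = value;
      return true;
    }
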
+
+
+RUNTIME_FUNCTION(Runtime_RegExpInitializeAndCompile) {
   HandleScope scope(isolate);
-  DCHECK(args.length() == 6);
+  DCHECK(args.length() == 3);
   CONVERT_ARG_HANDLE_CHECKED(JSRegExp, regexp, 0);
   CONVERT_ARG_HANDLE_CHECKED(String, source, 1);
+  CONVERT_ARG_HANDLE_CHECKED(String, flags_string, 2);
+  Factory* factory = isolate->factory();
   // If source is the empty string we set it to "(?:)" instead as
   // suggested by ECMA-262, 5th, section 15.10.4.1.
-  if (source->length() == 0) source = isolate->factory()->query_colon_string();
-
-  CONVERT_ARG_HANDLE_CHECKED(Object, global, 2);
-  if (!global->IsTrue()) global = isolate->factory()->false_value();
-
-  CONVERT_ARG_HANDLE_CHECKED(Object, ignoreCase, 3);
-  if (!ignoreCase->IsTrue()) ignoreCase = isolate->factory()->false_value();
-
-  CONVERT_ARG_HANDLE_CHECKED(Object, multiline, 4);
-  if (!multiline->IsTrue()) multiline = isolate->factory()->false_value();
+  if (source->length() == 0) source = factory->query_colon_string();
+
+  bool success = false;
+  JSRegExp::Flags flags = RegExpFlagsFromString(flags_string, &success);
+  if (!success) {
+    Handle<FixedArray> element = factory->NewFixedArray(1);
+    element->set(0, *flags_string);
+    Handle<JSArray> args = factory->NewJSArrayWithElements(element);
+    THROW_NEW_ERROR_RETURN_FAILURE(
+        isolate, NewSyntaxError("invalid_regexp_flags", args));
+  }
 
-  CONVERT_ARG_HANDLE_CHECKED(Object, sticky, 5);
-  if (!sticky->IsTrue()) sticky = isolate->factory()->false_value();
+  Handle<Object> global = factory->ToBoolean(flags.is_global());
+  Handle<Object> ignore_case = factory->ToBoolean(flags.is_ignore_case());
+  Handle<Object> multiline = factory->ToBoolean(flags.is_multiline());
+  Handle<Object> sticky = factory->ToBoolean(flags.is_sticky());
 
   Map* map = regexp->map();
   Object* constructor = map->constructor();
   if (!FLAG_harmony_regexps && constructor->IsJSFunction() &&
       JSFunction::cast(constructor)->initial_map() == map) {
     // If we still have the original map, set in-object properties directly.
-    regexp->InObjectPropertyAtPut(JSRegExp::kSourceFieldIndex, *source);
     // Both true and false are immovable immortal objects so no need for write
     // barrier.
     regexp->InObjectPropertyAtPut(JSRegExp::kGlobalFieldIndex, *global,
                                   SKIP_WRITE_BARRIER);
-    regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, *ignoreCase,
+    regexp->InObjectPropertyAtPut(JSRegExp::kIgnoreCaseFieldIndex, *ignore_case,
                                   SKIP_WRITE_BARRIER);
     regexp->InObjectPropertyAtPut(JSRegExp::kMultilineFieldIndex, *multiline,
                                   SKIP_WRITE_BARRIER);
     regexp->InObjectPropertyAtPut(JSRegExp::kLastIndexFieldIndex,
                                   Smi::FromInt(0), SKIP_WRITE_BARRIER);
-    return *regexp;
+  } else {
+    // The map has changed, so use the generic, but slower, method.  We also
+    // end here if the --harmony-regexps flag is set, because the initial map
+    // does not have space for the 'sticky' flag, since it is from the
+    // snapshot, but must work both with and without --harmony-regexps.  When
+    // sticky comes out from under the flag, we will be able to use the fast
+    // initial map.
+    PropertyAttributes final =
+        static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
+    PropertyAttributes writable =
+        static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
+    Handle<Object> zero(Smi::FromInt(0), isolate);
+    JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->global_string(),
+                                             global, final).Check();
+    JSObject::SetOwnPropertyIgnoreAttributes(
+        regexp, factory->ignore_case_string(), ignore_case, final).Check();
+    JSObject::SetOwnPropertyIgnoreAttributes(
+        regexp, factory->multiline_string(), multiline, final).Check();
+    if (FLAG_harmony_regexps) {
+      JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->sticky_string(),
+                                               sticky, final).Check();
+    }
+    JSObject::SetOwnPropertyIgnoreAttributes(
+        regexp, factory->last_index_string(), zero, writable).Check();
   }
 
-  // Map has changed, so use generic, but slower, method.  We also end here if
-  // the --harmony-regexp flag is set, because the initial map does not have
-  // space for the 'sticky' flag, since it is from the snapshot, but must work
-  // both with and without --harmony-regexp.  When sticky comes out from under
-  // the flag, we will be able to use the fast initial map.
-  PropertyAttributes final =
-      static_cast<PropertyAttributes>(READ_ONLY | DONT_ENUM | DONT_DELETE);
-  PropertyAttributes writable =
-      static_cast<PropertyAttributes>(DONT_ENUM | DONT_DELETE);
-  Handle<Object> zero(Smi::FromInt(0), isolate);
-  Factory* factory = isolate->factory();
-  JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->source_string(),
-                                           source, final).Check();
-  JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->global_string(),
-                                           global, final).Check();
-  JSObject::SetOwnPropertyIgnoreAttributes(
-      regexp, factory->ignore_case_string(), ignoreCase, final).Check();
-  JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->multiline_string(),
-                                           multiline, final).Check();
-  if (FLAG_harmony_regexps) {
-    JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->sticky_string(),
-                                             sticky, final).Check();
-  }
-  JSObject::SetOwnPropertyIgnoreAttributes(regexp, factory->last_index_string(),
-                                           zero, writable).Check();
-  return *regexp;
+  Handle<Object> result;
+  ASSIGN_RETURN_FAILURE_ON_EXCEPTION(
+      isolate, result, RegExpImpl::Compile(regexp, source, flags));
+  return *result;
 }
 
 
index c935cda..2a0b435 100644 (file)
@@ -22,11 +22,27 @@ static Object* ThrowRedeclarationError(Isolate* isolate, Handle<String> name) {
 }
 
 
+RUNTIME_FUNCTION(Runtime_ThrowConstAssignError) {
+  HandleScope scope(isolate);
+  THROW_NEW_ERROR_RETURN_FAILURE(
+      isolate,
+      NewTypeError("harmony_const_assign", HandleVector<Object>(NULL, 0)));
+}
+
+
 // May throw a RedeclarationError.
 static Object* DeclareGlobals(Isolate* isolate, Handle<GlobalObject> global,
                               Handle<String> name, Handle<Object> value,
                               PropertyAttributes attr, bool is_var,
                               bool is_const, bool is_function) {
+  Handle<ScriptContextTable> script_contexts(
+      global->native_context()->script_context_table());
+  ScriptContextTable::LookupResult lookup;
+  if (ScriptContextTable::Lookup(script_contexts, name, &lookup) &&
+      IsLexicalVariableMode(lookup.mode)) {
+    return ThrowRedeclarationError(isolate, name);
+  }
+
   // Do the lookup on own properties only, see ES5 erratum.
   LookupIterator it(global, name, LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
   Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
@@ -189,7 +205,7 @@ RUNTIME_FUNCTION(Runtime_DeclareLookupSlot) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 4);
 
-  // Declarations are always made in a function, native, or global context. In
+  // Declarations are always made in a function, eval, or script context. In
   // the case of eval code, the context passed is the context of the caller,
   // which may be some nested context and not the declaration context.
   CONVERT_ARG_HANDLE_CHECKED(Context, context_arg, 0);
@@ -298,9 +314,11 @@ RUNTIME_FUNCTION(Runtime_InitializeLegacyConstLookupSlot) {
 
   // The declared const was configurable, and may have been deleted in the
   // meantime. If so, re-introduce the variable in the context extension.
-  DCHECK(context_arg->has_extension());
   if (attributes == ABSENT) {
-    holder = handle(context_arg->extension(), isolate);
+    Handle<Context> declaration_context(context_arg->declaration_context());
+    DCHECK(declaration_context->has_extension());
+    holder = handle(declaration_context->extension(), isolate);
+    CHECK(holder->IsJSObject());
   } else {
     // For JSContextExtensionObjects, the initializer can be run multiple times
     // if in a for loop: for (var i = 0; i < 2; i++) { const x = i; }. Only the
@@ -347,7 +365,7 @@ static Handle<JSObject> NewSloppyArguments(Isolate* isolate,
           isolate->factory()->NewFixedArray(mapped_count + 2, NOT_TENURED);
       parameter_map->set_map(isolate->heap()->sloppy_arguments_elements_map());
 
-      Handle<Map> map = Map::Copy(handle(result->map()));
+      Handle<Map> map = Map::Copy(handle(result->map()), "NewSloppyArguments");
       map->set_elements_kind(SLOPPY_ARGUMENTS_ELEMENTS);
 
       result->set_map(*map);
@@ -507,19 +525,61 @@ RUNTIME_FUNCTION(Runtime_NewClosure) {
                                                                 pretenure_flag);
 }
 
+static Object* FindNameClash(Handle<ScopeInfo> scope_info,
+                             Handle<GlobalObject> global_object,
+                             Handle<ScriptContextTable> script_context) {
+  Isolate* isolate = scope_info->GetIsolate();
+  for (int var = 0; var < scope_info->ContextLocalCount(); var++) {
+    Handle<String> name(scope_info->ContextLocalName(var));
+    VariableMode mode = scope_info->ContextLocalMode(var);
+    ScriptContextTable::LookupResult lookup;
+    if (ScriptContextTable::Lookup(script_context, name, &lookup)) {
+      if (IsLexicalVariableMode(mode) || IsLexicalVariableMode(lookup.mode)) {
+        return ThrowRedeclarationError(isolate, name);
+      }
+    }
+
+    if (IsLexicalVariableMode(mode)) {
+      LookupIterator it(global_object, name,
+                        LookupIterator::HIDDEN_SKIP_INTERCEPTOR);
+      Maybe<PropertyAttributes> maybe = JSReceiver::GetPropertyAttributes(&it);
+      if (!maybe.has_value) return isolate->heap()->exception();
+      if ((maybe.value & DONT_DELETE) != 0) {
+        return ThrowRedeclarationError(isolate, name);
+      }
+
+      GlobalObject::InvalidatePropertyCell(global_object, name);
+    }
+  }
+  return isolate->heap()->undefined_value();
+}
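
FindNameClash above enforces the ES6 redeclaration rules at script scope: a clash exists whenever either the new or the existing binding is lexical, and additionally a lexical binding may not shadow a DONT_DELETE global property. A reduced sketch of the first rule:

    #include <string>
    #include <unordered_map>

    enum class VariableMode { kVar, kLet, kConst };

    bool IsLexical(VariableMode mode) { return mode != VariableMode::kVar; }

    // True if declaring `name` with `mode` must raise a redeclaration error,
    // given the bindings already recorded for the script scope.
    bool HasNameClash(
        const std::unordered_map<std::string, VariableMode>& existing,
        const std::string& name, VariableMode mode) {
      auto it = existing.find(name);
      if (it == existing.end()) return false;
      return IsLexical(mode) || IsLexical(it->second);
    }
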
+
 
-RUNTIME_FUNCTION(Runtime_NewGlobalContext) {
+RUNTIME_FUNCTION(Runtime_NewScriptContext) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 2);
 
   CONVERT_ARG_HANDLE_CHECKED(JSFunction, function, 0);
   CONVERT_ARG_HANDLE_CHECKED(ScopeInfo, scope_info, 1);
+  Handle<GlobalObject> global_object(function->context()->global_object());
+  Handle<Context> native_context(global_object->native_context());
+  Handle<ScriptContextTable> script_context_table(
+      native_context->script_context_table());
+
+  Handle<String> clashed_name;
+  Object* name_clash_result =
+      FindNameClash(scope_info, global_object, script_context_table);
+  if (isolate->has_pending_exception()) return name_clash_result;
+
   Handle<Context> result =
-      isolate->factory()->NewGlobalContext(function, scope_info);
+      isolate->factory()->NewScriptContext(function, scope_info);
 
   DCHECK(function->context() == isolate->context());
   DCHECK(function->context()->global_object() == result->global_object());
-  result->global_object()->set_global_context(*result);
+
+  Handle<ScriptContextTable> new_script_context_table =
+      ScriptContextTable::Extend(script_context_table, result);
+  native_context->set_script_context_table(*new_script_context_table);
   return *result;
 }
 
@@ -629,7 +689,7 @@ RUNTIME_FUNCTION(Runtime_PushModuleContext) {
 
   if (!args[1]->IsScopeInfo()) {
     // Module already initialized. Find hosting context and retrieve context.
-    Context* host = Context::cast(isolate->context())->global_context();
+    Context* host = Context::cast(isolate->context())->script_context();
     Context* context = Context::cast(host->get(index));
     DCHECK(context->previous() == isolate->context());
     isolate->set_context(context);
@@ -651,7 +711,7 @@ RUNTIME_FUNCTION(Runtime_PushModuleContext) {
   isolate->set_context(*context);
 
   // Find hosting scope and initialize internal variable holding module there.
-  previous->global_context()->set(index, *context);
+  previous->script_context()->set(index, *context);
 
   return *context;
 }
@@ -956,7 +1016,7 @@ RUNTIME_FUNCTION(Runtime_GetArgumentsProperty) {
   HandleScope scope(isolate);
   if (raw_key->IsSymbol()) {
     Handle<Symbol> symbol = Handle<Symbol>::cast(raw_key);
-    if (symbol->Equals(isolate->native_context()->iterator_symbol())) {
+    if (Name::Equals(symbol, isolate->factory()->iterator_symbol())) {
       return isolate->native_context()->array_values_iterator();
     }
     // Lookup in the initial Object.prototype object.
index 9c74f4b..df2210c 100644 (file)
@@ -8,7 +8,7 @@
 #include "src/jsregexp-inl.h"
 #include "src/jsregexp.h"
 #include "src/runtime/runtime-utils.h"
-#include "src/runtime/string-builder.h"
+#include "src/string-builder.h"
 #include "src/string-search.h"
 
 namespace v8 {
index 4995984..b4b90e2 100644 (file)
@@ -60,9 +60,16 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
                  (function->code()->kind() == Code::FUNCTION &&
                   function->code()->optimizable()));
 
-  // If the function is optimized, just return.
+  if (!isolate->use_crankshaft()) return isolate->heap()->undefined_value();
+
+  // If the function is already optimized, just return.
   if (function->IsOptimized()) return isolate->heap()->undefined_value();
 
+  // If the function cannot be optimized, just return.
+  if (function->shared()->optimization_disabled()) {
+    return isolate->heap()->undefined_value();
+  }
+
   function->MarkForOptimization();
 
   Code* unoptimized = function->shared()->code();
@@ -75,7 +82,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
           *function, Code::kMaxLoopNestingMarker);
     } else if (type->IsOneByteEqualTo(STATIC_CHAR_VECTOR("concurrent")) &&
                isolate->concurrent_recompilation_enabled()) {
-      function->MarkForConcurrentOptimization();
+      function->AttemptConcurrentOptimization();
     }
   }
 
@@ -87,6 +94,7 @@ RUNTIME_FUNCTION(Runtime_NeverOptimizeFunction) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 1);
   CONVERT_ARG_CHECKED(JSFunction, function, 0);
+  function->shared()->set_disable_optimization_reason(kOptimizationDisabled);
   function->shared()->set_optimization_disabled(true);
   return isolate->heap()->undefined_value();
 }
@@ -163,7 +171,7 @@ RUNTIME_FUNCTION(Runtime_ClearFunctionTypeFeedback) {
 RUNTIME_FUNCTION(Runtime_NotifyContextDisposed) {
   HandleScope scope(isolate);
   DCHECK(args.length() == 0);
-  isolate->heap()->NotifyContextDisposed();
+  isolate->heap()->NotifyContextDisposed(true);
   return isolate->heap()->undefined_value();
 }
 
index 16e80b5..477071a 100644 (file)
 namespace v8 {
 namespace internal {
 
-template <typename Char>
-static INLINE(Vector<const Char> GetCharVector(Handle<String> string));
-
-
-template <>
-Vector<const uint8_t> GetCharVector(Handle<String> string) {
-  String::FlatContent flat = string->GetFlatContent();
-  DCHECK(flat.IsOneByte());
-  return flat.ToOneByteVector();
-}
-
-
-template <>
-Vector<const uc16> GetCharVector(Handle<String> string) {
-  String::FlatContent flat = string->GetFlatContent();
-  DCHECK(flat.IsTwoByte());
-  return flat.ToUC16Vector();
-}
-
-
 class URIUnescape : public AllStatic {
  public:
   template <typename Char>
@@ -72,7 +52,7 @@ MaybeHandle<String> URIUnescape::Unescape(Isolate* isolate,
   {
     DisallowHeapAllocation no_allocation;
     StringSearch<uint8_t, Char> search(isolate, STATIC_CHAR_VECTOR("%"));
-    index = search.Search(GetCharVector<Char>(source), 0);
+    index = search.Search(source->GetCharVector<Char>(), 0);
     if (index < 0) return source;
   }
   return UnescapeSlow<Char>(isolate, source, index);
@@ -89,7 +69,7 @@ MaybeHandle<String> URIUnescape::UnescapeSlow(Isolate* isolate,
   int unescaped_length = 0;
   {
     DisallowHeapAllocation no_allocation;
-    Vector<const Char> vector = GetCharVector<Char>(string);
+    Vector<const Char> vector = string->GetCharVector<Char>();
     for (int i = start_index; i < length; unescaped_length++) {
       int step;
       if (UnescapeChar(vector, i, length, &step) >
@@ -112,7 +92,7 @@ MaybeHandle<String> URIUnescape::UnescapeSlow(Isolate* isolate,
                                         ->NewRawOneByteString(unescaped_length)
                                         .ToHandleChecked();
     DisallowHeapAllocation no_allocation;
-    Vector<const Char> vector = GetCharVector<Char>(string);
+    Vector<const Char> vector = string->GetCharVector<Char>();
     for (int i = start_index; i < length; dest_position++) {
       int step;
       dest->SeqOneByteStringSet(dest_position,
@@ -125,7 +105,7 @@ MaybeHandle<String> URIUnescape::UnescapeSlow(Isolate* isolate,
                                         ->NewRawTwoByteString(unescaped_length)
                                         .ToHandleChecked();
     DisallowHeapAllocation no_allocation;
-    Vector<const Char> vector = GetCharVector<Char>(string);
+    Vector<const Char> vector = string->GetCharVector<Char>();
     for (int i = start_index; i < length; dest_position++) {
       int step;
       dest->SeqTwoByteStringSet(dest_position,
@@ -221,7 +201,7 @@ MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
 
   {
     DisallowHeapAllocation no_allocation;
-    Vector<const Char> vector = GetCharVector<Char>(string);
+    Vector<const Char> vector = string->GetCharVector<Char>();
     for (int i = 0; i < length; i++) {
       uint16_t c = vector[i];
       if (c >= 256) {
@@ -249,7 +229,7 @@ MaybeHandle<String> URIEscape::Escape(Isolate* isolate, Handle<String> string) {
 
   {
     DisallowHeapAllocation no_allocation;
-    Vector<const Char> vector = GetCharVector<Char>(string);
+    Vector<const Char> vector = string->GetCharVector<Char>();
     for (int i = 0; i < length; i++) {
       uint16_t c = vector[i];
       if (c >= 256) {
index cd6f36c..459ca50 100644 (file)
@@ -80,8 +80,7 @@ void Runtime::InitializeIntrinsicFunctionNames(Isolate* isolate,
     if (name == NULL) continue;
     Handle<NameDictionary> new_dict = NameDictionary::Add(
         dict, isolate->factory()->InternalizeUtf8String(name),
-        Handle<Smi>(Smi::FromInt(i), isolate),
-        PropertyDetails(NONE, NORMAL, Representation::None()));
+        Handle<Smi>(Smi::FromInt(i), isolate), PropertyDetails(NONE, FIELD, 0));
     // The dictionary does not need to grow.
     CHECK(new_dict.is_identical_to(dict));
   }
index 5d6ccac..9e6c495 100644 (file)
@@ -45,7 +45,6 @@ namespace internal {
   F(IsSloppyModeFunction, 1, 1)                            \
   F(GetDefaultReceiver, 1, 1)                              \
                                                            \
-  F(GetPrototype, 1, 1)                                    \
   F(SetPrototype, 2, 1)                                    \
   F(InternalSetPrototype, 2, 1)                            \
   F(IsInPrototypeChain, 2, 1)                              \
@@ -155,9 +154,8 @@ namespace internal {
   F(RemPiO2, 1, 1)                                         \
                                                            \
   /* Regular expressions */                                \
-  F(RegExpCompile, 3, 1)                                   \
+  F(RegExpInitializeAndCompile, 3, 1)                      \
   F(RegExpExecMultiple, 4, 1)                              \
-  F(RegExpInitializeObject, 6, 1)                          \
                                                            \
   /* JSON */                                               \
   F(ParseJson, 1, 1)                                       \
@@ -199,7 +197,8 @@ namespace internal {
   F(StoreToSuper_Strict, 4, 1)                             \
   F(StoreToSuper_Sloppy, 4, 1)                             \
   F(StoreKeyedToSuper_Strict, 4, 1)                        \
-  F(StoreKeyedToSuper_Sloppy, 4, 1)
+  F(StoreKeyedToSuper_Sloppy, 4, 1)                        \
+  F(DefaultConstructorSuperCall, 0, 1)
 
 
 #define RUNTIME_FUNCTION_LIST_ALWAYS_2(F)              \
@@ -250,11 +249,10 @@ namespace internal {
   F(DateCacheVersion, 0, 1)                            \
                                                        \
   /* Globals */                                        \
-  F(CompileString, 2, 1)                               \
+  F(CompileString, 3, 1)                               \
                                                        \
   /* Eval */                                           \
   F(GlobalProxy, 1, 1)                                 \
-  F(IsAttachedGlobal, 1, 1)                            \
                                                        \
   F(AddNamedProperty, 4, 1)                            \
   F(AddPropertyForTemplate, 4, 1)                      \
@@ -277,6 +275,7 @@ namespace internal {
   F(LookupAccessor, 3, 1)                              \
                                                        \
   /* ES5 */                                            \
+  F(ObjectSeal, 1, 1)                                  \
   F(ObjectFreeze, 1, 1)                                \
                                                        \
   /* Harmony modules */                                \
@@ -301,30 +300,15 @@ namespace internal {
   F(GetConstructTrap, 1, 1)                            \
   F(Fix, 1, 1)                                         \
                                                        \
-  /* Harmony sets */                                   \
-  F(SetInitialize, 1, 1)                               \
-  F(SetAdd, 2, 1)                                      \
-  F(SetHas, 2, 1)                                      \
-  F(SetDelete, 2, 1)                                   \
-  F(SetClear, 1, 1)                                    \
-  F(SetGetSize, 1, 1)                                  \
-                                                       \
+  /* ES6 collection iterators */                       \
   F(SetIteratorInitialize, 3, 1)                       \
   F(SetIteratorClone, 1, 1)                            \
   F(SetIteratorNext, 2, 1)                             \
-                                                       \
-  /* Harmony maps */                                   \
-  F(MapInitialize, 1, 1)                               \
-  F(MapGet, 2, 1)                                      \
-  F(MapHas, 2, 1)                                      \
-  F(MapDelete, 2, 1)                                   \
-  F(MapClear, 1, 1)                                    \
-  F(MapSet, 3, 1)                                      \
-  F(MapGetSize, 1, 1)                                  \
-                                                       \
+  F(SetIteratorDetails, 1, 1)                          \
   F(MapIteratorInitialize, 3, 1)                       \
   F(MapIteratorClone, 1, 1)                            \
   F(MapIteratorNext, 2, 1)                             \
+  F(MapIteratorDetails, 1, 1)                          \
                                                        \
   /* Harmony weak maps and sets */                     \
   F(WeakCollectionInitialize, 1, 1)                    \
@@ -333,8 +317,8 @@ namespace internal {
   F(WeakCollectionDelete, 2, 1)                        \
   F(WeakCollectionSet, 3, 1)                           \
                                                        \
-  F(GetWeakMapEntries, 1, 1)                           \
-  F(GetWeakSetValues, 1, 1)                            \
+  F(GetWeakMapEntries, 2, 1)                           \
+  F(GetWeakSetValues, 2, 1)                            \
                                                        \
   /* Harmony events */                                 \
   F(EnqueueMicrotask, 1, 1)                            \
@@ -476,7 +460,7 @@ namespace internal {
   F(CreateJSGeneratorObject, 0, 1)                           \
   F(SuspendJSGeneratorObject, 1, 1)                          \
   F(ResumeJSGeneratorObject, 3, 1)                           \
-  F(ThrowGeneratorStateError, 1, 1)                          \
+  F(GeneratorClose, 1, 1)                                    \
                                                              \
   /* Arrays */                                               \
   F(ArrayConstructor, -1, 1)                                 \
@@ -498,12 +482,13 @@ namespace internal {
   F(ReThrow, 1, 1)                                           \
   F(ThrowReferenceError, 1, 1)                               \
   F(ThrowNotDateError, 0, 1)                                 \
+  F(ThrowConstAssignError, 0, 1)                             \
   F(StackGuard, 0, 1)                                        \
   F(Interrupt, 0, 1)                                         \
   F(PromoteScheduledException, 0, 1)                         \
                                                              \
   /* Contexts */                                             \
-  F(NewGlobalContext, 2, 1)                                  \
+  F(NewScriptContext, 2, 1)                                  \
   F(NewFunctionContext, 1, 1)                                \
   F(PushWithContext, 2, 1)                                   \
   F(PushCatchContext, 3, 1)                                  \
@@ -728,7 +713,24 @@ namespace internal {
   F(DoubleHi, 1, 1)                       \
   F(DoubleLo, 1, 1)                       \
   F(MathSqrtRT, 1, 1)                     \
-  F(MathLogRT, 1, 1)
+  F(MathLogRT, 1, 1)                      \
+  /* ES6 Collections */                   \
+  F(MapClear, 1, 1)                       \
+  F(MapDelete, 2, 1)                      \
+  F(MapGet, 2, 1)                         \
+  F(MapGetSize, 1, 1)                     \
+  F(MapHas, 2, 1)                         \
+  F(MapInitialize, 1, 1)                  \
+  F(MapSet, 3, 1)                         \
+  F(SetAdd, 2, 1)                         \
+  F(SetClear, 1, 1)                       \
+  F(SetDelete, 2, 1)                      \
+  F(SetGetSize, 1, 1)                     \
+  F(SetHas, 2, 1)                         \
+  F(SetInitialize, 1, 1)                  \
+  /* Arrays */                            \
+  F(HasFastPackedElements, 1, 1)          \
+  F(GetPrototype, 1, 1)
 
 
 //---------------------------------------------------------------------------
@@ -828,6 +830,9 @@ class Runtime : public AllStatic {
   MUST_USE_RESULT static MaybeHandle<Object> GetObjectProperty(
       Isolate* isolate, Handle<Object> object, Handle<Object> key);
 
+  MUST_USE_RESULT static MaybeHandle<Object> GetPrototype(
+      Isolate* isolate, Handle<Object> object);
+
   MUST_USE_RESULT static MaybeHandle<Name> ToName(Isolate* isolate,
                                                   Handle<Object> key);
 
index ff22de5..3c1cccc 100644 (file)
@@ -91,7 +91,7 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
 
   virtual ~ExternalStreamingStream() { delete[] current_data_; }
 
-  virtual unsigned BufferSeekForward(unsigned delta) OVERRIDE {
+  unsigned BufferSeekForward(unsigned delta) OVERRIDE {
     // We never need to seek forward when streaming scripts. We only seek
     // forward when we want to parse a function whose location we already know,
     // and when streaming, we don't know the locations of anything we haven't
@@ -100,7 +100,7 @@ class ExternalStreamingStream : public BufferedUtf16CharacterStream {
     return 0;
   }
 
-  virtual unsigned FillBuffer(unsigned position) OVERRIDE;
+  unsigned FillBuffer(unsigned position) OVERRIDE;
 
  private:
   void HandleUtf8SplitCharacters(unsigned* data_in_buffer);
index e63239d..d499e9b 100644 (file)
@@ -38,7 +38,9 @@ Scanner::Scanner(UnicodeCache* unicode_cache)
       harmony_scoping_(false),
       harmony_modules_(false),
       harmony_numeric_literals_(false),
-      harmony_classes_(false) { }
+      harmony_classes_(false),
+      harmony_templates_(false),
+      harmony_unicode_(false) {}
 
 
 void Scanner::Initialize(Utf16CharacterStream* source) {
@@ -54,6 +56,7 @@ void Scanner::Initialize(Utf16CharacterStream* source) {
 }
 
 
+template <bool capture_raw>
 uc32 Scanner::ScanHexNumber(int expected_length) {
   DCHECK(expected_length <= 4);  // prevent overflow
 
@@ -64,13 +67,30 @@ uc32 Scanner::ScanHexNumber(int expected_length) {
       return -1;
     }
     x = x * 16 + d;
-    Advance();
+    Advance<capture_raw>();
   }
 
   return x;
 }
 
 
+template <bool capture_raw>
+uc32 Scanner::ScanUnlimitedLengthHexNumber(int max_value) {
+  uc32 x = 0;
+  int d = HexValue(c0_);
+  if (d < 0) {
+    return -1;
+  }
+  while (d >= 0) {
+    x = x * 16 + d;
+    if (x > max_value) return -1;
+    Advance<capture_raw>();
+    d = HexValue(c0_);
+  }
+  return x;
+}
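
ScanUnlimitedLengthHexNumber supports the new \u{...} escapes: it keeps consuming hex digits but fails as soon as the accumulated value exceeds the maximum (0x10ffff at the call site in ScanUnicodeEscape). A standalone sketch over a plain C string:

    // Returns the scanned value, or -1 if the first character is not a hex
    // digit or the value exceeds max_value. *pos advances past the digits
    // consumed; the caller still has to check for the closing '}'.
    int ScanUnlimitedHex(const char* s, int* pos, int max_value) {
      auto hex = [](char c) -> int {
        if (c >= '0' && c <= '9') return c - '0';
        if (c >= 'a' && c <= 'f') return c - 'a' + 10;
        if (c >= 'A' && c <= 'F') return c - 'A' + 10;
        return -1;
      };
      int d = hex(s[*pos]);
      if (d < 0) return -1;
      int x = 0;
      while (d >= 0) {
        x = x * 16 + d;
        if (x > max_value) return -1;
        ++*pos;
        d = hex(s[*pos]);
      }
      return x;
    }
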
+
+
 // Ensure that tokens can be stored in a byte.
 STATIC_ASSERT(Token::NUM_TOKENS <= 0x100);
 
@@ -311,8 +331,7 @@ Token::Value Scanner::SkipSourceURLComment() {
 void Scanner::TryToParseSourceURLComment() {
   // Magic comments are of the form: //[#@]\s<name>=\s*<value>\s*.* and this
   // function will just return if it cannot parse a magic comment.
-  if (!unicode_cache_->IsWhiteSpace(c0_))
-    return;
+  if (c0_ < 0 || !unicode_cache_->IsWhiteSpace(c0_)) return;
   Advance();
   LiteralBuffer name;
   while (c0_ >= 0 && !unicode_cache_->IsWhiteSpaceOrLineTerminator(c0_) &&
@@ -403,6 +422,7 @@ Token::Value Scanner::ScanHtmlComment() {
 
 void Scanner::Scan() {
   next_.literal_chars = NULL;
+  next_.raw_literal_chars = NULL;
   Token::Value token;
   do {
     // Remember the position of the next token
@@ -626,6 +646,12 @@ void Scanner::Scan() {
         token = Select(Token::BIT_NOT);
         break;
 
+      case '`':
+        if (HarmonyTemplates()) {
+          token = ScanTemplateStart();
+          break;
+        }
+
       default:
         if (c0_ < 0) {
           token = Token::EOS;
@@ -671,16 +697,17 @@ void Scanner::SeekForward(int pos) {
 }
 
 
+template <bool capture_raw, bool in_template_literal>
 bool Scanner::ScanEscape() {
   uc32 c = c0_;
-  Advance();
+  Advance<capture_raw>();
 
   // Skip escaped newlines.
-  if (c0_ >= 0 && unicode_cache_->IsLineTerminator(c)) {
+  if (!in_template_literal && c0_ >= 0 && unicode_cache_->IsLineTerminator(c)) {
     // Allow CR+LF newlines in multiline string literals.
-    if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance();
+    if (IsCarriageReturn(c) && IsLineFeed(c0_)) Advance<capture_raw>();
     // Allow LF+CR newlines in multiline string literals.
-    if (IsLineFeed(c) && IsCarriageReturn(c0_)) Advance();
+    if (IsLineFeed(c) && IsCarriageReturn(c0_)) Advance<capture_raw>();
     return true;
   }
 
@@ -694,24 +721,28 @@ bool Scanner::ScanEscape() {
     case 'r' : c = '\r'; break;
     case 't' : c = '\t'; break;
     case 'u' : {
-      c = ScanHexNumber(4);
+      c = ScanUnicodeEscape<capture_raw>();
       if (c < 0) return false;
       break;
     }
-    case 'v' : c = '\v'; break;
-    case 'x' : {
-      c = ScanHexNumber(2);
+    case 'v':
+      c = '\v';
+      break;
+    case 'x': {
+      c = ScanHexNumber<capture_raw>(2);
       if (c < 0) return false;
       break;
     }
-    case '0' :  // fall through
-    case '1' :  // fall through
-    case '2' :  // fall through
-    case '3' :  // fall through
-    case '4' :  // fall through
-    case '5' :  // fall through
-    case '6' :  // fall through
-    case '7' : c = ScanOctalEscape(c, 2); break;
+    case '0':  // Fall through.
+    case '1':  // Fall through.
+    case '2':  // Fall through.
+    case '3':  // Fall through.
+    case '4':  // Fall through.
+    case '5':  // Fall through.
+    case '6':  // Fall through.
+    case '7':
+      c = ScanOctalEscape<capture_raw>(c, 2);
+      break;
   }
 
   // According to ECMA-262, section 7.8.4, characters not covered by the
@@ -724,6 +755,7 @@ bool Scanner::ScanEscape() {
 
 // Octal escapes of the forms '\0xx' and '\xxx' are not a part of
 // ECMA-262. Other JS VMs support them.
+template <bool capture_raw>
 uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
   uc32 x = c - '0';
   int i = 0;
@@ -733,7 +765,7 @@ uc32 Scanner::ScanOctalEscape(uc32 c, int length) {
     int nx = x * 8 + d;
     if (nx >= 256) break;
     x = nx;
-    Advance();
+    Advance<capture_raw>();
   }
   // Anything except '\0' is an octal escape sequence, illegal in strict mode.
   // Remember the position of octal escape sequences so that an error
@@ -757,7 +789,7 @@ Token::Value Scanner::ScanString() {
     uc32 c = c0_;
     Advance();
     if (c == '\\') {
-      if (c0_ < 0 || !ScanEscape()) return Token::ILLEGAL;
+      if (c0_ < 0 || !ScanEscape<false, false>()) return Token::ILLEGAL;
     } else {
       AddLiteralChar(c);
     }
@@ -770,6 +802,96 @@ Token::Value Scanner::ScanString() {
 }
 
 
+Token::Value Scanner::ScanTemplateSpan() {
+  // When scanning a TemplateSpan, we are looking for the following construct:
+  // TEMPLATE_SPAN ::
+  //     ` LiteralChars* ${
+  //   | } LiteralChars* ${
+  //
+  // TEMPLATE_TAIL ::
+  //     ` LiteralChars* `
+  //   | } LiteralChar* `
+  //
+  // A TEMPLATE_SPAN should always be followed by an Expression, while a
+  // TEMPLATE_TAIL terminates a TemplateLiteral and does not need to be
+  // followed by an Expression.
+
+  Token::Value result = Token::TEMPLATE_SPAN;
+  LiteralScope literal(this);
+  StartRawLiteral();
+  const bool capture_raw = true;
+  const bool in_template_literal = true;
+
+  while (true) {
+    uc32 c = c0_;
+    Advance<capture_raw>();
+    if (c == '`') {
+      result = Token::TEMPLATE_TAIL;
+      ReduceRawLiteralLength(1);
+      break;
+    } else if (c == '$' && c0_ == '{') {
+      Advance<capture_raw>();  // Consume '{'
+      ReduceRawLiteralLength(2);
+      break;
+    } else if (c == '\\') {
+      if (unicode_cache_->IsLineTerminator(c0_)) {
+        // The TV of LineContinuation :: \ LineTerminatorSequence is the empty
+        // code unit sequence.
+        uc32 lastChar = c0_;
+        Advance<capture_raw>();
+        if (lastChar == '\r') {
+          ReduceRawLiteralLength(1);  // Remove \r
+          if (c0_ == '\n') {
+            Advance<capture_raw>();  // Adds \n
+          } else {
+            AddRawLiteralChar('\n');
+          }
+        }
+      } else if (!ScanEscape<capture_raw, in_template_literal>()) {
+        return Token::ILLEGAL;
+      }
+    } else if (c < 0) {
+      // Unterminated template literal
+      PushBack(c);
+      break;
+    } else {
+      // The TRV of LineTerminatorSequence :: <CR> is the CV 0x000A.
+      // The TRV of LineTerminatorSequence :: <CR><LF> is the sequence
+      // consisting of the CV 0x000A.
+      if (c == '\r') {
+        ReduceRawLiteralLength(1);  // Remove \r
+        if (c0_ == '\n') {
+          Advance<capture_raw>();  // Adds \n
+        } else {
+          AddRawLiteralChar('\n');
+        }
+        c = '\n';
+      }
+      AddLiteralChar(c);
+    }
+  }
+  literal.Complete();
+  next_.location.end_pos = source_pos();
+  next_.token = result;
+  return result;
+}
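
The two '\r' branches in ScanTemplateSpan implement the spec rule quoted in its comments: inside a template literal, both <CR> and <CR><LF> contribute a single 0x000A to the cooked and raw values. A sketch of that normalization in isolation (not V8 code):

    #include <string>

    // Lone '\r' and the pair "\r\n" both become a single '\n'.
    std::string NormalizeTemplateNewlines(const std::string& in) {
      std::string out;
      for (size_t i = 0; i < in.size(); i++) {
        if (in[i] == '\r') {
          out.push_back('\n');
          if (i + 1 < in.size() && in[i + 1] == '\n') i++;  // Collapse CR+LF.
        } else {
          out.push_back(in[i]);
        }
      }
      return out;
    }
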
+
+
+Token::Value Scanner::ScanTemplateStart() {
+  DCHECK(c0_ == '`');
+  next_.location.beg_pos = source_pos();
+  Advance();  // Consume `
+  return ScanTemplateSpan();
+}
+
+
+Token::Value Scanner::ScanTemplateContinuation() {
+  DCHECK_EQ(next_.token, Token::RBRACE);
+  next_.location.beg_pos = source_pos() - 1;  // We already consumed }
+  return ScanTemplateSpan();
+}
+
+
 void Scanner::ScanDecimalDigits() {
   while (IsDecimalDigit(c0_))
     AddLiteralCharAdvance();
@@ -887,7 +1009,28 @@ uc32 Scanner::ScanIdentifierUnicodeEscape() {
   Advance();
   if (c0_ != 'u') return -1;
   Advance();
-  return ScanHexNumber(4);
+  return ScanUnicodeEscape<false>();
+}
+
+
+template <bool capture_raw>
+uc32 Scanner::ScanUnicodeEscape() {
+  // Accept both \uxxxx and \u{xxxxxx} (if harmony unicode escapes are
+  // allowed). In the latter case, the number of hex digits between { } is
+  // arbitrary. \ and u have already been read.
+  if (c0_ == '{' && HarmonyUnicode()) {
+    Advance<capture_raw>();
+    uc32 cp = ScanUnlimitedLengthHexNumber<capture_raw>(0x10ffff);
+    if (cp < 0) {
+      return -1;
+    }
+    if (c0_ != '}') {
+      return -1;
+    }
+    Advance<capture_raw>();
+    return cp;
+  }
+  return ScanHexNumber<capture_raw>(4);
 }
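
ScanUnicodeEscape accepts the classic four-digit form unconditionally, and the brace-delimited form only when HarmonyUnicode() is set; inside the braces any number of hex digits is allowed, as long as the value never exceeds 0x10ffff. A self-contained sketch of the same decoding (hypothetical signature; the real code streams characters through the scanner):

#include <cstddef>

// Decode the payload following "\u". Returns the code point or -1 on error;
// *consumed receives the number of characters read. Sketch only.
int DecodeUnicodeEscape(const char* s, size_t len, size_t* consumed) {
  struct Hex {
    static int Value(char c) {
      if (c >= '0' && c <= '9') return c - '0';
      if (c >= 'a' && c <= 'f') return c - 'a' + 10;
      if (c >= 'A' && c <= 'F') return c - 'A' + 10;
      return -1;
    }
  };
  if (len > 0 && s[0] == '{') {  // \u{XXXXXX}: digit count is arbitrary.
    int cp = 0;
    size_t i = 1;
    for (; i < len && s[i] != '}'; i++) {
      int d = Hex::Value(s[i]);
      if (d < 0) return -1;
      cp = cp * 16 + d;
      if (cp > 0x10ffff) return -1;  // Value cap, as above.
    }
    if (i == 1 || i == len) return -1;  // No digits, or missing '}'.
    *consumed = i + 1;
    return cp;
  }
  if (len < 4) return -1;  // \uXXXX: exactly four hex digits.
  int cp = 0;
  for (size_t i = 0; i < 4; i++) {
    int d = Hex::Value(s[i]);
    if (d < 0) return -1;
    cp = cp * 16 + d;
  }
  *consumed = 4;
  return cp;
}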
 
 
@@ -1138,24 +1281,6 @@ bool Scanner::ScanRegExpPattern(bool seen_equal) {
 }
 
 
-bool Scanner::ScanLiteralUnicodeEscape() {
-  DCHECK(c0_ == '\\');
-  AddLiteralChar(c0_);
-  Advance();
-  int hex_digits_read = 0;
-  if (c0_ == 'u') {
-    AddLiteralChar(c0_);
-    while (hex_digits_read < 4) {
-      Advance();
-      if (!IsHexDigit(c0_)) break;
-      AddLiteralChar(c0_);
-      ++hex_digits_read;
-    }
-  }
-  return hex_digits_read == 4;
-}
-
-
 bool Scanner::ScanRegExpFlags() {
   // Scan regular expression flags.
   LiteralScope literal(this);
@@ -1163,10 +1288,7 @@ bool Scanner::ScanRegExpFlags() {
     if (c0_ != '\\') {
       AddLiteralCharAdvance();
     } else {
-      if (!ScanLiteralUnicodeEscape()) {
-        return false;
-      }
-      Advance();
+      return false;
     }
   }
   literal.Complete();
@@ -1192,6 +1314,15 @@ const AstRawString* Scanner::NextSymbol(AstValueFactory* ast_value_factory) {
 }
 
 
+const AstRawString* Scanner::CurrentRawSymbol(
+    AstValueFactory* ast_value_factory) {
+  if (is_raw_literal_one_byte()) {
+    return ast_value_factory->GetOneByteString(raw_literal_one_byte_string());
+  }
+  return ast_value_factory->GetTwoByteString(raw_literal_two_byte_string());
+}
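
CurrentRawSymbol is the raw-buffer twin of CurrentSymbol: for a template span the parser materializes both the cooked string (escapes decoded, line terminators normalized) and the raw source text. A toy illustration of the difference for a span containing the two source characters backslash and 'n' (not scanner code):

#include <cassert>
#include <string>

int main() {
  std::string raw = "\\n";    // Advance<capture_raw>() keeps both characters.
  std::string cooked = "\n";  // ScanEscape decodes them to one code unit.
  assert(raw.size() == 2 && cooked.size() == 1);
  return 0;
}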
+
+
 double Scanner::DoubleValue() {
   DCHECK(is_literal_one_byte());
   return StringToDouble(
index 387d331..6e668fd 100644 (file)
@@ -252,6 +252,10 @@ class LiteralBuffer {
     return is_one_byte_ ? position_ : (position_ >> 1);
   }
 
+  void ReduceLength(int delta) {
+    position_ -= delta * (is_one_byte_ ? kOneByteSize : kUC16Size);
+  }
+
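ReduceLength exists because Advance<capture_raw>() captures eagerly: delimiters such as the closing backtick or the "${" pair land in the raw buffer and are trimmed off afterwards (see the ReduceRawLiteralLength calls in ScanTemplateSpan). position_ counts bytes, so the character delta is scaled by the character width. The arithmetic as a standalone sketch:

// Removing `delta` characters subtracts delta bytes from a one-byte buffer
// and 2 * delta bytes from a two-byte one (kOneByteSize == 1, kUC16Size == 2).
int ReducedPosition(int position, int delta, bool is_one_byte) {
  return position - delta * (is_one_byte ? 1 : 2);
}
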
   void Reset() {
     position_ = 0;
     is_one_byte_ = true;
@@ -318,15 +322,13 @@ class Scanner {
   // if aborting the scanning before it's complete.
   class LiteralScope {
    public:
-    explicit LiteralScope(Scanner* self)
-        : scanner_(self), complete_(false) {
+    explicit LiteralScope(Scanner* self) : scanner_(self), complete_(false) {
       scanner_->StartLiteral();
     }
      ~LiteralScope() {
        if (!complete_) scanner_->DropLiteral();
      }
     void Complete() {
-      scanner_->TerminateLiteral();
       complete_ = true;
     }
 
@@ -392,6 +394,7 @@ class Scanner {
 
   const AstRawString* CurrentSymbol(AstValueFactory* ast_value_factory);
   const AstRawString* NextSymbol(AstValueFactory* ast_value_factory);
+  const AstRawString* CurrentRawSymbol(AstValueFactory* ast_value_factory);
 
   double DoubleValue();
   bool LiteralMatches(const char* data, int length, bool allow_escapes = true) {
@@ -458,6 +461,10 @@ class Scanner {
   void SetHarmonyClasses(bool classes) {
     harmony_classes_ = classes;
   }
+  bool HarmonyTemplates() const { return harmony_templates_; }
+  void SetHarmonyTemplates(bool templates) { harmony_templates_ = templates; }
+  bool HarmonyUnicode() const { return harmony_unicode_; }
+  void SetHarmonyUnicode(bool unicode) { harmony_unicode_ = unicode; }
 
   // Returns true if there was a line terminator before the peek'ed token,
   // possibly inside a multi-line comment.
@@ -473,6 +480,10 @@ class Scanner {
   // be empty).
   bool ScanRegExpFlags();
 
+  // Scans the input as a template literal.
+  Token::Value ScanTemplateStart();
+  Token::Value ScanTemplateContinuation();
+
   const LiteralBuffer* source_url() const { return &source_url_; }
   const LiteralBuffer* source_mapping_url() const {
     return &source_mapping_url_;
@@ -486,11 +497,13 @@ class Scanner {
     Token::Value token;
     Location location;
     LiteralBuffer* literal_chars;
+    LiteralBuffer* raw_literal_chars;
   };
 
   static const int kCharacterLookaheadBufferSize = 1;
 
   // Scans octal escape sequence. Also accepts "\0" decimal escape sequence.
+  template <bool capture_raw>
   uc32 ScanOctalEscape(uc32 c, int length);
 
   // Call this after setting source_ to the input.
@@ -500,6 +513,7 @@ class Scanner {
     Advance();
     // Initialize current_ to not refer to a literal.
     current_.literal_chars = NULL;
+    current_.raw_literal_chars = NULL;
   }
 
   // Literal buffer support
@@ -510,20 +524,31 @@ class Scanner {
     next_.literal_chars = free_buffer;
   }
 
+  inline void StartRawLiteral() {
+    raw_literal_buffer_.Reset();
+    next_.raw_literal_chars = &raw_literal_buffer_;
+  }
+
   INLINE(void AddLiteralChar(uc32 c)) {
     DCHECK_NOT_NULL(next_.literal_chars);
     next_.literal_chars->AddChar(c);
   }
 
-  // Complete scanning of a literal.
-  inline void TerminateLiteral() {
-    // Does nothing in the current implementation.
+  INLINE(void AddRawLiteralChar(uc32 c)) {
+    DCHECK_NOT_NULL(next_.raw_literal_chars);
+    next_.raw_literal_chars->AddChar(c);
+  }
+
+  INLINE(void ReduceRawLiteralLength(int delta)) {
+    DCHECK_NOT_NULL(next_.raw_literal_chars);
+    next_.raw_literal_chars->ReduceLength(delta);
   }
 
   // Stops scanning of a literal and drop the collected characters,
   // e.g., due to an encountered error.
   inline void DropLiteral() {
     next_.literal_chars = NULL;
+    next_.raw_literal_chars = NULL;
   }
 
   inline void AddLiteralCharAdvance() {
@@ -532,7 +557,11 @@ class Scanner {
   }
 
   // Low-level scanning support.
+  template <bool capture_raw = false>
   void Advance() {
+    if (capture_raw) {
+      AddRawLiteralChar(c0_);
+    }
     c0_ = source_->Advance();
     if (unibrow::Utf16::IsLeadSurrogate(c0_)) {
       uc32 c1 = source_->Advance();
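
Advance now takes capture_raw as a template parameter defaulting to false rather than a runtime bool, so the raw-capture branch is a compile-time constant and costs nothing on the common path. Note the ordering: the current lookahead c0_ is appended to the raw buffer before the next character is loaded. A minimal sketch of the pattern outside the scanner (MiniScanner is made up):

#include <vector>

struct MiniScanner {
  const char* cursor;
  char c0;                // One-character lookahead, like c0_.
  std::vector<char> raw;  // Stands in for raw_literal_buffer_.

  template <bool capture_raw = false>
  void Advance() {
    if (capture_raw) raw.push_back(c0);  // Capture the current char first...
    c0 = *cursor++;                      // ...then load the next one.
  }
};

Call sites name the flag locally (const bool capture_raw = true; Advance<capture_raw>();), which reads like a runtime argument while still selecting the instantiation at compile time.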
@@ -571,10 +600,11 @@ class Scanner {
 
   // Returns the literal string, if any, for the current token (the
   // token last returned by Next()). The string is 0-terminated.
-  // Literal strings are collected for identifiers, strings, and
-  // numbers.
-  // These functions only give the correct result if the literal
-  // was scanned between calls to StartLiteral() and TerminateLiteral().
+  // Literal strings are collected for identifiers, strings, and numbers, as
+  // well as for template literals. For template literals we also collect the
+  // raw form.
+  // These functions only give the correct result if the literal was scanned
+  // while a LiteralScope object was alive.
   Vector<const uint8_t> literal_one_byte_string() {
     DCHECK_NOT_NULL(current_.literal_chars);
     return current_.literal_chars->one_byte_literal();
@@ -605,12 +635,26 @@ class Scanner {
     DCHECK_NOT_NULL(next_.literal_chars);
     return next_.literal_chars->is_one_byte();
   }
-  int next_literal_length() const {
-    DCHECK_NOT_NULL(next_.literal_chars);
-    return next_.literal_chars->length();
+  Vector<const uint8_t> raw_literal_one_byte_string() {
+    DCHECK_NOT_NULL(current_.raw_literal_chars);
+    return current_.raw_literal_chars->one_byte_literal();
+  }
+  Vector<const uint16_t> raw_literal_two_byte_string() {
+    DCHECK_NOT_NULL(current_.raw_literal_chars);
+    return current_.raw_literal_chars->two_byte_literal();
+  }
+  bool is_raw_literal_one_byte() {
+    DCHECK_NOT_NULL(current_.raw_literal_chars);
+    return current_.raw_literal_chars->is_one_byte();
   }
 
+  template <bool capture_raw>
   uc32 ScanHexNumber(int expected_length);
+  // Scan a hex number of any character length, but reject values bigger than
+  // max_value. For example, the number can be 000000001, so it is very long
+  // in characters even though its value is small.
+  template <bool capture_raw>
+  uc32 ScanUnlimitedLengthHexNumber(int max_value);
 
   // Scans a single JavaScript token.
   void Scan();
@@ -633,14 +677,17 @@ class Scanner {
   // Scans an escape-sequence which is part of a string and adds the
   // decoded character to the current literal. Returns true if a pattern
   // is scanned.
+  template <bool capture_raw, bool in_template_literal>
   bool ScanEscape();
+
   // Decodes a Unicode escape-sequence which is part of an identifier.
   // If the escape sequence cannot be decoded the result is kBadChar.
   uc32 ScanIdentifierUnicodeEscape();
-  // Scans a Unicode escape-sequence and adds its characters,
-  // uninterpreted, to the current literal. Used for parsing RegExp
-  // flags.
-  bool ScanLiteralUnicodeEscape();
+  // Helper for the above functions.
+  template <bool capture_raw>
+  uc32 ScanUnicodeEscape();
+
+  Token::Value ScanTemplateSpan();
 
   // Return the current source position.
   int source_pos() {
@@ -657,6 +704,9 @@ class Scanner {
   LiteralBuffer source_url_;
   LiteralBuffer source_mapping_url_;
 
+  // Buffer to store raw string values.
+  LiteralBuffer raw_literal_buffer_;
+
   TokenDesc current_;  // desc for current token (as returned by Next())
   TokenDesc next_;     // desc for next token (one token look-ahead)
 
@@ -685,6 +735,10 @@ class Scanner {
   bool harmony_numeric_literals_;
   // Whether we scan 'class', 'extends', 'static' and 'super' as keywords.
   bool harmony_classes_;
+  // Whether we scan TEMPLATE_SPAN and TEMPLATE_TAIL.
+  bool harmony_templates_;
+  // Whether we allow \u{xxxxx}.
+  bool harmony_unicode_;
 };
 
 } }  // namespace v8::internal
index 598c5e6..b9cb6f3 100644 (file)
@@ -380,13 +380,14 @@ bool ScopeInfo::CopyContextLocalsToScopeObject(Handle<ScopeInfo> scope_info,
   for (int i = 0; i < local_count; ++i) {
     if (scope_info->LocalIsSynthetic(first_context_var + i)) continue;
     int context_index = Context::MIN_CONTEXT_SLOTS + i;
+    Handle<Object> value = Handle<Object>(context->get(context_index), isolate);
+    // Do not reflect variables under TDZ in scope object.
+    if (value->IsTheHole()) continue;
     RETURN_ON_EXCEPTION_VALUE(
-        isolate,
-        Runtime::DefineObjectProperty(
-            scope_object,
-            Handle<String>(String::cast(scope_info->get(i + start))),
-            Handle<Object>(context->get(context_index), isolate),
-            ::NONE),
+        isolate, Runtime::DefineObjectProperty(
+                     scope_object,
+                     Handle<String>(String::cast(scope_info->get(i + start))),
+                     value, ::NONE),
         false);
   }
   return true;
index 51c0065..39b67a8 100644 (file)
@@ -78,14 +78,14 @@ Scope::Scope(Scope* outer_scope, ScopeType scope_type,
       unresolved_(16, zone),
       decls_(4, zone),
       interface_(FLAG_harmony_modules &&
-                 (scope_type == MODULE_SCOPE || scope_type == GLOBAL_SCOPE)
+                 (scope_type == MODULE_SCOPE || scope_type == SCRIPT_SCOPE)
                      ? Interface::NewModule(zone) : NULL),
       already_resolved_(false),
       ast_value_factory_(ast_value_factory),
       zone_(zone) {
   SetDefaults(scope_type, outer_scope, Handle<ScopeInfo>::null());
-  // The outermost scope must be a global scope.
-  DCHECK(scope_type == GLOBAL_SCOPE || outer_scope != NULL);
+  // The outermost scope must be a script scope.
+  DCHECK(scope_type == SCRIPT_SCOPE || outer_scope != NULL);
   DCHECK(!HasIllegalRedeclaration());
 }
 
@@ -160,16 +160,20 @@ void Scope::SetDefaults(ScopeType scope_type,
   scope_inside_with_ = false;
   scope_contains_with_ = false;
   scope_calls_eval_ = false;
-  scope_uses_this_ = false;
   scope_uses_arguments_ = false;
+  scope_uses_super_property_ = false;
+  scope_uses_super_constructor_call_ = false;
+  scope_uses_this_ = false;
   asm_module_ = false;
   asm_function_ = outer_scope != NULL && outer_scope->asm_module_;
   // Inherit the strict mode from the parent scope.
   strict_mode_ = outer_scope != NULL ? outer_scope->strict_mode_ : SLOPPY;
   outer_scope_calls_sloppy_eval_ = false;
   inner_scope_calls_eval_ = false;
-  inner_scope_uses_this_ = false;
   inner_scope_uses_arguments_ = false;
+  inner_scope_uses_this_ = false;
+  inner_scope_uses_super_property_ = false;
+  inner_scope_uses_super_constructor_call_ = false;
   force_eager_compilation_ = false;
   force_context_allocation_ = (outer_scope != NULL && !is_function_scope())
       ? outer_scope->has_forced_context_allocation() : false;
@@ -188,7 +192,7 @@ void Scope::SetDefaults(ScopeType scope_type,
 }
 
 
-Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
+Scope* Scope::DeserializeScopeChain(Context* context, Scope* script_scope,
                                     Zone* zone) {
   // Reconstruct the outer scope chain from a closure's context chain.
   Scope* current_scope = NULL;
@@ -199,7 +203,7 @@ Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
       Scope* with_scope = new(zone) Scope(current_scope,
                                           WITH_SCOPE,
                                           Handle<ScopeInfo>::null(),
-                                          global_scope->ast_value_factory_,
+                                          script_scope->ast_value_factory_,
                                           zone);
       current_scope = with_scope;
       // All the inner scopes are inside a with.
@@ -207,26 +211,26 @@ Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
       for (Scope* s = innermost_scope; s != NULL; s = s->outer_scope()) {
         s->scope_inside_with_ = true;
       }
-    } else if (context->IsGlobalContext()) {
+    } else if (context->IsScriptContext()) {
       ScopeInfo* scope_info = ScopeInfo::cast(context->extension());
       current_scope = new(zone) Scope(current_scope,
-                                      GLOBAL_SCOPE,
+                                      SCRIPT_SCOPE,
                                       Handle<ScopeInfo>(scope_info),
-                                      global_scope->ast_value_factory_,
+                                      script_scope->ast_value_factory_,
                                       zone);
     } else if (context->IsModuleContext()) {
       ScopeInfo* scope_info = ScopeInfo::cast(context->module()->scope_info());
       current_scope = new(zone) Scope(current_scope,
                                       MODULE_SCOPE,
                                       Handle<ScopeInfo>(scope_info),
-                                      global_scope->ast_value_factory_,
+                                      script_scope->ast_value_factory_,
                                       zone);
     } else if (context->IsFunctionContext()) {
       ScopeInfo* scope_info = context->closure()->shared()->scope_info();
       current_scope = new(zone) Scope(current_scope,
                                       FUNCTION_SCOPE,
                                       Handle<ScopeInfo>(scope_info),
-                                      global_scope->ast_value_factory_,
+                                      script_scope->ast_value_factory_,
                                       zone);
       if (scope_info->IsAsmFunction()) current_scope->asm_function_ = true;
       if (scope_info->IsAsmModule()) current_scope->asm_module_ = true;
@@ -235,15 +239,15 @@ Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
       current_scope = new(zone) Scope(current_scope,
                                       BLOCK_SCOPE,
                                       Handle<ScopeInfo>(scope_info),
-                                      global_scope->ast_value_factory_,
+                                      script_scope->ast_value_factory_,
                                       zone);
     } else {
       DCHECK(context->IsCatchContext());
       String* name = String::cast(context->extension());
       current_scope = new (zone) Scope(
           current_scope,
-          global_scope->ast_value_factory_->GetString(Handle<String>(name)),
-          global_scope->ast_value_factory_, zone);
+          script_scope->ast_value_factory_->GetString(Handle<String>(name)),
+          script_scope->ast_value_factory_, zone);
     }
     if (contains_with) current_scope->RecordWithStatement();
     if (innermost_scope == NULL) innermost_scope = current_scope;
@@ -255,9 +259,9 @@ Scope* Scope::DeserializeScopeChain(Context* context, Scope* global_scope,
     context = context->previous();
   }
 
-  global_scope->AddInnerScope(current_scope);
-  global_scope->PropagateScopeInfo(false);
-  return (innermost_scope == NULL) ? global_scope : innermost_scope;
+  script_scope->AddInnerScope(current_scope);
+  script_scope->PropagateScopeInfo(false);
+  return (innermost_scope == NULL) ? script_scope : innermost_scope;
 }
 
 
@@ -268,14 +272,14 @@ bool Scope::Analyze(CompilationInfo* info) {
 
   // Traverse the scope tree up to the first unresolved scope or the global
   // scope and start scope resolution and variable allocation from that scope.
-  while (!top->is_global_scope() &&
+  while (!top->is_script_scope() &&
          !top->outer_scope()->already_resolved()) {
     top = top->outer_scope();
   }
 
   // Allocate the variables.
   {
-    AstNodeFactory<AstNullVisitor> ast_node_factory(info->ast_value_factory());
+    AstNodeFactory ast_node_factory(info->ast_value_factory());
     if (!top->AllocateVariables(info, &ast_node_factory)) return false;
   }
 
@@ -286,7 +290,7 @@ bool Scope::Analyze(CompilationInfo* info) {
     scope->Print();
   }
 
-  if (FLAG_harmony_modules && FLAG_print_interfaces && top->is_global_scope()) {
+  if (FLAG_harmony_modules && FLAG_print_interfaces && top->is_script_scope()) {
     PrintF("global : ");
     top->interface()->Print();
   }
@@ -309,9 +313,9 @@ void Scope::Initialize() {
   }
 
   // Declare convenience variables.
-  // Declare and allocate receiver (even for the global scope, and even
+  // Declare and allocate receiver (even for the script scope, and even
   // if naccesses_ == 0).
-  // NOTE: When loading parameters in the global scope, we must take
+  // NOTE: When loading parameters in the script scope, we must take
   // care not to access them as properties of the global object, but
   // instead load them directly from the stack. Currently, the only
   // such parameter is 'this' which is passed on the stack when
@@ -371,6 +375,13 @@ Scope* Scope::FinalizeBlockScope() {
     outer_scope()->unresolved_.Add(unresolved_[i], zone());
   }
 
+  // Propagate usage flags to outer scope.
+  if (uses_arguments()) outer_scope_->RecordArgumentsUsage();
+  if (uses_super_property()) outer_scope_->RecordSuperPropertyUsage();
+  if (uses_super_constructor_call())
+    outer_scope_->RecordSuperConstructorCallUsage();
+  if (uses_this()) outer_scope_->RecordThisUsage();
+
   return NULL;
 }
 
@@ -417,7 +428,7 @@ Variable* Scope::LookupLocal(const AstRawString* name) {
 
 
 Variable* Scope::LookupFunctionVar(const AstRawString* name,
-                                   AstNodeFactory<AstNullVisitor>* factory) {
+                                   AstNodeFactory* factory) {
   if (function_ != NULL && function_->proxy()->raw_name() == name) {
     return function_->proxy()->var();
   } else if (!scope_info_.is_null()) {
@@ -477,7 +488,7 @@ Variable* Scope::DeclareLocal(const AstRawString* name, VariableMode mode,
 
 
 Variable* Scope::DeclareDynamicGlobal(const AstRawString* name) {
-  DCHECK(is_global_scope());
+  DCHECK(is_script_scope());
   return variables_.Declare(this,
                             name,
                             DYNAMIC_GLOBAL,
@@ -636,8 +647,7 @@ void Scope::CollectStackAndContextLocals(ZoneList<Variable*>* stack_locals,
 }
 
 
-bool Scope::AllocateVariables(CompilationInfo* info,
-                              AstNodeFactory<AstNullVisitor>* factory) {
+bool Scope::AllocateVariables(CompilationInfo* info, AstNodeFactory* factory) {
   // 1) Propagate scope information.
   bool outer_scope_calls_sloppy_eval = false;
   if (outer_scope_ != NULL) {
@@ -648,7 +658,7 @@ bool Scope::AllocateVariables(CompilationInfo* info,
   PropagateScopeInfo(outer_scope_calls_sloppy_eval);
 
   // 2) Allocate module instances.
-  if (FLAG_harmony_modules && (is_global_scope() || is_module_scope())) {
+  if (FLAG_harmony_modules && (is_script_scope() || is_module_scope())) {
     DCHECK(num_modules_ == 0);
     AllocateModulesRecursively(this);
   }
@@ -728,9 +738,9 @@ int Scope::ContextChainLength(Scope* scope) {
 }
 
 
-Scope* Scope::GlobalScope() {
+Scope* Scope::ScriptScope() {
   Scope* scope = this;
-  while (!scope->is_global_scope()) {
+  while (!scope->is_script_scope()) {
     scope = scope->outer_scope();
   }
   return scope;
@@ -778,7 +788,7 @@ static const char* Header(ScopeType scope_type) {
     case EVAL_SCOPE: return "eval";
     case FUNCTION_SCOPE: return "function";
     case MODULE_SCOPE: return "module";
-    case GLOBAL_SCOPE: return "global";
+    case SCRIPT_SCOPE: return "global";
     case CATCH_SCOPE: return "catch";
     case BLOCK_SCOPE: return "block";
     case WITH_SCOPE: return "with";
@@ -889,12 +899,21 @@ void Scope::Print(int n) {
   if (scope_inside_with_) Indent(n1, "// scope inside 'with'\n");
   if (scope_contains_with_) Indent(n1, "// scope contains 'with'\n");
   if (scope_calls_eval_) Indent(n1, "// scope calls 'eval'\n");
-  if (scope_uses_this_) Indent(n1, "// scope uses 'this'\n");
   if (scope_uses_arguments_) Indent(n1, "// scope uses 'arguments'\n");
-  if (inner_scope_uses_this_) Indent(n1, "// inner scope uses 'this'\n");
+  if (scope_uses_super_property_)
+    Indent(n1, "// scope uses 'super' property\n");
+  if (scope_uses_super_constructor_call_)
+    Indent(n1, "// scope uses 'super' constructor\n");
+  if (scope_uses_this_) Indent(n1, "// scope uses 'this'\n");
   if (inner_scope_uses_arguments_) {
     Indent(n1, "// inner scope uses 'arguments'\n");
   }
+  if (inner_scope_uses_super_property_)
+    Indent(n1, "// inner scope uses 'super' property\n");
+  if (inner_scope_uses_super_constructor_call_) {
+    Indent(n1, "// inner scope uses 'super' constructor\n");
+  }
+  if (inner_scope_uses_this_) Indent(n1, "// inner scope uses 'this'\n");
   if (outer_scope_calls_sloppy_eval_) {
     Indent(n1, "// outer scope calls 'eval' in sloppy context\n");
   }
@@ -972,7 +991,7 @@ Variable* Scope::NonLocal(const AstRawString* name, VariableMode mode) {
 
 Variable* Scope::LookupRecursive(VariableProxy* proxy,
                                  BindingKind* binding_kind,
-                                 AstNodeFactory<AstNullVisitor>* factory) {
+                                 AstNodeFactory* factory) {
   DCHECK(binding_kind != NULL);
   if (already_resolved() && is_with_scope()) {
     // Short-cut: if the scope is deserialized from a scope info, variable
@@ -1005,7 +1024,7 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy,
       var->ForceContextAllocation();
     }
   } else {
-    DCHECK(is_global_scope());
+    DCHECK(is_script_scope());
   }
 
   if (is_with_scope()) {
@@ -1034,10 +1053,9 @@ Variable* Scope::LookupRecursive(VariableProxy* proxy,
 }
 
 
-bool Scope::ResolveVariable(CompilationInfo* info,
-                            VariableProxy* proxy,
-                            AstNodeFactory<AstNullVisitor>* factory) {
-  DCHECK(info->global_scope()->is_global_scope());
+bool Scope::ResolveVariable(CompilationInfo* info, VariableProxy* proxy,
+                            AstNodeFactory* factory) {
+  DCHECK(info->script_scope()->is_script_scope());
 
   // If the proxy is already resolved there's nothing to do
   // (functions and consts may be resolved by the parser).
@@ -1069,7 +1087,7 @@ bool Scope::ResolveVariable(CompilationInfo* info,
 
     case UNBOUND:
       // No binding has been found. Declare a variable on the global object.
-      var = info->global_scope()->DeclareDynamicGlobal(proxy->raw_name());
+      var = info->script_scope()->DeclareDynamicGlobal(proxy->raw_name());
       break;
 
     case UNBOUND_EVAL_SHADOWED:
@@ -1086,21 +1104,6 @@ bool Scope::ResolveVariable(CompilationInfo* info,
   DCHECK(var != NULL);
   if (proxy->is_assigned()) var->set_maybe_assigned();
 
-  if (FLAG_harmony_scoping && strict_mode() == STRICT &&
-      var->is_const_mode() && proxy->is_assigned()) {
-    // Assignment to const. Throw a syntax error.
-    MessageLocation location(
-        info->script(), proxy->position(), proxy->position());
-    Isolate* isolate = info->isolate();
-    Factory* factory = isolate->factory();
-    Handle<JSArray> array = factory->NewJSArray(0);
-    Handle<Object> error;
-    MaybeHandle<Object> maybe_error =
-        factory->NewSyntaxError("harmony_const_assign", array);
-    if (maybe_error.ToHandle(&error)) isolate->Throw(*error, &location);
-    return false;
-  }
-
   if (FLAG_harmony_modules) {
     bool ok;
 #ifdef DEBUG
@@ -1143,10 +1146,9 @@ bool Scope::ResolveVariable(CompilationInfo* info,
 }
 
 
-bool Scope::ResolveVariablesRecursively(
-    CompilationInfo* info,
-    AstNodeFactory<AstNullVisitor>* factory) {
-  DCHECK(info->global_scope()->is_global_scope());
+bool Scope::ResolveVariablesRecursively(CompilationInfo* info,
+                                        AstNodeFactory* factory) {
+  DCHECK(info->script_scope()->is_script_scope());
 
   // Resolve unresolved variables for this scope.
   for (int i = 0; i < unresolved_.length(); i++) {
@@ -1177,15 +1179,23 @@ void Scope::PropagateScopeInfo(bool outer_scope_calls_sloppy_eval ) {
       inner_scope_calls_eval_ = true;
     }
     // If the inner scope is an arrow function, propagate the flags tracking
-    // usage of this/arguments, but do not propagate them out from normal
+    // usage of arguments/super/this, but do not propagate them out from normal
     // functions.
     if (!inner->is_function_scope() || inner->is_arrow_scope()) {
-      if (inner->scope_uses_this_ || inner->inner_scope_uses_this_) {
-        inner_scope_uses_this_ = true;
-      }
       if (inner->scope_uses_arguments_ || inner->inner_scope_uses_arguments_) {
         inner_scope_uses_arguments_ = true;
       }
+      if (inner->scope_uses_super_property_ ||
+          inner->inner_scope_uses_super_property_) {
+        inner_scope_uses_super_property_ = true;
+      }
+      if (inner->uses_super_constructor_call() ||
+          inner->inner_scope_uses_super_constructor_call_) {
+        inner_scope_uses_super_constructor_call_ = true;
+      }
+      if (inner->scope_uses_this_ || inner->inner_scope_uses_this_) {
+        inner_scope_uses_this_ = true;
+      }
     }
     if (inner->force_eager_compilation_) {
       force_eager_compilation_ = true;
@@ -1209,7 +1219,7 @@ bool Scope::MustAllocate(Variable* var) {
        is_catch_scope() ||
        is_block_scope() ||
        is_module_scope() ||
-       is_global_scope())) {
+       is_script_scope())) {
     var->set_is_used();
     if (scope_calls_eval_ || inner_scope_calls_eval_) var->set_maybe_assigned();
   }
@@ -1232,7 +1242,7 @@ bool Scope::MustAllocateInContext(Variable* var) {
   if (var->mode() == TEMPORARY) return false;
   if (var->mode() == INTERNAL) return true;
   if (is_catch_scope() || is_block_scope() || is_module_scope()) return true;
-  if (is_global_scope() && IsLexicalVariableMode(var->mode())) return true;
+  if (is_script_scope() && IsLexicalVariableMode(var->mode())) return true;
   return var->has_forced_context_allocation() ||
       scope_calls_eval_ ||
       inner_scope_calls_eval_ ||
index 25305de..8d79006 100644 (file)
@@ -80,7 +80,7 @@ class Scope: public ZoneObject {
   // doesn't re-allocate variables repeatedly.
   static bool Analyze(CompilationInfo* info);
 
-  static Scope* DeserializeScopeChain(Context* context, Scope* global_scope,
+  static Scope* DeserializeScopeChain(Context* context, Scope* script_scope,
                                       Zone* zone);
 
   // The scope name is only used for printing/debugging.
@@ -108,7 +108,7 @@ class Scope: public ZoneObject {
   // the name of named function literal is kept in an intermediate scope
   // in between this scope and the next outer scope.)
   Variable* LookupFunctionVar(const AstRawString* name,
-                              AstNodeFactory<AstNullVisitor>* factory);
+                              AstNodeFactory* factory);
 
   // Lookup a variable in this scope or outer scopes.
   // Returns the variable or NULL if not found.
@@ -135,14 +135,13 @@ class Scope: public ZoneObject {
                          Interface* interface = Interface::NewValue());
 
   // Declare an implicit global variable in this scope which must be a
-  // global scope.  The variable was introduced (possibly from an inner
+  // script scope.  The variable was introduced (possibly from an inner
   // scope) by a reference to an unresolved variable with no intervening
   // with statements or eval calls.
   Variable* DeclareDynamicGlobal(const AstRawString* name);
 
   // Create a new unresolved variable.
-  template<class Visitor>
-  VariableProxy* NewUnresolved(AstNodeFactory<Visitor>* factory,
+  VariableProxy* NewUnresolved(AstNodeFactory* factory,
                                const AstRawString* name,
                                Interface* interface = Interface::NewValue(),
                                int position = RelocInfo::kNoPosition) {
@@ -209,14 +208,22 @@ class Scope: public ZoneObject {
   void RecordWithStatement() { scope_contains_with_ = true; }
 
   // Inform the scope that the corresponding code contains an eval call.
-  void RecordEvalCall() { if (!is_global_scope()) scope_calls_eval_ = true; }
-
-  // Inform the scope that the corresponding code uses "this".
-  void RecordThisUsage() { scope_uses_this_ = true; }
+  void RecordEvalCall() { if (!is_script_scope()) scope_calls_eval_ = true; }
 
   // Inform the scope that the corresponding code uses "arguments".
   void RecordArgumentsUsage() { scope_uses_arguments_ = true; }
 
+  // Inform the scope that the corresponding code uses "super".
+  void RecordSuperPropertyUsage() { scope_uses_super_property_ = true; }
+
+  // Inform the scope that the corresponding code invokes "super" constructor.
+  void RecordSuperConstructorCallUsage() {
+    scope_uses_super_constructor_call_ = true;
+  }
+
+  // Inform the scope that the corresponding code uses "this".
+  void RecordThisUsage() { scope_uses_this_ = true; }
+
   // Set the strict mode flag (unless disabled by a global flag).
   void SetStrictMode(StrictMode strict_mode) { strict_mode_ = strict_mode; }
 
@@ -272,14 +279,14 @@ class Scope: public ZoneObject {
     return scope_type_ == FUNCTION_SCOPE || scope_type_ == ARROW_SCOPE;
   }
   bool is_module_scope() const { return scope_type_ == MODULE_SCOPE; }
-  bool is_global_scope() const { return scope_type_ == GLOBAL_SCOPE; }
+  bool is_script_scope() const { return scope_type_ == SCRIPT_SCOPE; }
   bool is_catch_scope() const { return scope_type_ == CATCH_SCOPE; }
   bool is_block_scope() const { return scope_type_ == BLOCK_SCOPE; }
   bool is_with_scope() const { return scope_type_ == WITH_SCOPE; }
   bool is_arrow_scope() const { return scope_type_ == ARROW_SCOPE; }
   bool is_declaration_scope() const {
     return is_eval_scope() || is_function_scope() ||
-        is_module_scope() || is_global_scope();
+        is_module_scope() || is_script_scope();
   }
   bool is_strict_eval_scope() const {
     return is_eval_scope() && strict_mode_ == STRICT;
@@ -301,14 +308,28 @@ class Scope: public ZoneObject {
   // Does this scope contain a with statement.
   bool contains_with() const { return scope_contains_with_; }
 
-  // Does this scope access "this".
-  bool uses_this() const { return scope_uses_this_; }
-  // Does any inner scope access "this".
-  bool inner_uses_this() const { return inner_scope_uses_this_; }
   // Does this scope access "arguments".
   bool uses_arguments() const { return scope_uses_arguments_; }
   // Does any inner scope access "arguments".
   bool inner_uses_arguments() const { return inner_scope_uses_arguments_; }
+  // Does this scope access "super" property (super.foo).
+  bool uses_super_property() const { return scope_uses_super_property_; }
+  // Does any inner scope access "super" property.
+  bool inner_uses_super_property() const {
+    return inner_scope_uses_super_property_;
+  }
+  // Does this scope call the "super" constructor.
+  bool uses_super_constructor_call() const {
+    return scope_uses_super_constructor_call_;
+  }
+  // Does any inner scope call the "super" constructor.
+  bool inner_uses_super_constructor_call() const {
+    return inner_scope_uses_super_constructor_call_;
+  }
+  // Does this scope access "this".
+  bool uses_this() const { return scope_uses_this_; }
+  // Does any inner scope access "this".
+  bool inner_uses_this() const { return inner_scope_uses_this_; }
 
   // ---------------------------------------------------------------------------
   // Accessors.
@@ -372,7 +393,7 @@ class Scope: public ZoneObject {
   int StackLocalCount() const;
   int ContextLocalCount() const;
 
-  // For global scopes, the number of module literals (including nested ones).
+  // For script scopes, the number of module literals (including nested ones).
   int num_modules() const { return num_modules_; }
 
   // For module scopes, the host scope's internal variable binding this module.
@@ -396,8 +417,10 @@ class Scope: public ZoneObject {
   // The number of contexts between this and scope; zero if this == scope.
   int ContextChainLength(Scope* scope);
 
-  // Find the innermost global scope.
-  Scope* GlobalScope();
+  // Find the script scope.
+  // Used in the modules implementation to find the hosting scope.
+  // TODO(rossberg): is this needed?
+  Scope* ScriptScope();
 
   // Find the first function, global, or eval scope.  This is the scope
   // where var declarations will be hoisted to in the implementation.
@@ -449,7 +472,7 @@ class Scope: public ZoneObject {
 
   // The variables declared in this scope:
   //
-  // All user-declared variables (incl. parameters).  For global scopes
+  // All user-declared variables (incl. parameters).  For script scopes
   // variables may be implicitly 'declared' by being used (possibly in
   // an inner scope) with no intervening with statements or eval calls.
   VariableMap variables_;
@@ -486,10 +509,14 @@ class Scope: public ZoneObject {
   // This scope or a nested catch scope or with scope contain an 'eval' call. At
   // the 'eval' call site this scope is the declaration scope.
   bool scope_calls_eval_;
-  // This scope uses "this".
-  bool scope_uses_this_;
   // This scope uses "arguments".
   bool scope_uses_arguments_;
+  // This scope uses "super" property ('super.foo').
+  bool scope_uses_super_property_;
+  // This scope uses "super" constructor ('super(..)').
+  bool scope_uses_super_constructor_call_;
+  // This scope uses "this".
+  bool scope_uses_this_;
   // This scope contains an "use asm" annotation.
   bool asm_module_;
   // This scope's outer context is an asm module.
@@ -503,8 +530,10 @@ class Scope: public ZoneObject {
   // Computed via PropagateScopeInfo.
   bool outer_scope_calls_sloppy_eval_;
   bool inner_scope_calls_eval_;
-  bool inner_scope_uses_this_;
   bool inner_scope_uses_arguments_;
+  bool inner_scope_uses_super_property_;
+  bool inner_scope_uses_super_constructor_call_;
+  bool inner_scope_uses_this_;
   bool force_eager_compilation_;
   bool force_context_allocation_;
 
@@ -555,14 +584,14 @@ class Scope: public ZoneObject {
     // The variable reference could not be statically resolved to any binding
     // and thus should be considered referencing a global variable. NULL is
     // returned. The variable reference is not inside any 'with' statement and
-    // no scope between the reference scope (inclusive) and global scope
+    // no scope between the reference scope (inclusive) and script scope
     // (exclusive) makes a sloppy 'eval' call.
     UNBOUND,
 
     // The variable reference could not be statically resolved to any binding
     // NULL is returned. The variable reference is not inside any 'with'
     // statement, but some scope between the reference scope (inclusive) and
-    // global scope (exclusive) makes a sloppy 'eval' call, that might
+    // script scope (exclusive) makes a sloppy 'eval' call, that might
     // possibly introduce a variable binding. Thus the reference should be
     // considered referencing a global variable unless it is shadowed by an
     // 'eval' introduced binding.
@@ -582,16 +611,14 @@ class Scope: public ZoneObject {
   // Lookup a variable reference given by name recursively starting with this
   // scope. If the code is executed because of a call to 'eval', the context
   // parameter should be set to the calling context of 'eval'.
-  Variable* LookupRecursive(VariableProxy* proxy,
-                            BindingKind* binding_kind,
-                            AstNodeFactory<AstNullVisitor>* factory);
+  Variable* LookupRecursive(VariableProxy* proxy, BindingKind* binding_kind,
+                            AstNodeFactory* factory);
   MUST_USE_RESULT
-  bool ResolveVariable(CompilationInfo* info,
-                       VariableProxy* proxy,
-                       AstNodeFactory<AstNullVisitor>* factory);
+  bool ResolveVariable(CompilationInfo* info, VariableProxy* proxy,
+                       AstNodeFactory* factory);
   MUST_USE_RESULT
   bool ResolveVariablesRecursively(CompilationInfo* info,
-                                   AstNodeFactory<AstNullVisitor>* factory);
+                                   AstNodeFactory* factory);
 
   // Scope analysis.
   void PropagateScopeInfo(bool outer_scope_calls_sloppy_eval);
@@ -620,8 +647,7 @@ class Scope: public ZoneObject {
   // parameter is the context in which eval was called.  In all other
   // cases the context parameter is an empty handle.
   MUST_USE_RESULT
-  bool AllocateVariables(CompilationInfo* info,
-                         AstNodeFactory<AstNullVisitor>* factory);
+  bool AllocateVariables(CompilationInfo* info, AstNodeFactory* factory);
 
  private:
   // Construct a scope based on the scope info.
index 6347943..01c55a1 100644 (file)
@@ -365,15 +365,11 @@ void ExternalReferenceTable::PopulateTable(Isolate* isolate) {
   }
 
   // Accessors
-#define ACCESSOR_INFO_DECLARATION(name) \
-  Add(FUNCTION_ADDR(&Accessors::name##Getter), \
-      ACCESSOR, \
-      Accessors::k##name##Getter, \
-      "Accessors::" #name "Getter"); \
-  Add(FUNCTION_ADDR(&Accessors::name##Setter), \
-      ACCESSOR, \
-      Accessors::k##name##Setter, \
-      "Accessors::" #name "Setter");
+#define ACCESSOR_INFO_DECLARATION(name)                          \
+  Add(FUNCTION_ADDR(&Accessors::name##Getter), ACCESSOR_CODE,    \
+      Accessors::k##name##Getter, "Accessors::" #name "Getter"); \
+  Add(FUNCTION_ADDR(&Accessors::name##Setter), ACCESSOR_CODE,    \
+      Accessors::k##name##Setter, "Accessors::" #name "Setter");
   ACCESSOR_INFO_LIST(ACCESSOR_INFO_DECLARATION)
 #undef ACCESSOR_INFO_DECLARATION
 
@@ -491,7 +487,9 @@ RootIndexMap::RootIndexMap(Isolate* isolate) {
       if (LookupEntry(map_, heap_object, false) != NULL) {
         // Some root values are initialized to the empty FixedArray();
         // Do not add them to the map.
-        DCHECK_EQ(isolate->heap()->empty_fixed_array(), heap_object);
+        // TODO(yangguo): This assert is not true. Some roots like
+        // instanceof_cache_answer can be e.g. null.
+        // DCHECK_EQ(isolate->heap()->empty_fixed_array(), heap_object);
       } else {
         SetValue(LookupEntry(map_, heap_object, true), i);
       }
@@ -611,12 +609,16 @@ class CodeAddressMap: public CodeEventLogger {
 };
 
 
-Deserializer::Deserializer(SnapshotByteSource* source)
-    : isolate_(NULL),
-      attached_objects_(NULL),
-      source_(source),
-      external_reference_decoder_(NULL),
-      deserialized_large_objects_(0) {
+void Deserializer::DecodeReservation(
+    Vector<const SerializedData::Reservation> res) {
+  DCHECK_EQ(0, reservations_[NEW_SPACE].length());
+  STATIC_ASSERT(NEW_SPACE == 0);
+  int current_space = NEW_SPACE;
+  for (const auto& r : res) {
+    reservations_[current_space].Add({r.chunk_size(), NULL, NULL});
+    if (r.is_last()) current_space++;
+  }
+  DCHECK_EQ(kNumberOfSpaces, current_space);
   for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) current_chunk_[i] = 0;
 }
 
@@ -716,7 +718,7 @@ void Deserializer::DeserializePartial(Isolate* isolate, Object** root,
 
 Deserializer::~Deserializer() {
   // TODO(svenpanne) Re-enable this assertion when v8 initialization is fixed.
-  // DCHECK(source_->AtEOF());
+  // DCHECK(source_.AtEOF());
   if (external_reference_decoder_) {
     delete external_reference_decoder_;
     external_reference_decoder_ = NULL;
@@ -752,7 +754,7 @@ class StringTableInsertionKey : public HashTableKey {
     DCHECK(string->IsInternalizedString());
   }
 
-  virtual bool IsMatch(Object* string) OVERRIDE {
+  bool IsMatch(Object* string) OVERRIDE {
     // We know that all entries in a hash table had their hash keys created.
     // Use that knowledge to have fast failure.
     if (hash_ != HashForObject(string)) return false;
@@ -760,9 +762,9 @@ class StringTableInsertionKey : public HashTableKey {
     return string_->SlowEquals(String::cast(string));
   }
 
-  virtual uint32_t Hash() OVERRIDE { return hash_; }
+  uint32_t Hash() OVERRIDE { return hash_; }
 
-  virtual uint32_t HashForObject(Object* key) OVERRIDE {
+  uint32_t HashForObject(Object* key) OVERRIDE {
     return String::cast(key)->Hash();
   }
 
@@ -794,10 +796,24 @@ HeapObject* Deserializer::ProcessNewObjectFromSerializedCode(HeapObject* obj) {
 }
 
 
-Object* Deserializer::ProcessBackRefInSerializedCode(Object* obj) {
-  if (obj->IsInternalizedString()) {
-    return String::cast(obj)->GetForwardedInternalizedString();
-  }
+HeapObject* Deserializer::GetBackReferencedObject(int space) {
+  HeapObject* obj;
+  if (space == LO_SPACE) {
+    uint32_t index = source_.GetInt();
+    obj = deserialized_large_objects_[index];
+  } else {
+    BackReference back_reference(source_.GetInt());
+    DCHECK(space < kNumberOfPreallocatedSpaces);
+    uint32_t chunk_index = back_reference.chunk_index();
+    DCHECK_LE(chunk_index, current_chunk_[space]);
+    uint32_t chunk_offset = back_reference.chunk_offset();
+    obj = HeapObject::FromAddress(reservations_[space][chunk_index].start +
+                                  chunk_offset);
+  }
+  if (deserializing_user_code() && obj->IsInternalizedString()) {
+    obj = String::cast(obj)->GetForwardedInternalizedString();
+  }
+  hot_objects_.Add(obj);
   return obj;
 }
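
GetBackReferencedObject now decodes one int from the stream into a BackReference holding a chunk index and an offset within that chunk, and resolves it against the reserved chunks; large objects are instead addressed by a plain index into deserialized_large_objects_. The real bit layout belongs to the BackReference class; a sketch of the idea with assumed field widths:

#include <cstdint>

// Assumed layout for illustration: low 20 bits are the chunk offset, the
// rest is the chunk index. V8's BackReference defines its own widths.
const int kChunkOffsetBits = 20;
const uint32_t kChunkOffsetMask = (1u << kChunkOffsetBits) - 1;

uint32_t EncodeBackRef(uint32_t chunk_index, uint32_t chunk_offset) {
  return (chunk_index << kChunkOffsetBits) | chunk_offset;
}
uint32_t ChunkIndex(uint32_t bitfield) { return bitfield >> kChunkOffsetBits; }
uint32_t ChunkOffset(uint32_t bitfield) { return bitfield & kChunkOffsetMask; }

Resolution is then reservations_[space][ChunkIndex(b)].start + ChunkOffset(b), exactly as the hunk above computes.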
 
@@ -807,16 +823,32 @@ Object* Deserializer::ProcessBackRefInSerializedCode(Object* obj) {
 // The reason for this strange interface is that otherwise the object is
 // written very late, which means the FreeSpace map is not set up by the
 // time we need to use it to mark the space at the end of a page free.
-void Deserializer::ReadObject(int space_number,
-                              Object** write_back) {
-  int size = source_->GetInt() << kObjectAlignmentBits;
-  Address address = Allocate(space_number, size);
-  HeapObject* obj = HeapObject::FromAddress(address);
+void Deserializer::ReadObject(int space_number, Object** write_back) {
+  Address address;
+  HeapObject* obj;
+  int next_int = source_.GetInt();
+
+  bool double_align = false;
+#ifndef V8_HOST_ARCH_64_BIT
+  double_align = next_int == kDoubleAlignmentSentinel;
+  if (double_align) next_int = source_.GetInt();
+#endif
+
+  DCHECK_NE(kDoubleAlignmentSentinel, next_int);
+  int size = next_int << kObjectAlignmentBits;
+  int reserved_size = size + (double_align ? kPointerSize : 0);
+  address = Allocate(space_number, reserved_size);
+  obj = HeapObject::FromAddress(address);
+  if (double_align) {
+    obj = isolate_->heap()->DoubleAlignForDeserialization(obj, reserved_size);
+    address = obj->address();
+  }
+
   isolate_->heap()->OnAllocationEvent(obj, size);
   Object** current = reinterpret_cast<Object**>(address);
   Object** limit = current + (size >> kPointerSizeLog2);
   if (FLAG_log_snapshot_positions) {
-    LOG(isolate_, SnapshotPositionEvent(address, source_->position()));
+    LOG(isolate_, SnapshotPositionEvent(address, source_.position()));
   }
   ReadData(current, limit, space_number, address);
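
On 32-bit hosts, unboxed doubles need 8-byte alignment that a pointer-aligned bump allocator cannot guarantee, so the stream may prefix an object's size with kDoubleAlignmentSentinel: the deserializer then reserves one extra pointer slot and lets the heap realign the object. A sketch of reading that optional prefix (the function-pointer source and the sentinel parameter are stand-ins):

struct SizePrefix {
  int size_in_words;
  bool double_align;
};

// Reads either "sentinel size" or plain "size", mirroring ReadObject above.
SizePrefix ReadSizePrefix(int (*next_int)(), int sentinel) {
  SizePrefix p = { next_int(), false };
  if (p.size_in_words == sentinel) {
    p.double_align = true;
    p.size_in_words = next_int();  // The real size follows the sentinel.
  }
  return p;
}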
 
@@ -844,8 +876,8 @@ void Deserializer::ReadObject(int space_number,
 // to do is to bump up the pointer for each space in the reserved
 // space. This is also used for fixing back references.
 // We may have to split up the pre-allocation into several chunks
-// because it would not fit onto a single page, we have to keep track
-// of when to move to the next chunk.
+// because it would not fit onto a single page. We do not have to keep
+// track of when to move to the next chunk. An opcode will signal this.
 // Since multiple large objects cannot be folded into one large object
 // space allocation, we have to do an actual allocation when deserializing
 // each large object. Instead of tracking offset for back references, we
@@ -854,7 +886,7 @@ Address Deserializer::Allocate(int space_index, int size) {
   if (space_index == LO_SPACE) {
     AlwaysAllocateScope scope(isolate_);
     LargeObjectSpace* lo_space = isolate_->heap()->lo_space();
-    Executability exec = static_cast<Executability>(source_->Get());
+    Executability exec = static_cast<Executability>(source_.Get());
     AllocationResult result = lo_space->AllocateRaw(size, exec);
     HeapObject* obj = HeapObject::cast(result.ToObjectChecked());
     deserialized_large_objects_.Add(obj);
@@ -863,20 +895,13 @@ Address Deserializer::Allocate(int space_index, int size) {
     DCHECK(space_index < kNumberOfPreallocatedSpaces);
     Address address = high_water_[space_index];
     DCHECK_NE(NULL, address);
+    high_water_[space_index] += size;
+#ifdef DEBUG
+    // Assert that the current reserved chunk is still big enough.
     const Heap::Reservation& reservation = reservations_[space_index];
     int chunk_index = current_chunk_[space_index];
-    if (address + size > reservation[chunk_index].end) {
-      // The last chunk size matches exactly the already deserialized data.
-      DCHECK_EQ(address, reservation[chunk_index].end);
-      // Move to next reserved chunk.
-      chunk_index = ++current_chunk_[space_index];
-      DCHECK_LT(chunk_index, reservation.length());
-      // Prepare for next allocation in the next chunk.
-      address = reservation[chunk_index].start;
-    } else {
-      high_water_[space_index] = address + size;
-    }
-    high_water_[space_index] = address + size;
+    CHECK_LE(high_water_[space_index], reservation[chunk_index].end);
+#endif
     return address;
   }
 }
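
Allocate is now a pure bump of high_water_: the serializer, not the deserializer, decides when a reserved chunk is exhausted and emits an explicit kNextChunk opcode (handled further down in ReadData) that advances current_chunk_ and resets the water mark. A simplified sketch of the two halves, with the chunk bookkeeping reduced to vectors:

#include <cassert>
#include <vector>

struct Chunk { char* start; char* end; };

struct BumpAllocator {
  std::vector<Chunk> chunks;
  size_t current;
  char* high_water;

  char* Allocate(size_t size) {  // No per-allocation chunk checks.
    char* result = high_water;
    high_water += size;
    assert(high_water <= chunks[current].end);
    return result;
  }

  void NextChunk() {  // Driven by a kNextChunk opcode in the stream.
    assert(high_water == chunks[current].end);  // Exactly exhausted.
    current++;
    high_water = chunks[current].start;
  }
};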
@@ -895,7 +920,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
                                source_space != CODE_SPACE &&
                                source_space != OLD_DATA_SPACE);
   while (current < limit) {
-    int data = source_->Get();
+    byte data = source_.Get();
     switch (data) {
 #define CASE_STATEMENT(where, how, within, space_number) \
   case where + how + within + space_number:              \
@@ -919,29 +944,26 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
       if (where == kNewObject) {                                               \
         ReadObject(space_number, &new_object);                                 \
       } else if (where == kRootArray) {                                        \
-        int root_id = source_->GetInt();                                       \
+        int root_id = source_.GetInt();                                        \
         new_object = isolate->heap()->roots_array_start()[root_id];            \
         emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
       } else if (where == kPartialSnapshotCache) {                             \
-        int cache_index = source_->GetInt();                                   \
+        int cache_index = source_.GetInt();                                    \
         new_object = isolate->serialize_partial_snapshot_cache()[cache_index]; \
         emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
       } else if (where == kExternalReference) {                                \
-        int skip = source_->GetInt();                                          \
+        int skip = source_.GetInt();                                           \
         current = reinterpret_cast<Object**>(                                  \
             reinterpret_cast<Address>(current) + skip);                        \
-        int reference_id = source_->GetInt();                                  \
+        int reference_id = source_.GetInt();                                   \
         Address address = external_reference_decoder_->Decode(reference_id);   \
         new_object = reinterpret_cast<Object*>(address);                       \
       } else if (where == kBackref) {                                          \
         emit_write_barrier = (space_number == NEW_SPACE);                      \
         new_object = GetBackReferencedObject(data & kSpaceMask);               \
-        if (deserializing_user_code()) {                                       \
-          new_object = ProcessBackRefInSerializedCode(new_object);             \
-        }                                                                      \
       } else if (where == kBuiltin) {                                          \
         DCHECK(deserializing_user_code());                                     \
-        int builtin_id = source_->GetInt();                                    \
+        int builtin_id = source_.GetInt();                                     \
         DCHECK_LE(0, builtin_id);                                              \
         DCHECK_LT(builtin_id, Builtins::builtin_count);                        \
         Builtins::Name name = static_cast<Builtins::Name>(builtin_id);         \
@@ -949,19 +971,16 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
         emit_write_barrier = false;                                            \
       } else if (where == kAttachedReference) {                                \
         DCHECK(deserializing_user_code());                                     \
-        int index = source_->GetInt();                                         \
+        int index = source_.GetInt();                                          \
         new_object = *attached_objects_->at(index);                            \
         emit_write_barrier = isolate->heap()->InNewSpace(new_object);          \
       } else {                                                                 \
         DCHECK(where == kBackrefWithSkip);                                     \
-        int skip = source_->GetInt();                                          \
+        int skip = source_.GetInt();                                           \
         current = reinterpret_cast<Object**>(                                  \
             reinterpret_cast<Address>(current) + skip);                        \
         emit_write_barrier = (space_number == NEW_SPACE);                      \
         new_object = GetBackReferencedObject(data & kSpaceMask);               \
-        if (deserializing_user_code()) {                                       \
-          new_object = ProcessBackRefInSerializedCode(new_object);             \
-        }                                                                      \
       }                                                                        \
       if (within == kInnerPointer) {                                           \
         if (space_number != CODE_SPACE || new_object->IsCode()) {              \
@@ -1061,23 +1080,22 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
 
       // We generate 15 cases and bodies that process special tags that combine
       // the raw data tag and the length into one byte.
-#define RAW_CASE(index)                                                      \
-      case kRawData + index: {                                               \
-        byte* raw_data_out = reinterpret_cast<byte*>(current);               \
-        source_->CopyRaw(raw_data_out, index * kPointerSize);                \
-        current =                                                            \
-            reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \
-        break;                                                               \
-      }
+#define RAW_CASE(index)                                                        \
+  case kRawData + index: {                                                     \
+    byte* raw_data_out = reinterpret_cast<byte*>(current);                     \
+    source_.CopyRaw(raw_data_out, index* kPointerSize);                        \
+    current = reinterpret_cast<Object**>(raw_data_out + index * kPointerSize); \
+    break;                                                                     \
+  }
       COMMON_RAW_LENGTHS(RAW_CASE)
 #undef RAW_CASE
 
       // Deserialize a chunk of raw data that doesn't have one of the popular
       // lengths.
       case kRawData: {
-        int size = source_->GetInt();
+        int size = source_.GetInt();
         byte* raw_data_out = reinterpret_cast<byte*>(current);
-        source_->CopyRaw(raw_data_out, size);
+        source_.CopyRaw(raw_data_out, size);
         break;
       }
 
@@ -1093,7 +1111,7 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
       SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance)
       SIXTEEN_CASES(kRootArrayConstants + kHasSkipDistance + 16) {
         int root_id = RootArrayConstantFromByteCode(data);
-        int skip = source_->GetInt();
+        int skip = source_.GetInt();
         current = reinterpret_cast<Object**>(
             reinterpret_cast<intptr_t>(current) + skip);
         Object* object = isolate->heap()->roots_array_start()[root_id];
@@ -1102,8 +1120,8 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
         break;
       }
 
-      case kRepeat: {
-        int repeats = source_->GetInt();
+      case kVariableRepeat: {
+        int repeats = source_.GetInt();
         Object* object = current[-1];
         DCHECK(!isolate->heap()->InNewSpace(object));
         for (int i = 0; i < repeats; i++) current[i] = object;
@@ -1113,11 +1131,13 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
 
       STATIC_ASSERT(kRootArrayNumberOfConstantEncodings ==
                     Heap::kOldSpaceRoots);
-      STATIC_ASSERT(kMaxRepeats == 13);
-      case kConstantRepeat:
-      FOUR_CASES(kConstantRepeat + 1)
-      FOUR_CASES(kConstantRepeat + 5)
-      FOUR_CASES(kConstantRepeat + 9) {
+      STATIC_ASSERT(kMaxFixedRepeats == 15);
+      FOUR_CASES(kFixedRepeat)
+      FOUR_CASES(kFixedRepeat + 4)
+      FOUR_CASES(kFixedRepeat + 8)
+      case kFixedRepeat + 12:
+      case kFixedRepeat + 13:
+      case kFixedRepeat + 14: {
         int repeats = RepeatsForCode(data);
         Object* object = current[-1];
         DCHECK(!isolate->heap()->InNewSpace(object));
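
Both repeat forms are a small run-length encoding over the previous word: counts from 1 to 15 are folded into one of the fifteen kFixedRepeat opcodes, while larger counts use kVariableRepeat followed by an int. A sketch of the decode side (the opcode values here are made up; V8 derives the fixed count via RepeatsForCode):

// Two encodings for "repeat the last written word N times".
const int kVariableRepeatOp = 0x30;  // Assumed value, for illustration.
const int kFixedRepeatOp = 0x31;     // 15 consecutive opcodes: counts 1..15.

int RepeatCount(int opcode, int next_int_in_stream) {
  if (opcode == kVariableRepeatOp) return next_int_in_stream;
  return opcode - kFixedRepeatOp + 1;
}

void ApplyRepeat(void** current, int repeats) {
  void* object = current[-1];  // Duplicate the word just written.
  for (int i = 0; i < repeats; i++) current[i] = object;
}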
@@ -1221,15 +1241,15 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
 #undef ALL_SPACES
 
       case kSkip: {
-        int size = source_->GetInt();
+        int size = source_.GetInt();
         current = reinterpret_cast<Object**>(
             reinterpret_cast<intptr_t>(current) + size);
         break;
       }
 
       case kNativesStringResource: {
-        int index = source_->Get();
-        Vector<const char> source_vector = Natives::GetRawScriptSource(index);
+        int index = source_.Get();
+        Vector<const char> source_vector = Natives::GetScriptSource(index);
         NativesExternalStringResource* resource =
             new NativesExternalStringResource(isolate->bootstrapper(),
                                               source_vector.start(),
@@ -1238,6 +1258,41 @@ void Deserializer::ReadData(Object** current, Object** limit, int source_space,
         break;
       }
 
+      case kNextChunk: {
+        int space = source_.Get();
+        DCHECK(space < kNumberOfPreallocatedSpaces);
+        int chunk_index = current_chunk_[space];
+        const Heap::Reservation& reservation = reservations_[space];
+        // Make sure the current chunk is indeed exhausted.
+        CHECK_EQ(reservation[chunk_index].end, high_water_[space]);
+        // Move to next reserved chunk.
+        chunk_index = ++current_chunk_[space];
+        DCHECK_LT(chunk_index, reservation.length());
+        high_water_[space] = reservation[chunk_index].start;
+        break;
+      }
+
+      FOUR_CASES(kHotObjectWithSkip)
+      FOUR_CASES(kHotObjectWithSkip + 4) {
+        int skip = source_.GetInt();
+        current = reinterpret_cast<Object**>(
+            reinterpret_cast<Address>(current) + skip);
+        // Fall through.
+      }
+      FOUR_CASES(kHotObject)
+      FOUR_CASES(kHotObject + 4) {
+        int index = data & kHotObjectIndexMask;
+        *current = hot_objects_.Get(index);
+        if (write_barrier_needed && isolate->heap()->InNewSpace(*current)) {
+          Address current_address = reinterpret_cast<Address>(current);
+          isolate->heap()->RecordWrite(
+              current_object_address,
+              static_cast<int>(current_address - current_object_address));
+        }
+        current++;
+        break;
+      }
+
       case kSynchronize: {
         // If we get here then that indicates that you have a mismatch between
         // the number of GC roots when serializing and deserializing.
@@ -1301,7 +1356,7 @@ void StartupSerializer::VisitPointers(Object** start, Object** end) {
       sink_->Put(kSkip, "Skip");
       sink_->PutInt(kPointerSize, "SkipOneWord");
     } else if ((*current)->IsSmi()) {
-      sink_->Put(kRawData + 1, "Smi");
+      sink_->Put(kOnePointerRawData, "Smi");
       for (int i = 0; i < kPointerSize; i++) {
         sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
       }
@@ -1329,7 +1384,7 @@ bool Serializer::ShouldBeSkipped(Object** current) {
 void Serializer::VisitPointers(Object** start, Object** end) {
   for (Object** current = start; current < end; current++) {
     if ((*current)->IsSmi()) {
-      sink_->Put(kRawData + 1, "Smi");
+      sink_->Put(kOnePointerRawData, "Smi");
       for (int i = 0; i < kPointerSize; i++) {
         sink_->Put(reinterpret_cast<byte*>(current)[i], "Byte");
       }
@@ -1340,15 +1395,21 @@ void Serializer::VisitPointers(Object** start, Object** end) {
 }
 
 
-void Serializer::FinalizeAllocation() {
+void Serializer::EncodeReservations(
+    List<SerializedData::Reservation>* out) const {
   for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
-    // Complete the last pending chunk and if there are no completed chunks,
-    // make sure there is at least one empty chunk.
+    for (int j = 0; j < completed_chunks_[i].length(); j++) {
+      out->Add(SerializedData::Reservation(completed_chunks_[i][j]));
+    }
+
     if (pending_chunk_[i] > 0 || completed_chunks_[i].length() == 0) {
-      completed_chunks_[i].Add(pending_chunk_[i]);
-      pending_chunk_[i] = 0;
+      out->Add(SerializedData::Reservation(pending_chunk_[i]));
     }
+    out->last().mark_as_last();
   }
+
+  out->Add(SerializedData::Reservation(large_objects_total_size_));
+  out->last().mark_as_last();
 }
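
To make the wire format concrete: each space contributes its completed chunk sizes plus any pending chunk, and the last entry per space carries an is-last marker bit. A minimal sketch of the round trip, using std::vector and plain masks in place of V8's List and BitField helpers (the bit layout follows the ChunkSizeBits/IsLastChunkBits definitions later in this patch):

#include <cstdint>
#include <vector>

// Stand-ins for V8's ChunkSizeBits/IsLastChunkBits bit fields (assumed
// layout: bits 0..30 hold the chunk size, bit 31 marks a space's last chunk).
constexpr uint32_t kChunkSizeMask = 0x7fffffffu;
constexpr uint32_t kIsLastChunkBit = 0x80000000u;

// Flatten per-space chunk lists into one stream, as EncodeReservations does.
// Assumes every space contributes at least one entry, which the code above
// guarantees by emitting the pending chunk when no chunk was completed.
std::vector<uint32_t> Encode(
    const std::vector<std::vector<uint32_t>>& chunks_per_space) {
  std::vector<uint32_t> out;
  for (const auto& chunks : chunks_per_space) {
    for (uint32_t size : chunks) out.push_back(size & kChunkSizeMask);
    out.back() |= kIsLastChunkBit;  // mark_as_last() on the space's last entry.
  }
  return out;
}

// Inverse walk, as Deserializer::DecodeReservation would perform it.
std::vector<std::vector<uint32_t>> Decode(const std::vector<uint32_t>& res) {
  std::vector<std::vector<uint32_t>> spaces(1);
  for (uint32_t r : res) {
    spaces.back().push_back(r & kChunkSizeMask);
    if (r & kIsLastChunkBit) spaces.emplace_back();
  }
  spaces.pop_back();  // Drop the empty list opened after the final marker.
  return spaces;
}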
 
 
@@ -1403,24 +1464,61 @@ int PartialSerializer::PartialSnapshotCacheIndex(HeapObject* heap_object) {
 }
 
 
-// Encode the location of an already deserialized object in order to write its
-// location into a later object.  We can encode the location as an offset from
-// the start of the deserialized objects or as an offset backwards from the
-// current allocation pointer.
-void Serializer::SerializeBackReference(BackReference back_reference,
-                                        HowToCode how_to_code,
-                                        WhereToPoint where_to_point, int skip) {
-  AllocationSpace space = back_reference.space();
-  if (skip == 0) {
-    sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRefSer");
-  } else {
-    sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
-               "BackRefSerWithSkip");
-    sink_->PutInt(skip, "BackRefSkipDistance");
+bool Serializer::SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
+                                      WhereToPoint where_to_point, int skip) {
+  if (how_to_code == kPlain && where_to_point == kStartOfObject) {
+    // Encode a reference to a hot object by its index in the working set.
+    int index = hot_objects_.Find(obj);
+    if (index != HotObjectsList::kNotFound) {
+      DCHECK(index >= 0 && index <= kMaxHotObjectIndex);
+      if (FLAG_trace_serializer) {
+        PrintF(" Encoding hot object %d:", index);
+        obj->ShortPrint();
+        PrintF("\n");
+      }
+      if (skip != 0) {
+        sink_->Put(kHotObjectWithSkip + index, "HotObjectWithSkip");
+        sink_->PutInt(skip, "HotObjectSkipDistance");
+      } else {
+        sink_->Put(kHotObject + index, "HotObject");
+      }
+      return true;
+    }
   }
+  BackReference back_reference = back_reference_map_.Lookup(obj);
+  if (back_reference.is_valid()) {
+    // Encode the location of an already deserialized object in order to write
+    // its location into a later object.  We can encode the location as an
+    // offset from the start of the deserialized objects or as an offset
+    // backwards from the current allocation pointer.
+    if (back_reference.is_source()) {
+      FlushSkip(skip);
+      if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
+      DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
+      sink_->Put(kAttachedReference + how_to_code + where_to_point, "Source");
+      sink_->PutInt(kSourceObjectReference, "kSourceObjectIndex");
+    } else {
+      if (FLAG_trace_serializer) {
+        PrintF(" Encoding back reference to: ");
+        obj->ShortPrint();
+        PrintF("\n");
+      }
 
-  sink_->PutInt(back_reference.reference(),
-                (space == LO_SPACE) ? "large object index" : "allocation");
+      AllocationSpace space = back_reference.space();
+      if (skip == 0) {
+        sink_->Put(kBackref + how_to_code + where_to_point + space, "BackRef");
+      } else {
+        sink_->Put(kBackrefWithSkip + how_to_code + where_to_point + space,
+                   "BackRefWithSkip");
+        sink_->PutInt(skip, "BackRefSkipDistance");
+      }
+      sink_->PutInt(back_reference.reference(), "BackRefValue");
+
+      hot_objects_.Add(obj);
+    }
+    return true;
+  }
+  return false;
 }
 
 
@@ -1437,16 +1535,9 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
     return;
   }
 
-  BackReference back_reference = back_reference_map_.Lookup(obj);
-  if (back_reference.is_valid()) {
-    SerializeBackReference(back_reference, how_to_code, where_to_point, skip);
-    return;
-  }
+  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
 
-  if (skip != 0) {
-    sink_->Put(kSkip, "FlushPendingSkip");
-    sink_->PutInt(skip, "SkipDistance");
-  }
+  FlushSkip(skip);
 
   // Object has not yet been serialized.  Serialize it here.
   ObjectSerializer object_serializer(this, obj, sink_, how_to_code,
@@ -1456,7 +1547,7 @@ void StartupSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
 
 
 void StartupSerializer::SerializeWeakReferences() {
-  // This phase comes right after the partial serialization (of the snapshot).
+  // This phase comes right after the serialization (of the snapshot).
   // After we have done the partial serialization the partial snapshot cache
   // will contain some references needed to decode the partial snapshot.  We
   // add one entry with 'undefined' which is the sentinel that the deserializer
@@ -1473,6 +1564,12 @@ void Serializer::PutRoot(int root_index,
                          SerializerDeserializer::HowToCode how_to_code,
                          SerializerDeserializer::WhereToPoint where_to_point,
                          int skip) {
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding root %d:", root_index);
+    object->ShortPrint();
+    PrintF("\n");
+  }
+
   if (how_to_code == kPlain &&
       where_to_point == kStartOfObject &&
       root_index < kRootArrayNumberOfConstantEncodings &&
@@ -1486,10 +1583,7 @@ void Serializer::PutRoot(int root_index,
       sink_->PutInt(skip, "SkipInPutRoot");
     }
   } else {
-    if (skip != 0) {
-      sink_->Put(kSkip, "SkipFromPutRoot");
-      sink_->PutInt(skip, "SkipFromPutRootDistance");
-    }
+    FlushSkip(skip);
     sink_->Put(kRootArray + how_to_code + where_to_point, "RootSerialization");
     sink_->PutInt(root_index, "root_index");
   }
@@ -1511,10 +1605,7 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
   }
 
   if (ShouldBeInThePartialSnapshotCache(obj)) {
-    if (skip != 0) {
-      sink_->Put(kSkip, "SkipFromSerializeObject");
-      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
-    }
+    FlushSkip(skip);
 
     int cache_index = PartialSnapshotCacheIndex(obj);
     sink_->Put(kPartialSnapshotCache + how_to_code + where_to_point,
@@ -1531,16 +1622,10 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
   // either in the root table or in the partial snapshot cache.
   DCHECK(!obj->IsInternalizedString());
 
-  BackReference back_reference = back_reference_map_.Lookup(obj);
-  if (back_reference.is_valid()) {
-    SerializeBackReference(back_reference, how_to_code, where_to_point, skip);
-    return;
-  }
+  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
+
+  FlushSkip(skip);
 
-  if (skip != 0) {
-    sink_->Put(kSkip, "SkipFromSerializeObject");
-    sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
-  }
   // Object has not yet been serialized.  Serialize it here.
   ObjectSerializer serializer(this, obj, sink_, how_to_code, where_to_point);
   serializer.Serialize();
@@ -1549,10 +1634,6 @@ void PartialSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
 
 void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
                                                      int size, Map* map) {
-  sink_->Put(kNewObject + reference_representation_ + space,
-             "ObjectSerialization");
-  sink_->PutInt(size >> kObjectAlignmentBits, "Size in words");
-
   if (serializer_->code_address_map_) {
     const char* code_name =
         serializer_->code_address_map_->Lookup(object_->address());
@@ -1562,9 +1643,11 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
         SnapshotPositionEvent(object_->address(), sink_->Position()));
   }
 
-  // Mark this object as already serialized.
   BackReference back_reference;
   if (space == LO_SPACE) {
+    sink_->Put(kNewObject + reference_representation_ + space,
+               "NewLargeObject");
+    sink_->PutInt(size >> kObjectAlignmentBits, "ObjectSizeInWords");
     if (object_->IsCode()) {
       sink_->Put(EXECUTABLE, "executable large object");
     } else {
@@ -1572,8 +1655,23 @@ void Serializer::ObjectSerializer::SerializePrologue(AllocationSpace space,
     }
     back_reference = serializer_->AllocateLargeObject(size);
   } else {
-    back_reference = serializer_->Allocate(space, size);
+    bool needs_double_align = false;
+    if (object_->NeedsToEnsureDoubleAlignment()) {
+      // Add wriggle room for double alignment padding.
+      back_reference = serializer_->Allocate(space, size + kPointerSize);
+      needs_double_align = true;
+    } else {
+      back_reference = serializer_->Allocate(space, size);
+    }
+    sink_->Put(kNewObject + reference_representation_ + space, "NewObject");
+    if (needs_double_align)
+      sink_->PutInt(kDoubleAlignmentSentinel, "DoubleAlignSentinel");
+    int encoded_size = size >> kObjectAlignmentBits;
+    DCHECK_NE(kDoubleAlignmentSentinel, encoded_size);
+    sink_->PutInt(encoded_size, "ObjectSizeInWords");
   }
+
+  // Mark this object as already serialized.
   serializer_->back_reference_map()->Add(object_, back_reference);
 
   // Serialize the map (first word of the object).
@@ -1630,7 +1728,7 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
   }
 
   // Serialize string content.
-  sink_->PutRaw(const_cast<byte*>(resource), content_size, "StringContent");
+  sink_->PutRaw(resource, content_size, "StringContent");
 
   // Since the allocation size is rounded up to object alignment, there
   // maybe left-over bytes that need to be padded.
@@ -1644,6 +1742,18 @@ void Serializer::ObjectSerializer::SerializeExternalString() {
 
 
 void Serializer::ObjectSerializer::Serialize() {
+  if (FLAG_trace_serializer) {
+    PrintF(" Encoding heap object: ");
+    object_->ShortPrint();
+    PrintF("\n");
+  }
+
+  if (object_->IsScript()) {
+    // Clear cached line ends.
+    Object* undefined = serializer_->isolate()->heap()->undefined_value();
+    Script::cast(object_)->set_line_ends(undefined);
+  }
+
   if (object_->IsExternalString()) {
     Heap* heap = serializer_->isolate()->heap();
     if (object_->map() != heap->native_source_string_map()) {
@@ -1694,8 +1804,8 @@ void Serializer::ObjectSerializer::VisitPointers(Object** start,
         }
         current += repeat_count;
         bytes_processed_so_far_ += repeat_count * kPointerSize;
-        if (repeat_count > kMaxRepeats) {
-          sink_->Put(kRepeat, "SerializeRepeats");
+        if (repeat_count > kMaxFixedRepeats) {
+          sink_->Put(kVariableRepeat, "SerializeRepeats");
           sink_->PutInt(repeat_count, "SerializeRepeats");
         } else {
           sink_->Put(CodeForRepeats(repeat_count), "SerializeRepeats");
@@ -1874,6 +1984,8 @@ int Serializer::ObjectSerializer::OutputRawData(
     // To make snapshots reproducible, we need to wipe out all pointers in code.
     if (code_object_) {
       Code* code = CloneCodeObject(object_);
+      // Code age headers are not serializable.
+      code->MakeYoung(serializer_->isolate());
       WipeOutRelocations(code);
       // We need to wipe out the header fields *after* wiping out the
       // relocations, because some of these fields are needed for the latter.
@@ -1916,13 +2028,16 @@ BackReference Serializer::AllocateLargeObject(int size) {
 
 
 BackReference Serializer::Allocate(AllocationSpace space, int size) {
-  CHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
+  DCHECK(space >= 0 && space < kNumberOfPreallocatedSpaces);
   DCHECK(size > 0 && size <= static_cast<int>(max_chunk_size(space)));
   uint32_t new_chunk_size = pending_chunk_[space] + size;
   if (new_chunk_size > max_chunk_size(space)) {
     // The new chunk size would not fit onto a single page. Complete the
     // current chunk and start a new one.
+    sink_->Put(kNextChunk, "NextChunk");
+    sink_->Put(space, "NextChunkSpace");
     completed_chunks_[space].Add(pending_chunk_[space]);
+    DCHECK_LE(completed_chunks_[space].length(), BackReference::kMaxChunkIndex);
     pending_chunk_[space] = 0;
     new_chunk_size = size;
   }
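
For context, a compact model of this chunked bump allocation (kMaxChunkSize is a made-up stand-in for the real per-space max_chunk_size): allocation bumps within the pending chunk, and an overflow completes the chunk and emits kNextChunk so the deserializer's chunk cursor advances in lockstep.

#include <cstdint>
#include <utility>
#include <vector>

// Sketch of the per-space chunked allocation in Serializer::Allocate.
const uint32_t kMaxChunkSize = 4096;  // Hypothetical page capacity.

struct ChunkAllocator {
  std::vector<uint32_t> completed_chunks;
  uint32_t pending_chunk = 0;

  // Returns {chunk_index, chunk_offset}, the two values a BackReference
  // packs so the deserializer can recompute the object's address later.
  std::pair<uint32_t, uint32_t> Allocate(uint32_t size,
                                         bool* needs_next_chunk) {
    *needs_next_chunk = false;
    if (pending_chunk + size > kMaxChunkSize) {
      // The serializer emits kNextChunk at this point, so the deserializer's
      // current_chunk_ cursor for this space advances in lockstep.
      completed_chunks.push_back(pending_chunk);
      pending_chunk = 0;
      *needs_next_chunk = true;
    }
    uint32_t offset = pending_chunk;
    pending_chunk += size;
    return {static_cast<uint32_t>(completed_chunks.size()), offset};
  }
};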
@@ -1953,7 +2068,7 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
                                       Handle<String> source) {
   base::ElapsedTimer timer;
   if (FLAG_profile_deserialization) timer.Start();
-  if (FLAG_trace_code_serializer) {
+  if (FLAG_trace_serializer) {
     PrintF("[Serializing from");
     Object* script = info->script();
     if (script->IsScript()) Script::cast(script)->name()->ShortPrint();
@@ -1967,16 +2082,8 @@ ScriptData* CodeSerializer::Serialize(Isolate* isolate,
   Object** location = Handle<Object>::cast(info).location();
   cs.VisitPointer(location);
   cs.Pad();
-  cs.FinalizeAllocation();
-
-  for (int i = 0; i < kNumberOfPreallocatedSpaces; i++) {
-    // Fail if any chunk index exceeds the limit.
-    if (cs.FinalAllocationChunks(i).length() > BackReference::kMaxChunkIndex) {
-      return NULL;
-    }
-  }
 
-  SerializedCodeData data(sink.data(), &cs);
+  SerializedCodeData data(sink.data(), cs);
   ScriptData* script_data = data.GetScriptData();
 
   if (FLAG_profile_deserialization) {
@@ -1993,33 +2100,13 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
                                      WhereToPoint where_to_point, int skip) {
   int root_index = root_index_map_.Lookup(obj);
   if (root_index != RootIndexMap::kInvalidRootIndex) {
-    if (FLAG_trace_code_serializer) {
-      PrintF(" Encoding root: %d\n", root_index);
-    }
     PutRoot(root_index, obj, how_to_code, where_to_point, skip);
     return;
   }
 
-  BackReference back_reference = back_reference_map_.Lookup(obj);
-  if (back_reference.is_valid()) {
-    if (back_reference.is_source()) {
-      DCHECK_EQ(source_, obj);
-      SerializeSourceObject(how_to_code, where_to_point);
-    } else {
-      if (FLAG_trace_code_serializer) {
-        PrintF(" Encoding back reference to: ");
-        obj->ShortPrint();
-        PrintF("\n");
-      }
-      SerializeBackReference(back_reference, how_to_code, where_to_point, skip);
-    }
-    return;
-  }
+  if (SerializeKnownObject(obj, how_to_code, where_to_point, skip)) return;
 
-  if (skip != 0) {
-    sink_->Put(kSkip, "SkipFromSerializeObject");
-    sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
-  }
+  FlushSkip(skip);
 
   if (obj->IsCode()) {
     Code* code_object = Code::cast(obj);
@@ -2042,13 +2129,13 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
         SerializeIC(code_object, how_to_code, where_to_point);
         return;
       case Code::FUNCTION:
-        // Only serialize the code for the toplevel function. Replace code
-        // of included function literals by the lazy compile builtin.
+        DCHECK(code_object->has_reloc_info_for_serialization());
+        // Only serialize the code for the toplevel function, unless
+        // FLAG_serialize_inner is set. Replace the code of inner functions
+        // with the lazy compile builtin.
         // This is safe, as checked in Compiler::BuildFunctionInfo.
-        if (code_object != main_code_) {
+        if (code_object != main_code_ && !FLAG_serialize_inner) {
           SerializeBuiltin(Builtins::kCompileLazy, how_to_code, where_to_point);
         } else {
-          code_object->MakeYoung();
           SerializeGeneric(code_object, how_to_code, where_to_point);
         }
         return;
@@ -2062,6 +2149,8 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
   CHECK(!obj->IsJSGlobalProxy() && !obj->IsGlobalObject());
   // There should be no hash table embedded. They would require rehashing.
   CHECK(!obj->IsHashTable());
+  // We expect no instantiated function objects or contexts.
+  CHECK(!obj->IsJSFunction() && !obj->IsContext());
 
   SerializeGeneric(obj, how_to_code, where_to_point);
 }
@@ -2070,12 +2159,6 @@ void CodeSerializer::SerializeObject(HeapObject* obj, HowToCode how_to_code,
 void CodeSerializer::SerializeGeneric(HeapObject* heap_object,
                                       HowToCode how_to_code,
                                       WhereToPoint where_to_point) {
-  if (FLAG_trace_code_serializer) {
-    PrintF(" Encoding heap object: ");
-    heap_object->ShortPrint();
-    PrintF("\n");
-  }
-
   if (heap_object->IsInternalizedString()) num_internalized_strings_++;
 
   // Object has not yet been serialized.  Serialize it here.
@@ -2093,7 +2176,7 @@ void CodeSerializer::SerializeBuiltin(int builtin_index, HowToCode how_to_code,
   DCHECK_LT(builtin_index, Builtins::builtin_count);
   DCHECK_LE(0, builtin_index);
 
-  if (FLAG_trace_code_serializer) {
+  if (FLAG_trace_serializer) {
     PrintF(" Encoding builtin: %s\n",
            isolate()->builtins()->name(builtin_index));
   }
@@ -2113,7 +2196,7 @@ void CodeSerializer::SerializeCodeStub(uint32_t stub_key, HowToCode how_to_code,
 
   int index = AddCodeStubKey(stub_key) + kCodeStubsBaseIndex;
 
-  if (FLAG_trace_code_serializer) {
+  if (FLAG_trace_serializer) {
     PrintF(" Encoding code stub %s as %d\n",
            CodeStub::MajorName(CodeStub::MajorKeyFromKey(stub_key), false),
            index);
@@ -2129,7 +2212,7 @@ void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
   // The IC may be implemented as a stub.
   uint32_t stub_key = ic->stub_key();
   if (stub_key != CodeStub::NoCacheKey()) {
-    if (FLAG_trace_code_serializer) {
+    if (FLAG_trace_serializer) {
       PrintF(" %s is a code stub\n", Code::Kind2String(ic->kind()));
     }
     SerializeCodeStub(stub_key, how_to_code, where_to_point);
@@ -2143,7 +2226,7 @@ void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
     Builtins::Name name = static_cast<Builtins::Name>(builtin_index);
     Code* builtin = isolate()->builtins()->builtin(name);
     if (builtin == ic) {
-      if (FLAG_trace_code_serializer) {
+      if (FLAG_trace_serializer) {
         PrintF(" %s is a builtin\n", Code::Kind2String(ic->kind()));
       }
       DCHECK(ic->kind() == Code::KEYED_LOAD_IC ||
@@ -2154,7 +2237,7 @@ void CodeSerializer::SerializeIC(Code* ic, HowToCode how_to_code,
   }
   // The IC may also just be a piece of code kept in the non_monomorphic_cache.
   // In that case, just serialize as a normal code object.
-  if (FLAG_trace_code_serializer) {
+  if (FLAG_trace_serializer) {
     PrintF(" %s has no special handling\n", Code::Kind2String(ic->kind()));
   }
   DCHECK(ic->kind() == Code::LOAD_IC || ic->kind() == Code::STORE_IC);
@@ -2176,7 +2259,7 @@ int CodeSerializer::AddCodeStubKey(uint32_t stub_key) {
 
 void CodeSerializer::SerializeSourceObject(HowToCode how_to_code,
                                            WhereToPoint where_to_point) {
-  if (FLAG_trace_code_serializer) PrintF(" Encoding source object\n");
+  if (FLAG_trace_serializer) PrintF(" Encoding source object\n");
 
   DCHECK(how_to_code == kPlain && where_to_point == kStartOfObject);
   sink_->Put(kAttachedReference + how_to_code + where_to_point, "Source");
@@ -2185,7 +2268,7 @@ void CodeSerializer::SerializeSourceObject(HowToCode how_to_code,
 
 
 MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
-    Isolate* isolate, ScriptData* data, Handle<String> source) {
+    Isolate* isolate, ScriptData* cached_data, Handle<String> source) {
   base::ElapsedTimer timer;
   if (FLAG_profile_deserialization) timer.Start();
 
@@ -2194,26 +2277,20 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
   {
     HandleScope scope(isolate);
 
-    SerializedCodeData scd(data, *source);
-    SnapshotByteSource payload(scd.Payload(), scd.PayloadLength());
-    Deserializer deserializer(&payload);
+    SmartPointer<SerializedCodeData> scd(
+        SerializedCodeData::FromCachedData(cached_data, *source));
+    if (scd.is_empty()) {
+      if (FLAG_profile_deserialization) PrintF("[Cached code failed check]\n");
+      DCHECK(cached_data->rejected());
+      return MaybeHandle<SharedFunctionInfo>();
+    }
 
     // Eagerly expand string table to avoid allocations during deserialization.
-    StringTable::EnsureCapacityForDeserialization(isolate,
-                                                  scd.NumInternalizedStrings());
-
-    // Set reservations.
-    STATIC_ASSERT(NEW_SPACE == 0);
-    int current_space = NEW_SPACE;
-    Vector<const SerializedCodeData::Reservation> res = scd.Reservations();
-    for (const auto& r : res) {
-      deserializer.AddReservation(current_space, r.chunk_size());
-      if (r.is_last_chunk()) current_space++;
-    }
-    DCHECK_EQ(kNumberOfSpaces, current_space);
+    StringTable::EnsureCapacityForDeserialization(
+        isolate, scd->NumInternalizedStrings());
 
     // Prepare and register list of attached objects.
-    Vector<const uint32_t> code_stub_keys = scd.CodeStubKeys();
+    Vector<const uint32_t> code_stub_keys = scd->CodeStubKeys();
     Vector<Handle<Object> > attached_objects = Vector<Handle<Object> >::New(
         code_stub_keys.length() + kCodeStubsBaseIndex);
     attached_objects[kSourceObjectIndex] = source;
@@ -2221,6 +2298,8 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
       attached_objects[i + kCodeStubsBaseIndex] =
           CodeStub::GetCode(isolate, code_stub_keys[i]).ToHandleChecked();
     }
+
+    Deserializer deserializer(scd.get());
     deserializer.SetAttachedObjects(&attached_objects);
 
     // Deserialize.
@@ -2235,7 +2314,7 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
 
   if (FLAG_profile_deserialization) {
     double ms = timer.Elapsed().InMillisecondsF();
-    int length = data->length();
+    int length = cached_data->length();
     PrintF("[Deserializing from %d bytes took %0.3f ms]\n", length, ms);
   }
   Handle<SharedFunctionInfo> result(SharedFunctionInfo::cast(root), isolate);
@@ -2256,70 +2335,152 @@ MaybeHandle<SharedFunctionInfo> CodeSerializer::Deserialize(
 }
 
 
+void SerializedData::AllocateData(int size) {
+  DCHECK(!owns_data_);
+  data_ = NewArray<byte>(size);
+  size_ = size;
+  owns_data_ = true;
+  DCHECK(IsAligned(reinterpret_cast<intptr_t>(data_), kPointerAlignment));
+}
+
+
+SnapshotData::SnapshotData(const SnapshotByteSink& sink,
+                           const Serializer& ser) {
+  DisallowHeapAllocation no_gc;
+  List<Reservation> reservations;
+  ser.EncodeReservations(&reservations);
+  const List<byte>& payload = sink.data();
+
+  // Calculate sizes.
+  int reservation_size = reservations.length() * kInt32Size;
+  int size = kHeaderSize + reservation_size + payload.length();
+
+  // Allocate backing store and create result data.
+  AllocateData(size);
+
+  // Set header values.
+  SetHeaderValue(kCheckSumOffset, Version::Hash());
+  SetHeaderValue(kReservationsOffset, reservations.length());
+  SetHeaderValue(kPayloadLengthOffset, payload.length());
+
+  // Copy reservation chunk sizes.
+  CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
+            reservation_size);
+
+  // Copy serialized data.
+  CopyBytes(data_ + kHeaderSize + reservation_size, payload.begin(),
+            static_cast<size_t>(payload.length()));
+}
+
+
+bool SnapshotData::IsSane() {
+  return GetHeaderValue(kCheckSumOffset) == Version::Hash();
+}
+
+
+Vector<const SerializedData::Reservation> SnapshotData::Reservations() const {
+  return Vector<const Reservation>(
+      reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
+      GetHeaderValue(kReservationsOffset));
+}
+
+
+Vector<const byte> SnapshotData::Payload() const {
+  int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
+  const byte* payload = data_ + kHeaderSize + reservations_size;
+  int length = GetHeaderValue(kPayloadLengthOffset);
+  DCHECK_EQ(data_ + size_, payload + length);
+  return Vector<const byte>(payload, length);
+}
+
+
 SerializedCodeData::SerializedCodeData(const List<byte>& payload,
-                                       CodeSerializer* cs)
-    : script_data_(NULL), owns_script_data_(true) {
+                                       const CodeSerializer& cs) {
   DisallowHeapAllocation no_gc;
-  List<uint32_t>* stub_keys = cs->stub_keys();
+  const List<uint32_t>* stub_keys = cs.stub_keys();
 
-  // Gather reservation chunk sizes.
-  List<uint32_t> reservations(SerializerDeserializer::kNumberOfSpaces);
-  STATIC_ASSERT(NEW_SPACE == 0);
-  for (int i = 0; i < SerializerDeserializer::kNumberOfSpaces; i++) {
-    Vector<const uint32_t> chunks = cs->FinalAllocationChunks(i);
-    for (int j = 0; j < chunks.length(); j++) {
-      uint32_t chunk = ChunkSizeBits::encode(chunks[j]) |
-                       IsLastChunkBits::encode(j == chunks.length() - 1);
-      reservations.Add(chunk);
-    }
-  }
+  List<Reservation> reservations;
+  cs.EncodeReservations(&reservations);
 
   // Calculate sizes.
   int reservation_size = reservations.length() * kInt32Size;
   int num_stub_keys = stub_keys->length();
   int stub_keys_size = stub_keys->length() * kInt32Size;
-  int data_length =
-      kHeaderSize + reservation_size + stub_keys_size + payload.length();
+  int size = kHeaderSize + reservation_size + stub_keys_size + payload.length();
 
   // Allocate backing store and create result data.
-  byte* data = NewArray<byte>(data_length);
-  DCHECK(IsAligned(reinterpret_cast<intptr_t>(data), kPointerAlignment));
-  script_data_ = new ScriptData(data, data_length);
-  script_data_->AcquireDataOwnership();
+  AllocateData(size);
 
   // Set header values.
-  SetHeaderValue(kCheckSumOffset, CheckSum(cs->source()));
-  SetHeaderValue(kNumInternalizedStringsOffset, cs->num_internalized_strings());
+  SetHeaderValue(kCheckSumOffset, CheckSum(cs.source()));
+  SetHeaderValue(kNumInternalizedStringsOffset, cs.num_internalized_strings());
   SetHeaderValue(kReservationsOffset, reservations.length());
   SetHeaderValue(kNumCodeStubKeysOffset, num_stub_keys);
   SetHeaderValue(kPayloadLengthOffset, payload.length());
 
   // Copy reservation chunk sizes.
-  CopyBytes(data + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
+  CopyBytes(data_ + kHeaderSize, reinterpret_cast<byte*>(reservations.begin()),
             reservation_size);
 
   // Copy code stub keys.
-  CopyBytes(data + kHeaderSize + reservation_size,
+  CopyBytes(data_ + kHeaderSize + reservation_size,
             reinterpret_cast<byte*>(stub_keys->begin()), stub_keys_size);
 
   // Copy serialized data.
-  CopyBytes(data + kHeaderSize + reservation_size + stub_keys_size,
+  CopyBytes(data_ + kHeaderSize + reservation_size + stub_keys_size,
             payload.begin(), static_cast<size_t>(payload.length()));
 }
 
 
 bool SerializedCodeData::IsSane(String* source) {
   return GetHeaderValue(kCheckSumOffset) == CheckSum(source) &&
-         PayloadLength() >= SharedFunctionInfo::kSize;
+         Payload().length() >= SharedFunctionInfo::kSize;
 }
 
 
 int SerializedCodeData::CheckSum(String* string) {
-  int checksum = Version::Hash();
-#ifdef DEBUG
-  uint32_t seed = static_cast<uint32_t>(checksum);
-  checksum = static_cast<int>(IteratingStringHasher::Hash(string, seed));
-#endif  // DEBUG
-  return checksum;
+  return Version::Hash() ^ string->length();
+}
+
+
+// Return ScriptData object and relinquish ownership over it to the caller.
+ScriptData* SerializedCodeData::GetScriptData() {
+  DCHECK(owns_data_);
+  ScriptData* result = new ScriptData(data_, size_);
+  result->AcquireDataOwnership();
+  owns_data_ = false;
+  data_ = NULL;
+  return result;
+}
+
+
+Vector<const SerializedData::Reservation> SerializedCodeData::Reservations()
+    const {
+  return Vector<const Reservation>(
+      reinterpret_cast<const Reservation*>(data_ + kHeaderSize),
+      GetHeaderValue(kReservationsOffset));
+}
+
+
+Vector<const byte> SerializedCodeData::Payload() const {
+  int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
+  int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
+  const byte* payload =
+      data_ + kHeaderSize + reservations_size + code_stubs_size;
+  int length = GetHeaderValue(kPayloadLengthOffset);
+  DCHECK_EQ(data_ + size_, payload + length);
+  return Vector<const byte>(payload, length);
+}
+
+
+int SerializedCodeData::NumInternalizedStrings() const {
+  return GetHeaderValue(kNumInternalizedStringsOffset);
+}
+
+Vector<const uint32_t> SerializedCodeData::CodeStubKeys() const {
+  int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
+  const byte* start = data_ + kHeaderSize + reservations_size;
+  return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
+                                GetHeaderValue(kNumCodeStubKeysOffset));
 }
 } }  // namespace v8::internal
index d7a6c7f..bd0c423 100644 (file)
@@ -24,7 +24,7 @@ enum TypeCode {
   IC_UTILITY,
   STATS_COUNTER,
   TOP_ADDRESS,
-  ACCESSOR,
+  ACCESSOR_CODE,
   STUB_CACHE_TABLE,
   RUNTIME_ENTRY,
   LAZY_DEOPTIMIZATION
@@ -285,6 +285,43 @@ class BackReferenceMap : public AddressMapBase {
 };
 
 
+class HotObjectsList {
+ public:
+  HotObjectsList() : index_(0) {
+    for (int i = 0; i < kSize; i++) circular_queue_[i] = NULL;
+  }
+
+  void Add(HeapObject* object) {
+    circular_queue_[index_] = object;
+    index_ = (index_ + 1) & kSizeMask;
+  }
+
+  HeapObject* Get(int index) {
+    DCHECK_NE(NULL, circular_queue_[index]);
+    return circular_queue_[index];
+  }
+
+  static const int kNotFound = -1;
+
+  int Find(HeapObject* object) {
+    for (int i = 0; i < kSize; i++) {
+      if (circular_queue_[i] == object) return i;
+    }
+    return kNotFound;
+  }
+
+  static const int kSize = 8;
+
+ private:
+  STATIC_ASSERT(IS_POWER_OF_TWO(kSize));
+  static const int kSizeMask = kSize - 1;
+  HeapObject* circular_queue_[kSize];
+  int index_;
+
+  DISALLOW_COPY_AND_ASSIGN(HotObjectsList);
+};
+
+
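A standalone miniature of this working set may help (a sketch only; the real class stores HeapObject* and is shared via the SerializerDeserializer base): it is a fixed-size circular queue where Add overwrites the oldest entry and Find reports the index that the opcode encodes.

#include <cstddef>

// Miniature of the 8-entry hot-objects working set (sketch; both serializer
// and deserializer keep one and feed it in the same order).
class TinyHotList {
 public:
  static const int kSize = 8;  // Must be a power of two for the mask below.
  static const int kNotFound = -1;

  void Add(const void* object) {
    queue_[index_] = object;
    index_ = (index_ + 1) & (kSize - 1);  // Wrap around; oldest entry evicted.
  }

  int Find(const void* object) const {
    for (int i = 0; i < kSize; i++) {
      if (queue_[i] == object) return i;
    }
    return kNotFound;
  }

 private:
  const void* queue_[kSize] = {nullptr};
  int index_ = 0;
};

On the wire, a hit at index i becomes the single byte kHotObject + i (0x70..0x77), or kHotObjectWithSkip + i (0x78..0x7f) when a skip distance is also needed; because both sides Add objects in the same order, the indices agree.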
 // The Serializer/Deserializer class is a common superclass for Serializer and
 // Deserializer which is used to store common constants and methods used by
 // both.
@@ -301,20 +338,21 @@ class SerializerDeserializer: public ObjectVisitor {
  protected:
   // Where the pointed-to object can be found:
   enum Where {
-    kNewObject = 0,  // Object is next in snapshot.
-    // 1-7                             One per space.
+    kNewObject = 0,  //              Object is next in snapshot.
+    // 1-7                           One per space.
+    // 0x8                           Unused.
     kRootArray = 0x9,             // Object is found in root array.
     kPartialSnapshotCache = 0xa,  // Object is in the cache.
     kExternalReference = 0xb,     // Pointer to an external reference.
     kSkip = 0xc,                  // Skip n bytes.
     kBuiltin = 0xd,               // Builtin code object.
     kAttachedReference = 0xe,     // Object is described in an attached list.
-    kNop = 0xf,                   // Does nothing, used to pad.
-    kBackref = 0x10,              // Object is described relative to end.
-    // 0x11-0x17                       One per space.
-    kBackrefWithSkip = 0x18,  // Object is described relative to end.
-    // 0x19-0x1f                       One per space.
-    // 0x20-0x3f                       Used by misc. tags below.
+    // 0xf                           Used by misc. See below.
+    kBackref = 0x10,  //             Object is described relative to end.
+    // 0x11-0x17                     One per space.
+    kBackrefWithSkip = 0x18,  //     Object is described relative to end.
+    // 0x19-0x1f                     One per space.
+    // 0x20-0x3f                     Used by misc. See below.
     kPointedToMask = 0x3f
   };
 
@@ -346,40 +384,116 @@ class SerializerDeserializer: public ObjectVisitor {
   // entire code in one memcpy, then fix up stuff with kSkip and other byte
   // codes that overwrite data.
   static const int kRawData = 0x20;
-  // Some common raw lengths: 0x21-0x3f.  These autoadvance the current pointer.
-  // A tag emitted at strategic points in the snapshot to delineate sections.
-  // If the deserializer does not find these at the expected moments then it
-  // is an indication that the snapshot and the VM do not fit together.
-  // Examine the build process for architecture, version or configuration
-  // mismatches.
-  static const int kSynchronize = 0x70;
-  // Used for the source code of the natives, which is in the executable, but
-  // is referred to from external strings in the snapshot.
-  static const int kNativesStringResource = 0x71;
-  static const int kRepeat = 0x72;
-  static const int kConstantRepeat = 0x73;
-  // 0x73-0x7f            Repeat last word (subtract 0x72 to get the count).
-  static const int kMaxRepeats = 0x7f - 0x72;
+  // Some common raw lengths: 0x21-0x3f.
+  // These auto-advance the current pointer.
+  static const int kOnePointerRawData = 0x21;
+
+  static const int kVariableRepeat = 0x60;
+  // 0x61-0x6f   Repeat last word (subtract 0x60 to get the count).
+  static const int kFixedRepeat = 0x61;
+  static const int kFixedRepeatBase = kFixedRepeat - 1;
+  static const int kLastFixedRepeat = 0x6f;
+  static const int kMaxFixedRepeats = kLastFixedRepeat - kFixedRepeatBase;
   static int CodeForRepeats(int repeats) {
-    DCHECK(repeats >= 1 && repeats <= kMaxRepeats);
-    return 0x72 + repeats;
+    DCHECK(repeats >= 1 && repeats <= kMaxFixedRepeats);
+    return kFixedRepeatBase + repeats;
   }
   static int RepeatsForCode(int byte_code) {
-    DCHECK(byte_code >= kConstantRepeat && byte_code <= 0x7f);
-    return byte_code - 0x72;
+    DCHECK(byte_code > kFixedRepeatBase && byte_code <= kLastFixedRepeat);
+    return byte_code - kFixedRepeatBase;
   }
+
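A standalone round-trip check of the renumbering (my own harness, not part of the patch; the constants mirror the definitions above):

#include <cassert>

const int kFixedRepeat = 0x61;
const int kFixedRepeatBase = kFixedRepeat - 1;  // 0x60
const int kLastFixedRepeat = 0x6f;
const int kMaxFixedRepeats = kLastFixedRepeat - kFixedRepeatBase;  // 15

int CodeForRepeats(int repeats) { return kFixedRepeatBase + repeats; }
int RepeatsForCode(int byte_code) { return byte_code - kFixedRepeatBase; }

int main() {
  // Runs of 1..15 identical words fit into the single opcodes 0x61..0x6f.
  for (int r = 1; r <= kMaxFixedRepeats; r++) {
    assert(RepeatsForCode(CodeForRepeats(r)) == r);
  }
  // Longer runs use kVariableRepeat (0x60) followed by an explicit count,
  // matching the repeat_count > kMaxFixedRepeats branch in VisitPointers.
  return 0;
}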
+  // Hot objects are a small set of recently seen or back-referenced objects.
+  // They are represented by a single opcode to save space.
+  // We use 0x70..0x77 for 8 hot objects, and 0x78..0x7f when a skip precedes.
+  static const int kHotObject = 0x70;
+  static const int kMaxHotObjectIndex = 0x77 - kHotObject;
+  static const int kHotObjectWithSkip = 0x78;
+  STATIC_ASSERT(HotObjectsList::kSize == kMaxHotObjectIndex + 1);
+  STATIC_ASSERT(0x7f - kHotObjectWithSkip == kMaxHotObjectIndex);
+  static const int kHotObjectIndexMask = 0x7;
+
   static const int kRootArrayConstants = 0xa0;
-  // 0xa0-0xbf            Things from the first 32 elements of the root array.
+  // 0xa0-0xbf  Things from the first 32 elements of the root array.
   static const int kRootArrayNumberOfConstantEncodings = 0x20;
   static int RootArrayConstantFromByteCode(int byte_code) {
     return byte_code & 0x1f;
   }
 
+  // Do nothing, used for padding.
+  static const int kNop = 0xf;
+
+  // Move to next reserved chunk.
+  static const int kNextChunk = 0x4f;
+
+  // A tag emitted at strategic points in the snapshot to delineate sections.
+  // If the deserializer does not find these at the expected moments then it
+  // is an indication that the snapshot and the VM do not fit together.
+  // Examine the build process for architecture, version or configuration
+  // mismatches.
+  static const int kSynchronize = 0x8f;
+
+  // Used for the source code of the natives, which is in the executable, but
+  // is referred to from external strings in the snapshot.
+  static const int kNativesStringResource = 0xcf;
+
   static const int kAnyOldSpace = -1;
 
   // A bitmask for getting the space out of an instruction.
   static const int kSpaceMask = 7;
   STATIC_ASSERT(kNumberOfSpaces <= kSpaceMask + 1);
+
+  // Sentinel after a new object to indicate that double alignment is needed.
+  static const int kDoubleAlignmentSentinel = 0;
+
+  // Used as index for the attached reference representing the source object.
+  static const int kSourceObjectReference = 0;
+
+  HotObjectsList hot_objects_;
+};
+
+
+class SerializedData {
+ public:
+  class Reservation {
+   public:
+    explicit Reservation(uint32_t size)
+        : reservation_(ChunkSizeBits::encode(size)) {}
+
+    uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation_); }
+    bool is_last() const { return IsLastChunkBits::decode(reservation_); }
+
+    void mark_as_last() { reservation_ |= IsLastChunkBits::encode(true); }
+
+   private:
+    uint32_t reservation_;
+  };
+
+  SerializedData(byte* data, int size)
+      : data_(data), size_(size), owns_data_(false) {}
+  SerializedData() : data_(NULL), size_(0), owns_data_(false) {}
+
+  ~SerializedData() {
+    if (owns_data_) DeleteArray<byte>(data_);
+  }
+
+  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
+  class IsLastChunkBits : public BitField<bool, 31, 1> {};
+
+ protected:
+  void SetHeaderValue(int offset, int value) {
+    reinterpret_cast<int*>(data_)[offset] = value;
+  }
+
+  int GetHeaderValue(int offset) const {
+    return reinterpret_cast<const int*>(data_)[offset];
+  }
+
+  void AllocateData(int size);
+
+  byte* data_;
+  int size_;
+  bool owns_data_;
 };
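
The header accessors overlay an int array on the front of data_. A free-standing sketch of how a consumer locates the payload in the SnapshotData layout defined below ([0] version hash, [1] reservation entry count, [2] payload length), with memcpy standing in for the reinterpret_cast the real code uses:

#include <cstdint>
#include <cstring>

const int kInt32Size = static_cast<int>(sizeof(int32_t));
// SnapshotData header slots, per the layout defined in this patch.
const int kCheckSumOffset = 0;       // [0] version hash
const int kReservationsOffset = 1;   // [1] reservation entry count
const int kPayloadLengthOffset = 2;  // [2] payload length
const int kHeaderSize = (kPayloadLengthOffset + 1) * kInt32Size;

int32_t GetHeaderValue(const uint8_t* data, int offset) {
  int32_t value;
  std::memcpy(&value, data + offset * kInt32Size, sizeof(value));
  return value;
}

// The payload starts after the header and the reservation table.
const uint8_t* Payload(const uint8_t* data, int32_t* length_out) {
  int reservations_size =
      GetHeaderValue(data, kReservationsOffset) * kInt32Size;
  *length_out = GetHeaderValue(data, kPayloadLengthOffset);
  return data + kHeaderSize + reservations_size;
}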
 
 
@@ -387,7 +501,15 @@ class SerializerDeserializer: public ObjectVisitor {
 class Deserializer: public SerializerDeserializer {
  public:
   // Create a deserializer from a snapshot byte source.
-  explicit Deserializer(SnapshotByteSource* source);
+  template <class Data>
+  explicit Deserializer(Data* data)
+      : isolate_(NULL),
+        attached_objects_(NULL),
+        source_(data->Payload()),
+        external_reference_decoder_(NULL),
+        deserialized_large_objects_(0) {
+    DecodeReservation(data->Reservations());
+  }
 
   virtual ~Deserializer();
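
The constructor being a template is what lets both wrappers introduced below, SnapshotData and SerializedCodeData, feed a Deserializer without a shared interface class; only Payload() and Reservations() are required. A tiny mirror of the pattern, with a hypothetical FakeData type for illustration:

#include <cstdint>
#include <vector>

// Hypothetical data wrapper; only Payload() and Reservations() matter.
struct FakeData {
  std::vector<uint8_t> payload;
  std::vector<uint32_t> reservations;
  const std::vector<uint8_t>& Payload() const { return payload; }
  const std::vector<uint32_t>& Reservations() const { return reservations; }
};

class MiniDeserializer {
 public:
  template <class Data>
  explicit MiniDeserializer(Data* data) : payload_(data->Payload()) {
    DecodeReservation(data->Reservations());
  }

 private:
  void DecodeReservation(const std::vector<uint32_t>& res) {
    (void)res;  // The real code splits this into per-space chunk lists.
  }
  std::vector<uint8_t> payload_;
};

// Usage: FakeData d; MiniDeserializer md(&d);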
 
@@ -401,12 +523,6 @@ class Deserializer: public SerializerDeserializer {
   void DeserializePartial(Isolate* isolate, Object** root,
                           OnOOM on_oom = FATAL_ON_OOM);
 
-  void AddReservation(int space, uint32_t chunk) {
-    DCHECK(space >= 0);
-    DCHECK(space < kNumberOfSpaces);
-    reservations_[space].Add({chunk, NULL, NULL});
-  }
-
   void FlushICacheForNewCodeObjects();
 
   // Serialized user code reference certain objects that are provided in a list
@@ -424,6 +540,8 @@ class Deserializer: public SerializerDeserializer {
     UNREACHABLE();
   }
 
+  void DecodeReservation(Vector<const SerializedData::Reservation> res);
+
   bool ReserveSpace();
 
   // Allocation sites are present in the snapshot, and must be linked into
@@ -442,24 +560,10 @@ class Deserializer: public SerializerDeserializer {
 
   // Special handling for serialized code like hooking up internalized strings.
   HeapObject* ProcessNewObjectFromSerializedCode(HeapObject* obj);
-  Object* ProcessBackRefInSerializedCode(Object* obj);
 
   // This returns the address of an object that has been described in the
   // snapshot by chunk index and offset.
-  HeapObject* GetBackReferencedObject(int space) {
-    if (space == LO_SPACE) {
-      uint32_t index = source_->GetInt();
-      return deserialized_large_objects_[index];
-    } else {
-      BackReference back_reference(source_->GetInt());
-      DCHECK(space < kNumberOfPreallocatedSpaces);
-      uint32_t chunk_index = back_reference.chunk_index();
-      DCHECK_LE(chunk_index, current_chunk_[space]);
-      uint32_t chunk_offset = back_reference.chunk_offset();
-      return HeapObject::FromAddress(reservations_[space][chunk_index].start +
-                                     chunk_offset);
-    }
-  }
+  HeapObject* GetBackReferencedObject(int space);
 
   // Cached current isolate.
   Isolate* isolate_;
@@ -467,7 +571,7 @@ class Deserializer: public SerializerDeserializer {
   // Objects from the attached object descriptions in the serialized user code.
   Vector<Handle<Object> >* attached_objects_;
 
-  SnapshotByteSource* source_;
+  SnapshotByteSource source_;
   // The address of the next object that will be allocated in each space.
   // Each space has a number of chunks reserved by the GC, with each chunk
   // fitting into a page. Deserialized objects are allocated into the
@@ -491,18 +595,9 @@ class Serializer : public SerializerDeserializer {
  public:
   Serializer(Isolate* isolate, SnapshotByteSink* sink);
   ~Serializer();
-  virtual void VisitPointers(Object** start, Object** end) OVERRIDE;
+  void VisitPointers(Object** start, Object** end) OVERRIDE;
 
-  void FinalizeAllocation();
-
-  Vector<const uint32_t> FinalAllocationChunks(int space) const {
-    if (space == LO_SPACE) {
-      return Vector<const uint32_t>(&large_objects_total_size_, 1);
-    } else {
-      DCHECK_EQ(0, pending_chunk_[space]);  // No pending chunks.
-      return completed_chunks_[space].ToConstVector();
-    }
-  }
+  void EncodeReservations(List<SerializedData::Reservation>* out) const;
 
   Isolate* isolate() const { return isolate_; }
 
@@ -569,9 +664,17 @@ class Serializer : public SerializerDeserializer {
   void PutRoot(int index, HeapObject* object, HowToCode how, WhereToPoint where,
                int skip);
 
-  void SerializeBackReference(BackReference back_reference,
-                              HowToCode how_to_code,
-                              WhereToPoint where_to_point, int skip);
+  // Returns true if the object was successfully serialized.
+  bool SerializeKnownObject(HeapObject* obj, HowToCode how_to_code,
+                            WhereToPoint where_to_point, int skip);
+
+  inline void FlushSkip(int skip) {
+    if (skip != 0) {
+      sink_->Put(kSkip, "SkipFromSerializeObject");
+      sink_->PutInt(skip, "SkipDistanceFromSerializeObject");
+    }
+  }
+
   void InitializeAllocators();
   // This will return the space for an object.
   static AllocationSpace SpaceOfObject(HeapObject* object);
@@ -677,7 +780,7 @@ class StartupSerializer : public Serializer {
 
   // The StartupSerializer has to serialize the root array, which is slightly
   // different.
-  virtual void VisitPointers(Object** start, Object** end) OVERRIDE;
+  void VisitPointers(Object** start, Object** end) OVERRIDE;
 
   // Serialize the current state of the heap.  The order is:
   // 1) Strong references.
@@ -706,9 +809,11 @@ class CodeSerializer : public Serializer {
                                Handle<String> source);
 
   MUST_USE_RESULT static MaybeHandle<SharedFunctionInfo> Deserialize(
-      Isolate* isolate, ScriptData* data, Handle<String> source);
+      Isolate* isolate, ScriptData* cached_data, Handle<String> source);
 
   static const int kSourceObjectIndex = 0;
+  STATIC_ASSERT(kSourceObjectReference == kSourceObjectIndex);
+
   static const int kCodeStubsBaseIndex = 1;
 
   String* source() const {
@@ -716,7 +821,7 @@ class CodeSerializer : public Serializer {
     return source_;
   }
 
-  List<uint32_t>* stub_keys() { return &stub_keys_; }
+  const List<uint32_t>* stub_keys() const { return &stub_keys_; }
   int num_internalized_strings() const { return num_internalized_strings_; }
 
  private:
@@ -753,83 +858,67 @@ class CodeSerializer : public Serializer {
 };
 
 
-// Wrapper around ScriptData to provide code-serializer-specific functionality.
-class SerializedCodeData {
+// Wrapper around reservation sizes and the serialization payload.
+class SnapshotData : public SerializedData {
  public:
-  // Used by when consuming.
-  explicit SerializedCodeData(ScriptData* data, String* source)
-      : script_data_(data), owns_script_data_(false) {
-    DisallowHeapAllocation no_gc;
-    CHECK(IsSane(source));
-  }
-
   // Used when producing.
-  SerializedCodeData(const List<byte>& payload, CodeSerializer* cs);
+  SnapshotData(const SnapshotByteSink& sink, const Serializer& ser);
 
-  ~SerializedCodeData() {
-    if (owns_script_data_) delete script_data_;
+  // Used when consuming.
+  explicit SnapshotData(const Vector<const byte> snapshot)
+      : SerializedData(const_cast<byte*>(snapshot.begin()), snapshot.length()) {
+    CHECK(IsSane());
   }
 
-  // Return ScriptData object and relinquish ownership over it to the caller.
-  ScriptData* GetScriptData() {
-    ScriptData* result = script_data_;
-    script_data_ = NULL;
-    DCHECK(owns_script_data_);
-    owns_script_data_ = false;
-    return result;
-  }
+  Vector<const Reservation> Reservations() const;
+  Vector<const byte> Payload() const;
 
-  class Reservation {
-   public:
-    uint32_t chunk_size() const { return ChunkSizeBits::decode(reservation); }
-    bool is_last_chunk() const { return IsLastChunkBits::decode(reservation); }
+  Vector<const byte> RawData() const {
+    return Vector<const byte>(data_, size_);
+  }
 
-   private:
-    uint32_t reservation;
+ private:
+  bool IsSane();
+  // The data header consists of int-sized entries:
+  // [0] version hash
+  // [1] number of reservation size entries
+  // [2] payload length
+  static const int kCheckSumOffset = 0;
+  static const int kReservationsOffset = 1;
+  static const int kPayloadLengthOffset = 2;
+  static const int kHeaderSize = (kPayloadLengthOffset + 1) * kIntSize;
+};
 
-    DISALLOW_COPY_AND_ASSIGN(Reservation);
-  };
 
-  int NumInternalizedStrings() const {
-    return GetHeaderValue(kNumInternalizedStringsOffset);
+// Wrapper around ScriptData to provide code-serializer-specific functionality.
+class SerializedCodeData : public SerializedData {
+ public:
+  // Used when consuming.
+  static SerializedCodeData* FromCachedData(ScriptData* cached_data,
+                                            String* source) {
+    DisallowHeapAllocation no_gc;
+    SerializedCodeData* scd = new SerializedCodeData(cached_data);
+    if (scd->IsSane(source)) return scd;
+    cached_data->Reject();
+    delete scd;
+    return NULL;
   }
 
-  Vector<const Reservation> Reservations() const {
-    return Vector<const Reservation>(reinterpret_cast<const Reservation*>(
-                                         script_data_->data() + kHeaderSize),
-                                     GetHeaderValue(kReservationsOffset));
-  }
+  // Used when producing.
+  SerializedCodeData(const List<byte>& payload, const CodeSerializer& cs);
 
-  Vector<const uint32_t> CodeStubKeys() const {
-    int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
-    const byte* start = script_data_->data() + kHeaderSize + reservations_size;
-    return Vector<const uint32_t>(reinterpret_cast<const uint32_t*>(start),
-                                  GetHeaderValue(kNumCodeStubKeysOffset));
-  }
+  // Return ScriptData object and relinquish ownership over it to the caller.
+  ScriptData* GetScriptData();
 
-  const byte* Payload() const {
-    int reservations_size = GetHeaderValue(kReservationsOffset) * kInt32Size;
-    int code_stubs_size = GetHeaderValue(kNumCodeStubKeysOffset) * kInt32Size;
-    return script_data_->data() + kHeaderSize + reservations_size +
-           code_stubs_size;
-  }
+  Vector<const Reservation> Reservations() const;
+  Vector<const byte> Payload() const;
 
-  int PayloadLength() const {
-    int payload_length = GetHeaderValue(kPayloadLengthOffset);
-    DCHECK_EQ(script_data_->data() + script_data_->length(),
-              Payload() + payload_length);
-    return payload_length;
-  }
+  int NumInternalizedStrings() const;
+  Vector<const uint32_t> CodeStubKeys() const;
 
  private:
-  void SetHeaderValue(int offset, int value) {
-    reinterpret_cast<int*>(const_cast<byte*>(script_data_->data()))[offset] =
-        value;
-  }
-
-  int GetHeaderValue(int offset) const {
-    return reinterpret_cast<const int*>(script_data_->data())[offset];
-  }
+  explicit SerializedCodeData(ScriptData* data)
+      : SerializedData(const_cast<byte*>(data->data()), data->length()) {}
 
   bool IsSane(String* source);
 
@@ -839,24 +928,14 @@ class SerializedCodeData {
   // [0] version hash
   // [1] number of internalized strings
   // [2] number of code stub keys
-  // [3] payload length
-  // [4..10] reservation sizes for spaces from NEW_SPACE to PROPERTY_CELL_SPACE.
+  // [3] number of reservation size entries
+  // [4] payload length
   static const int kCheckSumOffset = 0;
   static const int kNumInternalizedStringsOffset = 1;
   static const int kReservationsOffset = 2;
   static const int kNumCodeStubKeysOffset = 3;
   static const int kPayloadLengthOffset = 4;
   static const int kHeaderSize = (kPayloadLengthOffset + 1) * kIntSize;
-
-  class ChunkSizeBits : public BitField<uint32_t, 0, 31> {};
-  class IsLastChunkBits : public BitField<bool, 31, 1> {};
-
-  // Following the header, we store, in sequential order
-  // - code stub keys
-  // - serialization payload
-
-  ScriptData* script_data_;
-  bool owns_script_data_;
 };
 } }  // namespace v8::internal
 
index b152ad3..dc7f655 100644 (file)
 namespace v8 {
 namespace internal {
 
-void Snapshot::ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer) {
-  deserializer->AddReservation(NEW_SPACE, new_space_used_);
-  deserializer->AddReservation(OLD_POINTER_SPACE, pointer_space_used_);
-  deserializer->AddReservation(OLD_DATA_SPACE, data_space_used_);
-  deserializer->AddReservation(CODE_SPACE, code_space_used_);
-  deserializer->AddReservation(MAP_SPACE, map_space_used_);
-  deserializer->AddReservation(CELL_SPACE, cell_space_used_);
-  deserializer->AddReservation(PROPERTY_CELL_SPACE, property_cell_space_used_);
-  deserializer->AddReservation(LO_SPACE, lo_space_used_);
+bool Snapshot::HaveASnapshotToStartFrom() {
+  return SnapshotBlob().data != NULL;
 }
 
 
 bool Snapshot::Initialize(Isolate* isolate) {
-  if (size_ > 0) {
-    base::ElapsedTimer timer;
-    if (FLAG_profile_deserialization) {
-      timer.Start();
-    }
-    SnapshotByteSource source(raw_data_, raw_size_);
-    Deserializer deserializer(&source);
-    ReserveSpaceForLinkedInSnapshot(&deserializer);
-    bool success = isolate->Init(&deserializer);
-    if (FLAG_profile_deserialization) {
-      double ms = timer.Elapsed().InMillisecondsF();
-      PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
-    }
-    return success;
-  }
-  return false;
-}
-
+  if (!HaveASnapshotToStartFrom()) return false;
+  base::ElapsedTimer timer;
+  if (FLAG_profile_deserialization) timer.Start();
 
-bool Snapshot::HaveASnapshotToStartFrom() {
-  return size_ != 0;
+  const v8::StartupData blob = SnapshotBlob();
+  SnapshotData snapshot_data(ExtractStartupData(&blob));
+  Deserializer deserializer(&snapshot_data);
+  bool success = isolate->Init(&deserializer);
+  if (FLAG_profile_deserialization) {
+    double ms = timer.Elapsed().InMillisecondsF();
+    PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
+  }
+  return success;
 }
 
 
 Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
-  if (context_size_ == 0) {
-    return Handle<Context>();
-  }
-  SnapshotByteSource source(context_raw_data_,
-                            context_raw_size_);
-  Deserializer deserializer(&source);
+  if (!HaveASnapshotToStartFrom()) return Handle<Context>();
+
+  const v8::StartupData blob = SnapshotBlob();
+  SnapshotData snapshot_data(ExtractContextData(&blob));
+  Deserializer deserializer(&snapshot_data);
   Object* root;
-  deserializer.AddReservation(NEW_SPACE, context_new_space_used_);
-  deserializer.AddReservation(OLD_POINTER_SPACE, context_pointer_space_used_);
-  deserializer.AddReservation(OLD_DATA_SPACE, context_data_space_used_);
-  deserializer.AddReservation(CODE_SPACE, context_code_space_used_);
-  deserializer.AddReservation(MAP_SPACE, context_map_space_used_);
-  deserializer.AddReservation(CELL_SPACE, context_cell_space_used_);
-  deserializer.AddReservation(PROPERTY_CELL_SPACE,
-                              context_property_cell_space_used_);
-  deserializer.AddReservation(LO_SPACE, context_lo_space_used_);
   deserializer.DeserializePartial(isolate, &root);
   CHECK(root->IsContext());
   return Handle<Context>(Context::cast(root));
 }
 
 
-#ifdef V8_USE_EXTERNAL_STARTUP_DATA
-// Dummy implementations of Set*FromFile(..) APIs.
-//
-// These are meant for use with snapshot-external.cc. Should this file
-// be compiled with those options we just supply these dummy implementations
-// below. This happens when compiling the mksnapshot utility.
-void SetNativesFromFile(StartupData* data) { CHECK(false); }
-void SetSnapshotFromFile(StartupData* data) { CHECK(false); }
-#endif  // V8_USE_EXTERNAL_STARTUP_DATA
+v8::StartupData Snapshot::CreateSnapshotBlob(
+    const Vector<const byte> startup_data,
+    const Vector<const byte> context_data) {
+  int startup_length = startup_data.length();
+  int context_length = context_data.length();
+  int context_offset = kIntSize + startup_length;
+  int length = context_offset + context_length;
+  char* data = new char[length];
 
+  memcpy(data, &startup_length, kIntSize);
+  memcpy(data + kIntSize, startup_data.begin(), startup_length);
+  memcpy(data + context_offset, context_data.begin(), context_length);
+  v8::StartupData result = {data, length};
+  return result;
+}
+
+
+Vector<const byte> Snapshot::ExtractStartupData(const v8::StartupData* data) {
+  DCHECK_LT(kIntSize, data->raw_size);
+  int startup_length;
+  memcpy(&startup_length, data->data, kIntSize);
+  DCHECK_LT(startup_length, data->raw_size);
+  const byte* startup_data =
+      reinterpret_cast<const byte*>(data->data + kIntSize);
+  return Vector<const byte>(startup_data, startup_length);
+}
+
+
+Vector<const byte> Snapshot::ExtractContextData(const v8::StartupData* data) {
+  DCHECK_LT(kIntSize, data->raw_size);
+  int startup_length;
+  memcpy(&startup_length, data->data, kIntSize);
+  int context_offset = kIntSize + startup_length;
+  const byte* context_data =
+      reinterpret_cast<const byte*>(data->data + context_offset);
+  DCHECK_LT(context_offset, data->raw_size);
+  int context_length = data->raw_size - context_offset;
+  return Vector<const byte>(context_data, context_length);
+}
 } }  // namespace v8::internal
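
To make the framing concrete, a self-contained round trip of this blob layout (std::vector in place of v8::StartupData, arbitrary sample bytes): a single int prefix delimits the startup section, and the context section runs to the end of the blob.

#include <cassert>
#include <cstring>
#include <vector>

// Pack startup and context data the way CreateSnapshotBlob does:
// [int startup_length][startup data][context data].
std::vector<char> CreateBlob(const std::vector<char>& startup,
                             const std::vector<char>& context) {
  int startup_length = static_cast<int>(startup.size());
  std::vector<char> blob(sizeof(int) + startup.size() + context.size());
  std::memcpy(&blob[0], &startup_length, sizeof(int));
  std::memcpy(&blob[sizeof(int)], startup.data(), startup.size());
  std::memcpy(&blob[sizeof(int) + startup.size()], context.data(),
              context.size());
  return blob;
}

int main() {
  std::vector<char> startup = {'s', 'u'};
  std::vector<char> context = {'c', 't', 'x'};
  std::vector<char> blob = CreateBlob(startup, context);

  // ExtractStartupData: the length prefix says where startup data ends.
  int startup_length;
  std::memcpy(&startup_length, &blob[0], sizeof(int));
  assert(startup_length == 2);

  // ExtractContextData: everything after the startup part is context data,
  // so the context length is implied by the blob's total size.
  size_t context_offset = sizeof(int) + startup_length;
  assert(blob.size() - context_offset == context.size());
  return 0;
}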
index e8673e5..020d1cb 100644 (file)
 namespace v8 {
 namespace internal {
 
-const byte Snapshot::data_[] = { 0 };
-const byte* Snapshot::raw_data_ = NULL;
-const int Snapshot::size_ = 0;
-const int Snapshot::raw_size_ = 0;
-const byte Snapshot::context_data_[] = { 0 };
-const byte* Snapshot::context_raw_data_ = NULL;
-const int Snapshot::context_size_ = 0;
-const int Snapshot::context_raw_size_ = 0;
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+// Dummy implementations of Set*FromFile(..) APIs.
+//
+// These are meant for use with snapshot-external.cc. Should this file
+// be compiled with those options, we supply these dummy implementations
+// below. This happens when compiling the mksnapshot utility.
+void SetNativesFromFile(StartupData* data) { CHECK(false); }
+void SetSnapshotFromFile(StartupData* data) { CHECK(false); }
+#endif  // V8_USE_EXTERNAL_STARTUP_DATA
 
-const int Snapshot::new_space_used_ = 0;
-const int Snapshot::pointer_space_used_ = 0;
-const int Snapshot::data_space_used_ = 0;
-const int Snapshot::code_space_used_ = 0;
-const int Snapshot::map_space_used_ = 0;
-const int Snapshot::cell_space_used_ = 0;
-const int Snapshot::property_cell_space_used_ = 0;
-const int Snapshot::lo_space_used_ = 0;
 
-const int Snapshot::context_new_space_used_ = 0;
-const int Snapshot::context_pointer_space_used_ = 0;
-const int Snapshot::context_data_space_used_ = 0;
-const int Snapshot::context_code_space_used_ = 0;
-const int Snapshot::context_map_space_used_ = 0;
-const int Snapshot::context_cell_space_used_ = 0;
-const int Snapshot::context_property_cell_space_used_ = 0;
-const int Snapshot::context_lo_space_used_ = 0;
+const v8::StartupData Snapshot::SnapshotBlob() {
+  return {NULL, 0};
+}
 } }  // namespace v8::internal
index b75e232..2fda571 100644 (file)
 #include "src/snapshot-source-sink.h"
 #include "src/v8.h"  // for V8::Initialize
 
-namespace v8 {
-namespace internal {
-
-
-struct SnapshotImpl {
- public:
-  const byte* data;
-  int size;
-  int new_space_used;
-  int pointer_space_used;
-  int data_space_used;
-  int code_space_used;
-  int map_space_used;
-  int cell_space_used;
-  int property_cell_space_used;
-  int lo_space_used;
-
-  const byte* context_data;
-  int context_size;
-  int context_new_space_used;
-  int context_pointer_space_used;
-  int context_data_space_used;
-  int context_code_space_used;
-  int context_map_space_used;
-  int context_cell_space_used;
-  int context_property_cell_space_used;
-  int context_lo_space_used;
-};
-
-
-static SnapshotImpl* snapshot_impl_ = NULL;
-
-
-bool Snapshot::HaveASnapshotToStartFrom() {
-  return snapshot_impl_ != NULL;
-}
-
 
-bool Snapshot::Initialize(Isolate* isolate) {
-  if (!HaveASnapshotToStartFrom())
-    return false;
+#ifndef V8_USE_EXTERNAL_STARTUP_DATA
+#error snapshot-external.cc is used only for the external snapshot build.
+#endif  // V8_USE_EXTERNAL_STARTUP_DATA
 
-  base::ElapsedTimer timer;
-  if (FLAG_profile_deserialization) {
-    timer.Start();
-  }
-  SnapshotByteSource source(snapshot_impl_->data, snapshot_impl_->size);
-  Deserializer deserializer(&source);
-  deserializer.AddReservation(NEW_SPACE, snapshot_impl_->new_space_used);
-  deserializer.AddReservation(OLD_POINTER_SPACE,
-                              snapshot_impl_->pointer_space_used);
-  deserializer.AddReservation(OLD_DATA_SPACE, snapshot_impl_->data_space_used);
-  deserializer.AddReservation(CODE_SPACE, snapshot_impl_->code_space_used);
-  deserializer.AddReservation(MAP_SPACE, snapshot_impl_->map_space_used);
-  deserializer.AddReservation(CELL_SPACE, snapshot_impl_->cell_space_used);
-  deserializer.AddReservation(PROPERTY_CELL_SPACE,
-                              snapshot_impl_->property_cell_space_used);
-  deserializer.AddReservation(LO_SPACE, snapshot_impl_->lo_space_used);
-  bool success = isolate->Init(&deserializer);
-  if (FLAG_profile_deserialization) {
-    double ms = timer.Elapsed().InMillisecondsF();
-    PrintF("[Snapshot loading and deserialization took %0.3f ms]\n", ms);
-  }
-  return success;
-}
-
-
-Handle<Context> Snapshot::NewContextFromSnapshot(Isolate* isolate) {
-  if (!HaveASnapshotToStartFrom())
-    return Handle<Context>();
 
-  SnapshotByteSource source(snapshot_impl_->context_data,
-                            snapshot_impl_->context_size);
-  Deserializer deserializer(&source);
-  deserializer.AddReservation(NEW_SPACE,
-                              snapshot_impl_->context_new_space_used);
-  deserializer.AddReservation(OLD_POINTER_SPACE,
-                              snapshot_impl_->context_pointer_space_used);
-  deserializer.AddReservation(OLD_DATA_SPACE,
-                              snapshot_impl_->context_data_space_used);
-  deserializer.AddReservation(CODE_SPACE,
-                              snapshot_impl_->context_code_space_used);
-  deserializer.AddReservation(MAP_SPACE,
-                              snapshot_impl_->context_map_space_used);
-  deserializer.AddReservation(CELL_SPACE,
-                              snapshot_impl_->context_cell_space_used);
-  deserializer.AddReservation(PROPERTY_CELL_SPACE,
-                              snapshot_impl_->context_property_cell_space_used);
-  deserializer.AddReservation(LO_SPACE, snapshot_impl_->context_lo_space_used);
-  Object* root;
-  deserializer.DeserializePartial(isolate, &root);
-  CHECK(root->IsContext());
-  return Handle<Context>(Context::cast(root));
-}
+namespace v8 {
+namespace internal {
 
+static v8::StartupData external_startup_blob = {NULL, 0};
 
 void SetSnapshotFromFile(StartupData* snapshot_blob) {
   DCHECK(snapshot_blob);
   DCHECK(snapshot_blob->data);
   DCHECK(snapshot_blob->raw_size > 0);
-  DCHECK(!snapshot_impl_);
-
-  snapshot_impl_ = new SnapshotImpl;
-  SnapshotByteSource source(reinterpret_cast<const byte*>(snapshot_blob->data),
-                            snapshot_blob->raw_size);
-
-  bool success = source.GetBlob(&snapshot_impl_->data,
-                                &snapshot_impl_->size);
-  snapshot_impl_->new_space_used = source.GetInt();
-  snapshot_impl_->pointer_space_used = source.GetInt();
-  snapshot_impl_->data_space_used = source.GetInt();
-  snapshot_impl_->code_space_used = source.GetInt();
-  snapshot_impl_->map_space_used = source.GetInt();
-  snapshot_impl_->cell_space_used = source.GetInt();
-  snapshot_impl_->property_cell_space_used = source.GetInt();
-  snapshot_impl_->lo_space_used = source.GetInt();
-
-  success &= source.GetBlob(&snapshot_impl_->context_data,
-                            &snapshot_impl_->context_size);
-  snapshot_impl_->context_new_space_used = source.GetInt();
-  snapshot_impl_->context_pointer_space_used = source.GetInt();
-  snapshot_impl_->context_data_space_used = source.GetInt();
-  snapshot_impl_->context_code_space_used = source.GetInt();
-  snapshot_impl_->context_map_space_used = source.GetInt();
-  snapshot_impl_->context_cell_space_used = source.GetInt();
-  snapshot_impl_->context_property_cell_space_used = source.GetInt();
-  snapshot_impl_->context_lo_space_used = source.GetInt();
-
-  DCHECK(success);
+  DCHECK(!external_startup_blob.data);
+  // Validate snapshot blob.
+  DCHECK(!Snapshot::ExtractStartupData(snapshot_blob).is_empty());
+  DCHECK(!Snapshot::ExtractContextData(snapshot_blob).is_empty());
+  external_startup_blob = *snapshot_blob;
 }
 
+
+const v8::StartupData Snapshot::SnapshotBlob() { return external_startup_blob; }
 } }  // namespace v8::internal
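
With external startup data, the embedder reads the combined blob from disk and hands it to SetSnapshotFromFile() before initializing V8. A hedged sketch of that loader follows; ReadFileToBuffer and the stub SetSnapshotFromFile are illustrative stand-ins (the real definition lives in snapshot-external.cc), and the StartupData fields simply mirror the checks in the diff above:

```cpp
#include <cassert>
#include <cstdio>
#include <vector>

// Stand-in for v8::StartupData (assumption: matching field names).
struct StartupData {
  const char* data;
  int raw_size;
};

// Stub standing in for the real SetSnapshotFromFile in snapshot-external.cc.
static void SetSnapshotFromFile(StartupData* blob) {
  assert(blob && blob->data && blob->raw_size > 0);  // mirrors the DCHECKs
}

static std::vector<char> ReadFileToBuffer(const char* path) {
  std::vector<char> buf;
  if (std::FILE* f = std::fopen(path, "rb")) {
    std::fseek(f, 0, SEEK_END);
    buf.resize(static_cast<size_t>(std::ftell(f)));
    std::fseek(f, 0, SEEK_SET);
    if (!buf.empty() && std::fread(&buf[0], 1, buf.size(), f) != buf.size()) {
      buf.clear();
    }
    std::fclose(f);
  }
  return buf;
}

int main(int argc, char** argv) {
  if (argc < 2) return 1;
  // The blob is not copied, so the backing buffer must outlive V8.
  static std::vector<char> buffer = ReadFileToBuffer(argv[1]);
  if (buffer.empty()) return 1;
  StartupData blob = {&buffer[0], static_cast<int>(buffer.size())};
  SetSnapshotFromFile(&blob);
  return 0;
}
```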
index 9935ff7..fd94724 100644 (file)
 namespace v8 {
 namespace internal {
 
-
-SnapshotByteSource::SnapshotByteSource(const byte* array, int length)
-    : data_(array), length_(length), position_(0) {
-}
-
-
-SnapshotByteSource::~SnapshotByteSource() { }
-
-
 int32_t SnapshotByteSource::GetUnalignedInt() {
   DCHECK(position_ < length_);  // Require at least one byte left.
   int32_t answer = data_[position_];
@@ -52,15 +43,10 @@ void SnapshotByteSink::PutInt(uintptr_t integer, const char* description) {
   if (bytes > 3) Put(static_cast<int>((integer >> 24) & 0xff), "IntPart4");
 }
 
-void SnapshotByteSink::PutRaw(byte* data, int number_of_bytes,
-                              const char* description) {
-  data_.AddAll(Vector<byte>(data, number_of_bytes));
-}
 
-void SnapshotByteSink::PutBlob(byte* data, int number_of_bytes,
-                               const char* description) {
-  PutInt(number_of_bytes, description);
-  PutRaw(data, number_of_bytes, description);
+void SnapshotByteSink::PutRaw(const byte* data, int number_of_bytes,
+                              const char* description) {
+  data_.AddAll(Vector<byte>(const_cast<byte*>(data), number_of_bytes));
 }
 
 
@@ -77,7 +63,7 @@ bool SnapshotByteSource::GetBlob(const byte** data, int* number_of_bytes) {
   int size = GetInt();
   *number_of_bytes = size;
 
-  if (position_ + size < length_) {
+  if (position_ + size <= length_) {
     *data = &data_[position_];
     Advance(size);
     return true;
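
The `<` to `<=` change in GetBlob() fixes an off-by-one: a blob whose payload ends exactly at the end of the source buffer was previously rejected. A tiny sketch of the boundary condition:

```cpp
#include <cassert>

// The check behind the GetBlob() fix above.
bool BlobFits(int position, int size, int length) {
  return position + size <= length;  // was: position + size < length
}

int main() {
  // A 4-byte payload starting at offset 6 of a 10-byte source exactly fits.
  assert(BlobFits(6, 4, 10));   // accepted after the fix
  assert(!BlobFits(7, 4, 10));  // still rejected: would run past the end
}
```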
index 68901a1..66feaec 100644 (file)
@@ -19,12 +19,19 @@ namespace internal {
  */
 class SnapshotByteSource FINAL {
  public:
-  SnapshotByteSource(const byte* array, int length);
-  ~SnapshotByteSource();
+  SnapshotByteSource(const char* data, int length)
+      : data_(reinterpret_cast<const byte*>(data)),
+        length_(length),
+        position_(0) {}
+
+  explicit SnapshotByteSource(Vector<const byte> payload)
+      : data_(payload.start()), length_(payload.length()), position_(0) {}
+
+  ~SnapshotByteSource() {}
 
   bool HasMore() { return position_ < length_; }
 
-  int Get() {
+  byte Get() {
     DCHECK(position_ < length_);
     return data_[position_++];
   }
@@ -83,8 +90,7 @@ class SnapshotByteSink {
   }
 
   void PutInt(uintptr_t integer, const char* description);
-  void PutRaw(byte* data, int number_of_bytes, const char* description);
-  void PutBlob(byte* data, int number_of_bytes, const char* description);
+  void PutRaw(const byte* data, int number_of_bytes, const char* description);
   int Position() { return data_.length(); }
 
   const List<byte>& data() const { return data_; }
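
The reworked SnapshotByteSource gains inline constructors from (const char*, int) and from Vector<const byte>, and Get() now returns a byte rather than an int. A small standalone sketch of that surface, with std::vector standing in for V8's Vector (an assumption of this sketch):

```cpp
#include <cassert>
#include <cstdint>
#include <vector>

class ByteSource {
 public:
  // Mirrors the new (const char*, int) constructor.
  ByteSource(const char* data, int length)
      : data_(reinterpret_cast<const uint8_t*>(data)),
        length_(length),
        position_(0) {}
  // Mirrors the new Vector<const byte> constructor.
  explicit ByteSource(const std::vector<uint8_t>& payload)
      : data_(payload.data()),
        length_(static_cast<int>(payload.size())),
        position_(0) {}

  bool HasMore() const { return position_ < length_; }
  uint8_t Get() {  // now typed as byte instead of int
    assert(position_ < length_);
    return data_[position_++];
  }

 private:
  const uint8_t* data_;
  int length_;
  int position_;
};

int main() {
  ByteSource source("\x01\x02", 2);
  assert(source.Get() == 1);
  assert(source.Get() == 2);
  assert(!source.HasMore());
}
```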
index 590ecf1..25a07cd 100644 (file)
 namespace v8 {
 namespace internal {
 
-class Snapshot {
+class Snapshot : public AllStatic {
  public:
   // Initialize the Isolate from the internal snapshot. Returns false if no
   // snapshot could be found.
   static bool Initialize(Isolate* isolate);
-
-  static bool HaveASnapshotToStartFrom();
-
   // Create a new context using the internal partial snapshot.
   static Handle<Context> NewContextFromSnapshot(Isolate* isolate);
 
-  // These methods support COMPRESS_STARTUP_DATA_BZ2.
-  static const byte* data() { return data_; }
-  static int size() { return size_; }
-  static int raw_size() { return raw_size_; }
-  static void set_raw_data(const byte* raw_data) {
-    raw_data_ = raw_data;
-  }
-  static const byte* context_data() { return context_data_; }
-  static int context_size() { return context_size_; }
-  static int context_raw_size() { return context_raw_size_; }
-  static void set_context_raw_data(
-      const byte* context_raw_data) {
-    context_raw_data_ = context_raw_data;
-  }
+  static bool HaveASnapshotToStartFrom();
 
- private:
-  static const byte data_[];
-  static const byte* raw_data_;
-  static const byte context_data_[];
-  static const byte* context_raw_data_;
-  static const int new_space_used_;
-  static const int pointer_space_used_;
-  static const int data_space_used_;
-  static const int code_space_used_;
-  static const int map_space_used_;
-  static const int cell_space_used_;
-  static const int property_cell_space_used_;
-  static const int lo_space_used_;
-  static const int context_new_space_used_;
-  static const int context_pointer_space_used_;
-  static const int context_data_space_used_;
-  static const int context_code_space_used_;
-  static const int context_map_space_used_;
-  static const int context_cell_space_used_;
-  static const int context_property_cell_space_used_;
-  static const int context_lo_space_used_;
-  static const int size_;
-  static const int raw_size_;
-  static const int context_size_;
-  static const int context_raw_size_;
+  // To be implemented by the snapshot source.
+  static const v8::StartupData SnapshotBlob();
+
+  static v8::StartupData CreateSnapshotBlob(
+      const Vector<const byte> startup_data,
+      const Vector<const byte> context_data);
 
-  static void ReserveSpaceForLinkedInSnapshot(Deserializer* deserializer);
+  static Vector<const byte> ExtractStartupData(const v8::StartupData* data);
+  static Vector<const byte> ExtractContextData(const v8::StartupData* data);
 
+ private:
   DISALLOW_IMPLICIT_CONSTRUCTORS(Snapshot);
 };
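
The "to be implemented by the snapshot source" comment describes a link-time seam: snapshot-common.cc declares the interface while snapshot-empty.cc or snapshot-external.cc supplies the one SnapshotBlob() definition chosen at link time. A sketch of the pattern, with illustrative stand-in types; the HaveASnapshotToStartFrom() body here is an assumption, since its implementation is not shown in this hunk:

```cpp
// Stand-in for v8::StartupData.
struct StartupData {
  const char* data;
  int raw_size;
};

struct Snapshot {
  static const StartupData SnapshotBlob();  // definition chosen at link time
  static bool HaveASnapshotToStartFrom() {
    return SnapshotBlob().data != nullptr;  // assumption: how the check works
  }
};

// The snapshot-empty.cc equivalent: no built-in snapshot.
const StartupData Snapshot::SnapshotBlob() { return {nullptr, 0}; }

int main() { return Snapshot::HaveASnapshotToStartFrom() ? 0 : 1; }
```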
 
diff --git a/deps/v8/src/string-builder.cc b/deps/v8/src/string-builder.cc
new file mode 100644 (file)
index 0000000..38c3188
--- /dev/null
@@ -0,0 +1,111 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/string-builder.h"
+
+namespace v8 {
+namespace internal {
+
+MaybeHandle<String> ReplacementStringBuilder::ToString() {
+  Isolate* isolate = heap_->isolate();
+  if (array_builder_.length() == 0) {
+    return isolate->factory()->empty_string();
+  }
+
+  Handle<String> joined_string;
+  if (is_one_byte_) {
+    Handle<SeqOneByteString> seq;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, seq, isolate->factory()->NewRawOneByteString(character_count_),
+        String);
+
+    DisallowHeapAllocation no_gc;
+    uint8_t* char_buffer = seq->GetChars();
+    StringBuilderConcatHelper(*subject_, char_buffer, *array_builder_.array(),
+                              array_builder_.length());
+    joined_string = Handle<String>::cast(seq);
+  } else {
+    // Two-byte.
+    Handle<SeqTwoByteString> seq;
+    ASSIGN_RETURN_ON_EXCEPTION(
+        isolate, seq, isolate->factory()->NewRawTwoByteString(character_count_),
+        String);
+
+    DisallowHeapAllocation no_gc;
+    uc16* char_buffer = seq->GetChars();
+    StringBuilderConcatHelper(*subject_, char_buffer, *array_builder_.array(),
+                              array_builder_.length());
+    joined_string = Handle<String>::cast(seq);
+  }
+  return joined_string;
+}
+
+
+IncrementalStringBuilder::IncrementalStringBuilder(Isolate* isolate)
+    : isolate_(isolate),
+      encoding_(String::ONE_BYTE_ENCODING),
+      overflowed_(false),
+      part_length_(kInitialPartLength),
+      current_index_(0) {
+  // Create an accumulator handle starting with the empty string.
+  accumulator_ = Handle<String>(isolate->heap()->empty_string(), isolate);
+  current_part_ =
+      factory()->NewRawOneByteString(part_length_).ToHandleChecked();
+}
+
+
+void IncrementalStringBuilder::Accumulate() {
+  // Only accumulate fully written strings. Shrink first if necessary.
+  DCHECK_EQ(current_index_, current_part()->length());
+  Handle<String> new_accumulator;
+  if (accumulator()->length() + current_part()->length() > String::kMaxLength) {
+    // Set the flag and carry on. Delay throwing the exception till the end.
+    new_accumulator = factory()->empty_string();
+    overflowed_ = true;
+  } else {
+    new_accumulator = factory()
+                          ->NewConsString(accumulator(), current_part())
+                          .ToHandleChecked();
+  }
+  set_accumulator(new_accumulator);
+}
+
+
+void IncrementalStringBuilder::Extend() {
+  Accumulate();
+  if (part_length_ <= kMaxPartLength / kPartLengthGrowthFactor) {
+    part_length_ *= kPartLengthGrowthFactor;
+  }
+  Handle<String> new_part;
+  if (encoding_ == String::ONE_BYTE_ENCODING) {
+    new_part = factory()->NewRawOneByteString(part_length_).ToHandleChecked();
+  } else {
+    new_part = factory()->NewRawTwoByteString(part_length_).ToHandleChecked();
+  }
+  // Reuse the same handle to avoid being invalidated when exiting handle scope.
+  set_current_part(new_part);
+  current_index_ = 0;
+}
+
+
+MaybeHandle<String> IncrementalStringBuilder::Finish() {
+  ShrinkCurrentPart();
+  Accumulate();
+  if (overflowed_) {
+    THROW_NEW_ERROR(isolate_, NewInvalidStringLengthError(), String);
+  }
+  return accumulator();
+}
+
+
+void IncrementalStringBuilder::AppendString(Handle<String> string) {
+  ShrinkCurrentPart();
+  part_length_ = kInitialPartLength;  // Allocate conservatively.
+  Extend();  // Attach current part and allocate new part.
+  Handle<String> concat =
+      factory()->NewConsString(accumulator(), string).ToHandleChecked();
+  set_accumulator(concat);
+}
+}
+}  // namespace v8::internal
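
Note how Accumulate() defers overflow: when the result would exceed String::kMaxLength it sets a flag and resets the accumulator instead of throwing mid-append, and Finish() raises the error once at the end. A minimal sketch of that strategy with plain std::string stand-ins (the tiny kMaxLength is illustrative only):

```cpp
#include <cassert>
#include <string>

class Builder {
 public:
  void Append(const std::string& part) {
    if (acc_.size() + part.size() > kMaxLength) {
      overflowed_ = true;  // set the flag and carry on; report later
      acc_.clear();
    } else {
      acc_ += part;
    }
  }
  // Returns false on overflow, standing in for InvalidStringLengthError.
  bool Finish(std::string* out) {
    if (overflowed_) return false;
    *out = acc_;
    return true;
  }

 private:
  static const size_t kMaxLength = 8;  // tiny limit for the example
  std::string acc_;
  bool overflowed_ = false;
};

int main() {
  Builder b;
  b.Append("hello");
  b.Append("world!");  // exceeds the limit; flag set, no throw here
  std::string s;
  assert(!b.Finish(&s));  // error surfaces once, at the end
}
```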
similarity index 62%
rename from deps/v8/src/runtime/string-builder.h
rename to deps/v8/src/string-builder.h
index 890b7f6..43b690d 100644 (file)
@@ -2,8 +2,10 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#ifndef V8_RUNTIME_STRING_BUILDER_H_
-#define V8_RUNTIME_STRING_BUILDER_H_
+#ifndef V8_STRING_BUILDER_H_
+#define V8_STRING_BUILDER_H_
+
+#include "src/v8.h"
 
 namespace v8 {
 namespace internal {
@@ -233,39 +235,7 @@ class ReplacementStringBuilder {
   }
 
 
-  MaybeHandle<String> ToString() {
-    Isolate* isolate = heap_->isolate();
-    if (array_builder_.length() == 0) {
-      return isolate->factory()->empty_string();
-    }
-
-    Handle<String> joined_string;
-    if (is_one_byte_) {
-      Handle<SeqOneByteString> seq;
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, seq,
-          isolate->factory()->NewRawOneByteString(character_count_), String);
-
-      DisallowHeapAllocation no_gc;
-      uint8_t* char_buffer = seq->GetChars();
-      StringBuilderConcatHelper(*subject_, char_buffer, *array_builder_.array(),
-                                array_builder_.length());
-      joined_string = Handle<String>::cast(seq);
-    } else {
-      // Two-byte.
-      Handle<SeqTwoByteString> seq;
-      ASSIGN_RETURN_ON_EXCEPTION(
-          isolate, seq,
-          isolate->factory()->NewRawTwoByteString(character_count_), String);
-
-      DisallowHeapAllocation no_gc;
-      uc16* char_buffer = seq->GetChars();
-      StringBuilderConcatHelper(*subject_, char_buffer, *array_builder_.array(),
-                                array_builder_.length());
-      joined_string = Handle<String>::cast(seq);
-    }
-    return joined_string;
-  }
+  MaybeHandle<String> ToString();
 
 
   void IncrementCharacterCount(int by) {
@@ -290,7 +260,171 @@ class ReplacementStringBuilder {
   int character_count_;
   bool is_one_byte_;
 };
+
+
+class IncrementalStringBuilder {
+ public:
+  explicit IncrementalStringBuilder(Isolate* isolate);
+
+  INLINE(String::Encoding CurrentEncoding()) { return encoding_; }
+
+  template <typename SrcChar, typename DestChar>
+  INLINE(void Append(SrcChar c));
+
+  INLINE(void AppendCharacter(uint8_t c)) {
+    if (encoding_ == String::ONE_BYTE_ENCODING) {
+      Append<uint8_t, uint8_t>(c);
+    } else {
+      Append<uint8_t, uc16>(c);
+    }
+  }
+
+  INLINE(void AppendCString(const char* s)) {
+    const uint8_t* u = reinterpret_cast<const uint8_t*>(s);
+    if (encoding_ == String::ONE_BYTE_ENCODING) {
+      while (*u != '\0') Append<uint8_t, uint8_t>(*(u++));
+    } else {
+      while (*u != '\0') Append<uint8_t, uc16>(*(u++));
+    }
+  }
+
+  INLINE(bool CurrentPartCanFit(int length)) {
+    return part_length_ - current_index_ > length;
+  }
+
+  void AppendString(Handle<String> string);
+
+  MaybeHandle<String> Finish();
+
+  // Change encoding to two-byte.
+  void ChangeEncoding() {
+    DCHECK_EQ(String::ONE_BYTE_ENCODING, encoding_);
+    ShrinkCurrentPart();
+    encoding_ = String::TWO_BYTE_ENCODING;
+    Extend();
+  }
+
+  template <typename DestChar>
+  class NoExtend {
+   public:
+    explicit NoExtend(Handle<String> string, int offset) {
+      DCHECK(string->IsSeqOneByteString() || string->IsSeqTwoByteString());
+      if (sizeof(DestChar) == 1) {
+        start_ = reinterpret_cast<DestChar*>(
+            Handle<SeqOneByteString>::cast(string)->GetChars() + offset);
+      } else {
+        start_ = reinterpret_cast<DestChar*>(
+            Handle<SeqTwoByteString>::cast(string)->GetChars() + offset);
+      }
+      cursor_ = start_;
+    }
+
+    INLINE(void Append(DestChar c)) { *(cursor_++) = c; }
+    INLINE(void AppendCString(const char* s)) {
+      const uint8_t* u = reinterpret_cast<const uint8_t*>(s);
+      while (*u != '\0') Append(*(u++));
+    }
+
+    int written() { return static_cast<int>(cursor_ - start_); }
+
+   private:
+    DestChar* start_;
+    DestChar* cursor_;
+    DisallowHeapAllocation no_gc_;
+  };
+
+  template <typename DestChar>
+  class NoExtendString : public NoExtend<DestChar> {
+   public:
+    NoExtendString(Handle<String> string, int required_length)
+        : NoExtend<DestChar>(string, 0), string_(string) {
+      DCHECK(string->length() >= required_length);
+    }
+
+    ~NoExtendString() {
+      Handle<SeqString> string = Handle<SeqString>::cast(string_);
+      int length = NoExtend<DestChar>::written();
+      *string_.location() = *SeqString::Truncate(string, length);
+    }
+
+   private:
+    Handle<String> string_;
+  };
+
+  template <typename DestChar>
+  class NoExtendBuilder : public NoExtend<DestChar> {
+   public:
+    NoExtendBuilder(IncrementalStringBuilder* builder, int required_length)
+        : NoExtend<DestChar>(builder->current_part(), builder->current_index_),
+          builder_(builder) {
+      DCHECK(builder->CurrentPartCanFit(required_length));
+    }
+
+    ~NoExtendBuilder() {
+      builder_->current_index_ += NoExtend<DestChar>::written();
+    }
+
+   private:
+    IncrementalStringBuilder* builder_;
+  };
+
+ private:
+  Factory* factory() { return isolate_->factory(); }
+
+  INLINE(Handle<String> accumulator()) { return accumulator_; }
+
+  INLINE(void set_accumulator(Handle<String> string)) {
+    *accumulator_.location() = *string;
+  }
+
+  INLINE(Handle<String> current_part()) { return current_part_; }
+
+  INLINE(void set_current_part(Handle<String> string)) {
+    *current_part_.location() = *string;
+  }
+
+  // Add the current part to the accumulator.
+  void Accumulate();
+
+  // Finish the current part and allocate a new part.
+  void Extend();
+
+  // Shrink current part to the right size.
+  void ShrinkCurrentPart() {
+    DCHECK(current_index_ < part_length_);
+    set_current_part(SeqString::Truncate(
+        Handle<SeqString>::cast(current_part()), current_index_));
+  }
+
+  static const int kInitialPartLength = 32;
+  static const int kMaxPartLength = 16 * 1024;
+  static const int kPartLengthGrowthFactor = 2;
+
+  Isolate* isolate_;
+  String::Encoding encoding_;
+  bool overflowed_;
+  int part_length_;
+  int current_index_;
+  Handle<String> accumulator_;
+  Handle<String> current_part_;
+};
+
+
+template <typename SrcChar, typename DestChar>
+void IncrementalStringBuilder::Append(SrcChar c) {
+  DCHECK_EQ(encoding_ == String::ONE_BYTE_ENCODING, sizeof(DestChar) == 1);
+  if (sizeof(DestChar) == 1) {
+    DCHECK_EQ(String::ONE_BYTE_ENCODING, encoding_);
+    SeqOneByteString::cast(*current_part_)
+        ->SeqOneByteStringSet(current_index_++, c);
+  } else {
+    DCHECK_EQ(String::TWO_BYTE_ENCODING, encoding_);
+    SeqTwoByteString::cast(*current_part_)
+        ->SeqTwoByteStringSet(current_index_++, c);
+  }
+  if (current_index_ == part_length_) Extend();
+}
 }
 }  // namespace v8::internal
 
-#endif  // V8_RUNTIME_STRING_BUILDER_H_
+#endif  // V8_STRING_BUILDER_H_
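
Extend() grows each new part geometrically until it saturates at kMaxPartLength, which bounds per-part allocation while keeping the number of cons-string links small. A sketch of just the growth policy, using the constants from the header above:

```cpp
#include <cassert>

const int kInitialPartLength = 32;
const int kMaxPartLength = 16 * 1024;
const int kPartLengthGrowthFactor = 2;

// The size update performed at the top of IncrementalStringBuilder::Extend().
int NextPartLength(int part_length) {
  if (part_length <= kMaxPartLength / kPartLengthGrowthFactor) {
    part_length *= kPartLengthGrowthFactor;
  }
  return part_length;
}

int main() {
  int len = kInitialPartLength;
  for (int i = 0; i < 16; i++) len = NextPartLength(len);
  assert(len == kMaxPartLength);  // growth saturates at 16 KB parts
}
```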
index dcaddaf..f5eef37 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-'use strict';
+"use strict";
 
 
 // This file relies on the fact that the following declaration has been made
index 2101460..21f66a6 100644 (file)
@@ -351,8 +351,13 @@ void StringStream::PrintUsingMap(JSObject* js_object) {
         }
         Add(": ");
         FieldIndex index = FieldIndex::ForDescriptor(map, i);
-        Object* value = js_object->RawFastPropertyAt(index);
-        Add("%o\n", value);
+        if (js_object->IsUnboxedDoubleField(index)) {
+          double value = js_object->RawFastDoublePropertyAt(index);
+          Add("<unboxed double> %.16g\n", FmtElm(value));
+        } else {
+          Object* value = js_object->RawFastPropertyAt(index);
+          Add("%o\n", value);
+        }
       }
     }
   }
index fca1d4b..3b820cd 100644 (file)
@@ -27,8 +27,8 @@ class StringAllocator {
 class HeapStringAllocator FINAL : public StringAllocator {
  public:
   ~HeapStringAllocator() { DeleteArray(space_); }
-  virtual char* allocate(unsigned bytes) OVERRIDE;
-  virtual char* grow(unsigned* bytes) OVERRIDE;
+  char* allocate(unsigned bytes) OVERRIDE;
+  char* grow(unsigned* bytes) OVERRIDE;
 
  private:
   char* space_;
index b4ae708..d9cf792 100644 (file)
@@ -8,6 +8,14 @@
 // in runtime.js:
 // var $Array = global.Array;
 
+// And requires the following symbols to be set in the bootstrapper during genesis:
+// - symbolHasInstance
+// - symbolIsConcatSpreadable
+// - symbolIsRegExp
+// - symbolIterator
+// - symbolToStringTag
+// - symbolUnscopables
+
 var $Symbol = global.Symbol;
 
 // -------------------------------------------------------------------
@@ -40,15 +48,6 @@ function SymbolValueOf() {
 }
 
 
-function InternalSymbol(key) {
-  var internal_registry = %SymbolRegistry().for_intern;
-  if (IS_UNDEFINED(internal_registry[key])) {
-    internal_registry[key] = %CreateSymbol(key);
-  }
-  return internal_registry[key];
-}
-
-
 function SymbolFor(key) {
   key = TO_STRING_INLINE(key);
   var registry = %SymbolRegistry();
@@ -76,17 +75,6 @@ function ObjectGetOwnPropertySymbols(obj) {
   return ObjectGetOwnPropertyKeys(obj, PROPERTY_ATTRIBUTES_STRING);
 }
 
-
-//-------------------------------------------------------------------
-
-var symbolHasInstance = InternalSymbol("Symbol.hasInstance");
-var symbolIsConcatSpreadable = InternalSymbol("Symbol.isConcatSpreadable");
-var symbolIsRegExp = InternalSymbol("Symbol.isRegExp");
-var symbolIterator = InternalSymbol("Symbol.iterator");
-var symbolToStringTag = InternalSymbol("Symbol.toStringTag");
-var symbolUnscopables = InternalSymbol("Symbol.unscopables");
-
-
 //-------------------------------------------------------------------
 
 function SetUpSymbol() {
index 050bd2a..cc5dbc2 100644 (file)
@@ -27,59 +27,72 @@ inline double scalbn(double x, int y) { return _scalb(x, y); }
 #endif  // _MSC_VER
 
 const double MathConstants::constants[] = {
-    6.36619772367581382433e-01,   // invpio2   0
-    1.57079632673412561417e+00,   // pio2_1    1
-    6.07710050650619224932e-11,   // pio2_1t   2
-    6.07710050630396597660e-11,   // pio2_2    3
-    2.02226624879595063154e-21,   // pio2_2t   4
-    2.02226624871116645580e-21,   // pio2_3    5
-    8.47842766036889956997e-32,   // pio2_3t   6
-    -1.66666666666666324348e-01,  // S1        7  coefficients for sin
-    8.33333333332248946124e-03,   //           8
-    -1.98412698298579493134e-04,  //           9
-    2.75573137070700676789e-06,   //          10
-    -2.50507602534068634195e-08,  //          11
-    1.58969099521155010221e-10,   // S6       12
-    4.16666666666666019037e-02,   // C1       13  coefficients for cos
-    -1.38888888888741095749e-03,  //          14
-    2.48015872894767294178e-05,   //          15
-    -2.75573143513906633035e-07,  //          16
-    2.08757232129817482790e-09,   //          17
-    -1.13596475577881948265e-11,  // C6       18
-    3.33333333333334091986e-01,   // T0       19  coefficients for tan
-    1.33333333333201242699e-01,   //          20
-    5.39682539762260521377e-02,   //          21
-    2.18694882948595424599e-02,   //          22
-    8.86323982359930005737e-03,   //          23
-    3.59207910759131235356e-03,   //          24
-    1.45620945432529025516e-03,   //          25
-    5.88041240820264096874e-04,   //          26
-    2.46463134818469906812e-04,   //          27
-    7.81794442939557092300e-05,   //          28
-    7.14072491382608190305e-05,   //          29
-    -1.85586374855275456654e-05,  //          30
-    2.59073051863633712884e-05,   // T12      31
-    7.85398163397448278999e-01,   // pio4     32
-    3.06161699786838301793e-17,   // pio4lo   33
-    6.93147180369123816490e-01,   // ln2_hi   34
-    1.90821492927058770002e-10,   // ln2_lo   35
-    1.80143985094819840000e+16,   // 2^54     36
-    6.666666666666666666e-01,     // 2/3      37
-    6.666666666666735130e-01,     // LP1      38  coefficients for log1p
-    3.999999999940941908e-01,     //          39
-    2.857142874366239149e-01,     //          40
-    2.222219843214978396e-01,     //          41
-    1.818357216161805012e-01,     //          42
-    1.531383769920937332e-01,     //          43
-    1.479819860511658591e-01,     // LP7      44
-    7.09782712893383973096e+02,   //          45  overflow threshold for expm1
-    1.44269504088896338700e+00,   // 1/ln2    46
-    -3.33333333333331316428e-02,  // Q1       47  coefficients for expm1
-    1.58730158725481460165e-03,   //          48
-    -7.93650757867487942473e-05,  //          49
-    4.00821782732936239552e-06,   //          50
-    -2.01099218183624371326e-07,  // Q5       51
-    710.4758600739439             //          52  overflow threshold sinh, cosh
+    6.36619772367581382433e-01,   // invpio2    0
+    1.57079632673412561417e+00,   // pio2_1     1
+    6.07710050650619224932e-11,   // pio2_1t    2
+    6.07710050630396597660e-11,   // pio2_2     3
+    2.02226624879595063154e-21,   // pio2_2t    4
+    2.02226624871116645580e-21,   // pio2_3     5
+    8.47842766036889956997e-32,   // pio2_3t    6
+    -1.66666666666666324348e-01,  // S1         7  coefficients for sin
+    8.33333333332248946124e-03,   //            8
+    -1.98412698298579493134e-04,  //            9
+    2.75573137070700676789e-06,   //           10
+    -2.50507602534068634195e-08,  //           11
+    1.58969099521155010221e-10,   // S6        12
+    4.16666666666666019037e-02,   // C1        13  coefficients for cos
+    -1.38888888888741095749e-03,  //           14
+    2.48015872894767294178e-05,   //           15
+    -2.75573143513906633035e-07,  //           16
+    2.08757232129817482790e-09,   //           17
+    -1.13596475577881948265e-11,  // C6        18
+    3.33333333333334091986e-01,   // T0        19  coefficients for tan
+    1.33333333333201242699e-01,   //           20
+    5.39682539762260521377e-02,   //           21
+    2.18694882948595424599e-02,   //           22
+    8.86323982359930005737e-03,   //           23
+    3.59207910759131235356e-03,   //           24
+    1.45620945432529025516e-03,   //           25
+    5.88041240820264096874e-04,   //           26
+    2.46463134818469906812e-04,   //           27
+    7.81794442939557092300e-05,   //           28
+    7.14072491382608190305e-05,   //           29
+    -1.85586374855275456654e-05,  //           30
+    2.59073051863633712884e-05,   // T12       31
+    7.85398163397448278999e-01,   // pio4      32
+    3.06161699786838301793e-17,   // pio4lo    33
+    6.93147180369123816490e-01,   // ln2_hi    34
+    1.90821492927058770002e-10,   // ln2_lo    35
+    6.666666666666666666e-01,     // 2/3       36
+    6.666666666666735130e-01,     // LP1       37  coefficients for log1p
+    3.999999999940941908e-01,     //           38
+    2.857142874366239149e-01,     //           39
+    2.222219843214978396e-01,     //           40
+    1.818357216161805012e-01,     //           41
+    1.531383769920937332e-01,     //           42
+    1.479819860511658591e-01,     // LP7       43
+    7.09782712893383973096e+02,   //           44  overflow threshold for expm1
+    1.44269504088896338700e+00,   // 1/ln2     45
+    -3.33333333333331316428e-02,  // Q1        46  coefficients for expm1
+    1.58730158725481460165e-03,   //           47
+    -7.93650757867487942473e-05,  //           48
+    4.00821782732936239552e-06,   //           49
+    -2.01099218183624371326e-07,  // Q5        50
+    710.4758600739439,            //           51  overflow threshold sinh, cosh
+    4.34294481903251816668e-01,   // ivln10    52  coefficients for log10
+    3.01029995663611771306e-01,   // log10_2hi 53
+    3.69423907715893078616e-13,   // log10_2lo 54
+    5.99999999999994648725e-01,   // L1        55  coefficients for log2
+    4.28571428578550184252e-01,   //           56
+    3.33333329818377432918e-01,   //           57
+    2.72728123808534006489e-01,   //           58
+    2.30660745775561754067e-01,   //           59
+    2.06975017800338417784e-01,   // L6        60
+    9.61796693925975554329e-01,   // cp        61  2/(3*ln(2))
+    9.61796700954437255859e-01,   // cp_h      62
+    -7.02846165095275826516e-09,  // cp_l      63
+    5.84962487220764160156e-01,   // dp_h      64
+    1.35003920212974897128e-08    // dp_l      65
 };
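
The reflowed table drops 2^54 (now a literal in fdlibm.js), shifting every later slot down by one, and appends the log10/log2/pow helper constants in slots 52-65. A quick standalone sanity sketch for the new split constants, using the table values above (tolerances are illustrative):

```cpp
#include <cassert>
#include <cmath>

int main() {
  // cp = 2/(3*ln 2); cp_h + cp_l reconstructs cp with extra precision.
  const double cp   = 9.61796693925975554329e-01;
  const double cp_h = 9.61796700954437255859e-01;
  const double cp_l = -7.02846165095275826516e-09;
  assert(std::fabs(cp - 2.0 / (3.0 * std::log(2.0))) < 1e-15);
  assert(std::fabs((cp_h + cp_l) - cp) < 1e-15);
}
```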
 
 
index cadf85b..c7bc09a 100644 (file)
@@ -23,7 +23,7 @@ int rempio2(double x, double* y);
 
 // Constants to be exposed to builtins via Float64Array.
 struct MathConstants {
-  static const double constants[53];
+  static const double constants[66];
 };
 }
 }  // namespace v8::internal
index b52f1de..ceeacc5 100644 (file)
@@ -20,6 +20,9 @@
 // and exposed through kMath as typed array. We assume the compiler to convert
 // from decimal to binary accurately enough to produce the intended values.
 // kMath is initialized to a Float64Array during genesis and not writable.
+
+"use strict";
+
 var kMath;
 
 const INVPIO2 = kMath[0];
@@ -424,11 +427,12 @@ function MathTan(x) {
 //
 const LN2_HI    = kMath[34];
 const LN2_LO    = kMath[35];
-const TWO54     = kMath[36];
-const TWO_THIRD = kMath[37];
+const TWO_THIRD = kMath[36];
 macro KLOG1P(x)
-(kMath[38+x])
+(kMath[37+x])
 endmacro
+// 2^54
+const TWO54 = 18014398509481984;
 
 function MathLog1p(x) {
   x = x * 1;  // Convert to number.
@@ -607,10 +611,10 @@ function MathLog1p(x) {
 //      For IEEE double 
 //          if x > 7.09782712893383973096e+02 then expm1(x) overflow
 //
-const KEXPM1_OVERFLOW = kMath[45];
-const INVLN2          = kMath[46];
+const KEXPM1_OVERFLOW = kMath[44];
+const INVLN2          = kMath[45];
 macro KEXPM1(x)
-(kMath[47+x])
+(kMath[46+x])
 endmacro
 
 function MathExpm1(x) {
@@ -730,7 +734,7 @@ function MathExpm1(x) {
 //      sinh(x) is |x| if x is +Infinity, -Infinity, or NaN.
 //      only sinh(0)=0 is exact for finite x.
 //
-const KSINH_OVERFLOW = kMath[52];
+const KSINH_OVERFLOW = kMath[51];
 const TWO_M28 = 3.725290298461914e-9;  // 2^-28, empty lower half
 const LOG_MAXD = 709.7822265625;  // 0x40862e42 00000000, empty lower half
 
@@ -782,7 +786,7 @@ function MathSinh(x) {
 //      cosh(x) is |x| if x is +INF, -INF, or NaN.
 //      only cosh(0)=1 is exact for finite x.
 //
-const KCOSH_OVERFLOW = kMath[52];
+const KCOSH_OVERFLOW = kMath[51];
 
 function MathCosh(x) {
   x = x * 1;  // Convert to number.
@@ -812,3 +816,181 @@ function MathCosh(x) {
   // |x| > overflowthreshold.
   return INFINITY;
 }
+
+// ES6 draft 09-27-13, section 20.2.2.21.
+// Return the base 10 logarithm of x
+//
+// Method :
+//      Let log10_2hi = leading 40 bits of log10(2) and
+//          log10_2lo = log10(2) - log10_2hi,
+//          ivln10   = 1/log(10) rounded.
+//      Then
+//              n = ilogb(x),
+//              if(n<0)  n = n+1;
+//              x = scalbn(x,-n);
+//              log10(x) := n*log10_2hi + (n*log10_2lo + ivln10*log(x))
+//
+// Note 1:
+//      To guarantee log10(10**n)=n, where 10**n is normal, the rounding
+//      mode must be set to Round-to-Nearest.
+// Note 2:
+//      [1/log(10)] rounded to 53 bits has error .198 ulps;
+//      log10 is monotonic at all binary break points.
+//
+// Special cases:
+//      log10(x) is NaN if x < 0;
+//      log10(+INF) is +INF; log10(0) is -INF;
+//      log10(NaN) is that NaN;
+//      log10(10**N) = N  for N=0,1,...,22.
+//
+
+const IVLN10 = kMath[52];
+const LOG10_2HI = kMath[53];
+const LOG10_2LO = kMath[54];
+
+function MathLog10(x) {
+  x = x * 1;  // Convert to number.
+  var hx = %_DoubleHi(x);
+  var lx = %_DoubleLo(x);
+  var k = 0;
+
+  if (hx < 0x00100000) {
+    // x < 2^-1022
+    // log10(+/- 0) = -Infinity.
+    if (((hx & 0x7fffffff) | lx) === 0) return -INFINITY;
+    // log10 of negative number is NaN.
+    if (hx < 0) return NAN;
+    // Subnormal number. Scale up x.
+    k -= 54;
+    x *= TWO54;
+    hx = %_DoubleHi(x);
+    lx = %_DoubleLo(x);
+  }
+
+  // Infinity or NaN.
+  if (hx >= 0x7ff00000) return x;
+
+  k += (hx >> 20) - 1023;
+  var i = (k & 0x80000000) >> 31;
+  hx = (hx & 0x000fffff) | ((0x3ff - i) << 20);
+  var y = k + i;
+  x = %_ConstructDouble(hx, lx);
+
+  var z = y * LOG10_2LO + IVLN10 * MathLog(x);
+  return z + y * LOG10_2HI;
+}
+
+
+// ES6 draft 09-27-13, section 20.2.2.22.
+// Return the base 2 logarithm of x
+//
+// fdlibm does not have an explicit log2 function, but fdlibm's pow
+// function does implement an accurate log2 function as part of the
+// pow implementation.  This extracts the core parts of that as a
+// separate log2 function.
+
+// Method:
+// Compute log2(x) in two pieces:
+// log2(x) = w1 + w2
+// where w1 has 53-24 = 29 bits of trailing zeroes.
+
+const DP_H = kMath[64];
+const DP_L = kMath[65];
+
+// Polynomial coefficients for (3/2)*(log2(x) - 2*s - 2/3*s^3)
+macro KLOG2(x)
+(kMath[55+x])
+endmacro
+
+// cp = 2/(3*ln(2)). Note that cp_h + cp_l is cp, but with more accuracy.
+const CP = kMath[61];
+const CP_H = kMath[62];
+const CP_L = kMath[63];
+// 2^53
+const TWO53 = 9007199254740992; 
+
+function MathLog2(x) {
+  x = x * 1;  // Convert to number.
+  var ax = MathAbs(x);
+  var hx = %_DoubleHi(x);
+  var lx = %_DoubleLo(x);
+  var ix = hx & 0x7fffffff;
+
+  // Handle special cases.
+  // log2(+/- 0) = -Infinity
+  if ((ix | lx) == 0) return -INFINITY;
+
+  // log(x) = NaN, if x < 0
+  if (hx < 0) return NAN;
+
+  // log2(Infinity) = Infinity, log2(NaN) = NaN
+  if (ix >= 0x7ff00000) return x;
+
+  var n = 0;
+
+  // Take care of subnormal number.
+  if (ix < 0x00100000) {
+    ax *= TWO53;
+    n -= 53;
+    ix = %_DoubleHi(ax);
+  }
+
+  n += (ix >> 20) - 0x3ff;
+  var j = ix & 0x000fffff;
+
+  // Determine interval.
+  ix = j | 0x3ff00000;  // normalize ix.
+
+  var bp = 1;
+  var dp_h = 0;
+  var dp_l = 0;
+  if (j > 0x3988e) {  // |x| > sqrt(3/2)
+    if (j < 0xbb67a) {  // |x| < sqrt(3)
+      bp = 1.5;
+      dp_h = DP_H;
+      dp_l = DP_L;
+    } else {
+      n += 1;
+      ix -= 0x00100000;
+    }
+  }
+  ax = %_ConstructDouble(ix, %_DoubleLo(ax));
+
+  // Compute ss = s_h + s_l = (x - 1)/(x+1) or (x - 1.5)/(x + 1.5)
+  var u = ax - bp;
+  var v = 1 / (ax + bp);
+  var ss = u * v;
+  var s_h = %_ConstructDouble(%_DoubleHi(ss), 0);
+
+  // t_h = ax + bp[k] High
+  var t_h = %_ConstructDouble(%_DoubleHi(ax + bp), 0);
+  var t_l = ax - (t_h - bp);
+  var s_l = v * ((u - s_h * t_h) - s_h * t_l);
+
+  // Compute log2(ax)
+  var s2 = ss * ss;
+  var r = s2 * s2 * (KLOG2(0) + s2 * (KLOG2(1) + s2 * (KLOG2(2) + s2 * (
+                     KLOG2(3) + s2 * (KLOG2(4) + s2 * KLOG2(5))))));
+  r += s_l * (s_h + ss);
+  s2  = s_h * s_h;
+  t_h = %_ConstructDouble(%_DoubleHi(3.0 + s2 + r), 0);
+  t_l = r - ((t_h - 3.0) - s2);
+  // u + v = ss * (1 + ...)
+  u = s_h * t_h;
+  v = s_l * t_h + t_l * ss;
+
+  // 2 / (3 * log(2)) * (ss + ...)
+  var p_h = %_ConstructDouble(%_DoubleHi(u + v), 0);
+  var p_l = v - (p_h - u);
+  var z_h = CP_H * p_h;
+  var z_l = CP_L * p_h + p_l * CP + dp_l;
+
+  // log2(ax) = (ss + ...) * 2 / (3 * log(2)) = n + dp_h + z_h + z_l
+  var t = n;
+  var t1 = %_ConstructDouble(%_DoubleHi(((z_h + z_l) + dp_h) + t), 0);
+  var t2 = z_l - (((t1 - t) - dp_h) - z_h);
+
+  // t1 + t2 = log2(ax), sum up because we do not care about extra precision.
+  return t1 + t2;
+}
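
Restated from the MathLog10 comment block above, the decomposition being computed is the following (a LaTeX sketch; here x' denotes x scaled into roughly [1, 2) by n = ilogb(x), with n bumped by one in the negative case):

```latex
\[
  \log_{10} x \;=\; n \cdot \mathrm{log10\_2hi}
    \;+\; \bigl(n \cdot \mathrm{log10\_2lo} + \mathrm{ivln10}\cdot \ln x'\bigr),
\]
\[
  \text{where } \mathrm{log10\_2hi} + \mathrm{log10\_2lo} = \log_{10} 2
  \quad\text{and}\quad \mathrm{ivln10} = 1/\ln 10 .
\]
```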
index bab925a..de8ede9 100644 (file)
@@ -163,7 +163,11 @@ namespace internal {
   T(ILLEGAL, "ILLEGAL", 0)                                           \
                                                                      \
   /* Scanner-internal use only. */                                   \
-  T(WHITESPACE, NULL, 0)
+  T(WHITESPACE, NULL, 0)                                             \
+                                                                     \
+  /* ES6 Template Literals */                                        \
+  T(TEMPLATE_SPAN, NULL, 0)                                          \
+  T(TEMPLATE_TAIL, NULL, 0)
 
 
 class Token {
index 8a6d1bb..fd8eb8b 100644 (file)
@@ -157,23 +157,22 @@ int TransitionArray::SearchName(Name* name, int* out_insertion_index) {
 bool TransitionArray::IsSpecialTransition(Name* name) {
   if (!name->IsSymbol()) return false;
   Heap* heap = name->GetHeap();
-  return name == heap->frozen_symbol() ||
+  return name == heap->nonextensible_symbol() ||
+         name == heap->sealed_symbol() || name == heap->frozen_symbol() ||
          name == heap->elements_transition_symbol() ||
          name == heap->observed_symbol();
 }
 #endif
 
 
-int TransitionArray::CompareKeys(Name* key1, uint32_t hash1,
-                                 bool is_data_property1,
+int TransitionArray::CompareKeys(Name* key1, uint32_t hash1, PropertyKind kind1,
                                  PropertyAttributes attributes1, Name* key2,
-                                 uint32_t hash2, bool is_data_property2,
+                                 uint32_t hash2, PropertyKind kind2,
                                  PropertyAttributes attributes2) {
   int cmp = CompareNames(key1, hash1, key2, hash2);
   if (cmp != 0) return cmp;
 
-  return CompareDetails(is_data_property1, attributes1, is_data_property2,
-                        attributes2);
+  return CompareDetails(kind1, attributes1, kind2, attributes2);
 }
 
 
@@ -188,15 +187,12 @@ int TransitionArray::CompareNames(Name* key1, uint32_t hash1, Name* key2,
 }
 
 
-int TransitionArray::CompareDetails(bool is_data_property1,
+int TransitionArray::CompareDetails(PropertyKind kind1,
                                     PropertyAttributes attributes1,
-                                    bool is_data_property2,
+                                    PropertyKind kind2,
                                     PropertyAttributes attributes2) {
-  if (is_data_property1 != is_data_property2) {
-    return static_cast<int>(is_data_property1) <
-                   static_cast<int>(is_data_property2)
-               ? -1
-               : 1;
+  if (kind1 != kind2) {
+    return static_cast<int>(kind1) < static_cast<int>(kind2) ? -1 : 1;
   }
 
   if (attributes1 != attributes2) {
index 51eee6f..c8c63d7 100644 (file)
@@ -97,7 +97,7 @@ Handle<TransitionArray> TransitionArray::Insert(Handle<Map> map,
   bool is_special_transition = flag == SPECIAL_TRANSITION;
   DCHECK_EQ(is_special_transition, IsSpecialTransition(*name));
   PropertyDetails details = is_special_transition
-                                ? PropertyDetails(NONE, NORMAL, 0)
+                                ? PropertyDetails(NONE, FIELD, 0)
                                 : GetTargetDetails(*name, *target);
 
   int insertion_index = kNotFound;
@@ -105,7 +105,7 @@ Handle<TransitionArray> TransitionArray::Insert(Handle<Map> map,
       is_special_transition
           ? map->transitions()->SearchSpecial(Symbol::cast(*name),
                                               &insertion_index)
-          : map->transitions()->Search(details.type(), *name,
+          : map->transitions()->Search(details.kind(), *name,
                                        details.attributes(), &insertion_index);
   if (index == kNotFound) {
     ++new_nof;
@@ -157,7 +157,7 @@ Handle<TransitionArray> TransitionArray::Insert(Handle<Map> map,
     index = is_special_transition ? map->transitions()->SearchSpecial(
                                         Symbol::cast(*name), &insertion_index)
                                   : map->transitions()->Search(
-                                        details.type(), *name,
+                                        details.kind(), *name,
                                         details.attributes(), &insertion_index);
     if (index == kNotFound) {
       ++new_nof;
@@ -189,22 +189,18 @@ Handle<TransitionArray> TransitionArray::Insert(Handle<Map> map,
 }
 
 
-int TransitionArray::SearchDetails(int transition, PropertyType type,
+int TransitionArray::SearchDetails(int transition, PropertyKind kind,
                                    PropertyAttributes attributes,
                                    int* out_insertion_index) {
   int nof_transitions = number_of_transitions();
   DCHECK(transition < nof_transitions);
   Name* key = GetKey(transition);
-  bool is_data = type == FIELD || type == CONSTANT;
   for (; transition < nof_transitions && GetKey(transition) == key;
        transition++) {
     Map* target = GetTarget(transition);
     PropertyDetails target_details = GetTargetDetails(key, target);
 
-    bool target_is_data =
-        target_details.type() == FIELD || target_details.type() == CONSTANT;
-
-    int cmp = CompareDetails(is_data, attributes, target_is_data,
+    int cmp = CompareDetails(kind, attributes, target_details.kind(),
                              target_details.attributes());
     if (cmp == 0) {
       return transition;
@@ -217,13 +213,13 @@ int TransitionArray::SearchDetails(int transition, PropertyType type,
 }
 
 
-int TransitionArray::Search(PropertyType type, Name* name,
+int TransitionArray::Search(PropertyKind kind, Name* name,
                             PropertyAttributes attributes,
                             int* out_insertion_index) {
   int transition = SearchName(name, out_insertion_index);
   if (transition == kNotFound) {
     return kNotFound;
   }
-  return SearchDetails(transition, type, attributes, out_insertion_index);
+  return SearchDetails(transition, kind, attributes, out_insertion_index);
 }
 } }  // namespace v8::internal
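
After this change, transitions order by the tuple (name hash, kind, attributes), compared lexicographically, with the old is_data_property bool replaced by the PropertyKind enum. A sketch of that ordering; the enum values are illustrative stand-ins, not V8's definitions:

```cpp
#include <cassert>

enum PropertyKind { kData = 0, kAccessor = 1 };
typedef int PropertyAttributes;

// Mirrors the shape of TransitionArray::CompareDetails above.
int CompareDetails(PropertyKind kind1, PropertyAttributes attributes1,
                   PropertyKind kind2, PropertyAttributes attributes2) {
  if (kind1 != kind2) {
    return static_cast<int>(kind1) < static_cast<int>(kind2) ? -1 : 1;
  }
  if (attributes1 != attributes2) {
    return static_cast<int>(attributes1) < static_cast<int>(attributes2) ? -1
                                                                         : 1;
  }
  return 0;
}

int main() {
  // Data transitions sort before accessor transitions with equal names.
  assert(CompareDetails(kData, 0, kAccessor, 0) == -1);
  // Same kind: attributes break the tie.
  assert(CompareDetails(kData, 0, kData, 2) == -1);
  assert(CompareDetails(kAccessor, 1, kAccessor, 1) == 0);
}
```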
index ef74b4d..999ad86 100644 (file)
@@ -98,8 +98,8 @@ class TransitionArray: public FixedArray {
   static Handle<TransitionArray> Insert(Handle<Map> map, Handle<Name> name,
                                         Handle<Map> target,
                                         SimpleTransitionFlag flag);
-  // Search a  transition for a given type, property name and attributes.
-  int Search(PropertyType type, Name* name, PropertyAttributes attributes,
+  // Search a transition for a given kind, property name and attributes.
+  int Search(PropertyKind kind, Name* name, PropertyAttributes attributes,
              int* out_insertion_index = NULL);
 
   // Search a non-property transition (like elements kind, observe or frozen
@@ -163,7 +163,7 @@ class TransitionArray: public FixedArray {
   static const int kTransitionTarget = 1;
   static const int kTransitionSize = 2;
 
-#ifdef OBJECT_PRINT
+#if defined(DEBUG) || defined(OBJECT_PRINT)
   // For our gdb macros, we should perhaps change these in the future.
   void Print();
 
@@ -216,15 +216,14 @@ class TransitionArray: public FixedArray {
 
   // Search a first transition for a given property name.
   inline int SearchName(Name* name, int* out_insertion_index = NULL);
-  int SearchDetails(int transition, PropertyType type,
+  int SearchDetails(int transition, PropertyKind kind,
                     PropertyAttributes attributes, int* out_insertion_index);
 
-  // Compares two tuples <key, is_data_property, attributes>, returns -1 if
+  // Compares two tuples <key, kind, attributes>, returns -1 if
   // tuple1 is "less" than tuple2, 0 if tuple1 equal to tuple2 and 1 otherwise.
-  static inline int CompareKeys(Name* key1, uint32_t hash1,
-                                bool is_data_property1,
+  static inline int CompareKeys(Name* key1, uint32_t hash1, PropertyKind kind1,
                                 PropertyAttributes attributes1, Name* key2,
-                                uint32_t hash2, bool is_data_property2,
+                                uint32_t hash2, PropertyKind kind2,
                                 PropertyAttributes attributes2);
 
   // Compares keys, returns -1 if key1 is "less" than key2,
@@ -234,9 +233,9 @@ class TransitionArray: public FixedArray {
 
   // Compares two details, returns -1 if details1 is "less" than details2,
   // 0 if details1 equal to details2 and 1 otherwise.
-  static inline int CompareDetails(bool is_data_property1,
+  static inline int CompareDetails(PropertyKind kind1,
                                    PropertyAttributes attributes1,
-                                   bool is_data_property2,
+                                   PropertyKind kind2,
                                    PropertyAttributes attributes2);
 
   inline void NoIncrementalWriteBarrierSet(int transition_number,
index 676290c..c51d987 100644 (file)
@@ -76,12 +76,14 @@ void TypeFeedbackVector::SetKind(FeedbackVectorICSlot slot, Code::Kind kind) {
 
 
 // static
-Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(Isolate* isolate,
-                                                        int slot_count,
-                                                        int ic_slot_count) {
-  int index_count =
+Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(
+    Isolate* isolate, const FeedbackVectorSpec& spec) {
+  const int slot_count = spec.slots();
+  const int ic_slot_count = spec.ic_slots();
+  const int index_count =
       FLAG_vector_ics ? VectorICComputer::word_count(ic_slot_count) : 0;
-  int length = slot_count + ic_slot_count + index_count + kReservedIndexCount;
+  const int length =
+      slot_count + ic_slot_count + index_count + kReservedIndexCount;
   if (length == kReservedIndexCount) {
     return Handle<TypeFeedbackVector>::cast(
         isolate->factory()->empty_fixed_array());
@@ -107,7 +109,14 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Allocate(Isolate* isolate,
   for (int i = kReservedIndexCount + index_count; i < length; i++) {
     array->set(i, *uninitialized_sentinel, SKIP_WRITE_BARRIER);
   }
-  return Handle<TypeFeedbackVector>::cast(array);
+
+  Handle<TypeFeedbackVector> vector = Handle<TypeFeedbackVector>::cast(array);
+  if (FLAG_vector_ics) {
+    for (int i = 0; i < ic_slot_count; i++) {
+      vector->SetKind(FeedbackVectorICSlot(i), spec.GetKind(i));
+    }
+  }
+  return vector;
 }
 
 
@@ -121,6 +130,27 @@ Handle<TypeFeedbackVector> TypeFeedbackVector::Copy(
 }
 
 
+// This logic is copied from
+// StaticMarkingVisitor<StaticVisitor>::VisitCodeTarget.
+// TODO(mvstanton): with weak handling of all vector ics, this logic should
+// actually be completely eliminated and we no longer need to clear the
+// vector ICs.
+static bool ClearLogic(Heap* heap, int ic_age, Code::Kind kind,
+                       InlineCacheState state) {
+  if (FLAG_cleanup_code_caches_at_gc &&
+      (kind == Code::CALL_IC || heap->flush_monomorphic_ics() ||
+       // TODO(mvstanton): is this ic_age granular enough? it comes from
+       // the SharedFunctionInfo which may change on a different schedule
+       // than ic targets.
+       // ic_age != heap->global_ic_age() ||
+       // is_invalidated_weak_stub ||
+       heap->isolate()->serializer_enabled())) {
+    return true;
+  }
+  return false;
+}
+
+
 void TypeFeedbackVector::ClearSlots(SharedFunctionInfo* shared) {
   int slots = Slots();
   Isolate* isolate = GetIsolate();
@@ -145,19 +175,33 @@ void TypeFeedbackVector::ClearSlots(SharedFunctionInfo* shared) {
   slots = ICSlots();
   if (slots == 0) return;
 
-  // Now clear vector-based ICs. They are all CallICs.
+  // Now clear vector-based ICs.
   // Try and pass the containing code (the "host").
+  Heap* heap = isolate->heap();
   Code* host = shared->code();
+  // I'm not sure yet if this ic age is the correct one.
+  int ic_age = shared->ic_age();
   for (int i = 0; i < slots; i++) {
     FeedbackVectorICSlot slot(i);
     Object* obj = Get(slot);
     if (obj != uninitialized_sentinel) {
-      // TODO(mvstanton): To make this code work with --vector-ics,
-      // additional Nexus types must be created.
-      DCHECK(!FLAG_vector_ics);
-      DCHECK(GetKind(slot) == Code::CALL_IC);
-      CallICNexus nexus(this, slot);
-      ICUtility::Clear(isolate, Code::CALL_IC, host, &nexus);
+      Code::Kind kind = GetKind(slot);
+      if (kind == Code::CALL_IC) {
+        CallICNexus nexus(this, slot);
+        if (ClearLogic(heap, ic_age, kind, nexus.StateFromFeedback())) {
+          nexus.Clear(host);
+        }
+      } else if (kind == Code::LOAD_IC) {
+        LoadICNexus nexus(this, slot);
+        if (ClearLogic(heap, ic_age, kind, nexus.StateFromFeedback())) {
+          nexus.Clear(host);
+        }
+      } else if (kind == Code::KEYED_LOAD_IC) {
+        KeyedLoadICNexus nexus(this, slot);
+        if (ClearLogic(heap, ic_age, kind, nexus.StateFromFeedback())) {
+          nexus.Clear(host);
+        }
+      }
     }
   }
 }
@@ -179,34 +223,80 @@ Handle<FixedArray> FeedbackNexus::EnsureArrayOfSize(int length) {
 void FeedbackNexus::InstallHandlers(int start_index, TypeHandleList* types,
                                     CodeHandleList* handlers) {
   Isolate* isolate = GetIsolate();
-  FixedArray* array = FixedArray::cast(GetFeedback());
+  Handle<FixedArray> array = handle(FixedArray::cast(GetFeedback()), isolate);
   int receiver_count = types->length();
   for (int current = 0; current < receiver_count; ++current) {
     Handle<HeapType> type = types->at(current);
     Handle<Map> map = IC::TypeToMap(*type, isolate);
-    array->set(start_index + (current * 2), *map);
+    Handle<WeakCell> cell = Map::WeakCellForMap(map);
+    array->set(start_index + (current * 2), *cell);
     array->set(start_index + (current * 2 + 1), *handlers->at(current));
   }
 }
 
 
+InlineCacheState LoadICNexus::StateFromFeedback() const {
+  Isolate* isolate = GetIsolate();
+  Object* feedback = GetFeedback();
+  if (feedback == *vector()->UninitializedSentinel(isolate)) {
+    return UNINITIALIZED;
+  } else if (feedback == *vector()->MegamorphicSentinel(isolate)) {
+    return MEGAMORPHIC;
+  } else if (feedback == *vector()->PremonomorphicSentinel(isolate)) {
+    return PREMONOMORPHIC;
+  } else if (feedback->IsFixedArray()) {
+    // Determine state purely by our structure, don't check if the maps are
+    // cleared.
+    FixedArray* array = FixedArray::cast(feedback);
+    int length = array->length();
+    DCHECK(length >= 2);
+    return length == 2 ? MONOMORPHIC : POLYMORPHIC;
+  }
+
+  return UNINITIALIZED;
+}
+
+
+InlineCacheState KeyedLoadICNexus::StateFromFeedback() const {
+  Isolate* isolate = GetIsolate();
+  Object* feedback = GetFeedback();
+  if (feedback == *vector()->UninitializedSentinel(isolate)) {
+    return UNINITIALIZED;
+  } else if (feedback == *vector()->PremonomorphicSentinel(isolate)) {
+    return PREMONOMORPHIC;
+  } else if (feedback == *vector()->GenericSentinel(isolate)) {
+    return GENERIC;
+  } else if (feedback->IsFixedArray()) {
+    // Determine state purely by our structure, don't check if the maps are
+    // cleared.
+    FixedArray* array = FixedArray::cast(feedback);
+    int length = array->length();
+    DCHECK(length >= 3);
+    return length == 3 ? MONOMORPHIC : POLYMORPHIC;
+  }
+
+  return UNINITIALIZED;
+}
+
+
 InlineCacheState CallICNexus::StateFromFeedback() const {
   Isolate* isolate = GetIsolate();
-  InlineCacheState state = UNINITIALIZED;
   Object* feedback = GetFeedback();
 
   if (feedback == *vector()->MegamorphicSentinel(isolate)) {
-    state = GENERIC;
+    return GENERIC;
   } else if (feedback->IsAllocationSite() || feedback->IsJSFunction()) {
-    state = MONOMORPHIC;
-  } else {
-    CHECK(feedback == *vector()->UninitializedSentinel(isolate));
+    return MONOMORPHIC;
   }
 
-  return state;
+  CHECK(feedback == *vector()->UninitializedSentinel(isolate));
+  return UNINITIALIZED;
 }
 
 
+void CallICNexus::Clear(Code* host) { CallIC::Clear(GetIsolate(), host, this); }
+
+
 void CallICNexus::ConfigureGeneric() {
   SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
 }
@@ -233,19 +323,94 @@ void CallICNexus::ConfigureMonomorphic(Handle<JSFunction> function) {
 }
 
 
+void KeyedLoadICNexus::ConfigureGeneric() {
+  SetFeedback(*vector()->GenericSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
+}
+
+
+void LoadICNexus::ConfigureMegamorphic() {
+  SetFeedback(*vector()->MegamorphicSentinel(GetIsolate()), SKIP_WRITE_BARRIER);
+}
+
+
+void LoadICNexus::ConfigurePremonomorphic() {
+  SetFeedback(*vector()->PremonomorphicSentinel(GetIsolate()),
+              SKIP_WRITE_BARRIER);
+}
+
+
+void KeyedLoadICNexus::ConfigurePremonomorphic() {
+  SetFeedback(*vector()->PremonomorphicSentinel(GetIsolate()),
+              SKIP_WRITE_BARRIER);
+}
+
+
+void LoadICNexus::ConfigureMonomorphic(Handle<HeapType> type,
+                                       Handle<Code> handler) {
+  Handle<FixedArray> array = EnsureArrayOfSize(2);
+  Handle<Map> receiver_map = IC::TypeToMap(*type, GetIsolate());
+  Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+  array->set(0, *cell);
+  array->set(1, *handler);
+}
+
+
+void KeyedLoadICNexus::ConfigureMonomorphic(Handle<Name> name,
+                                            Handle<HeapType> type,
+                                            Handle<Code> handler) {
+  Handle<FixedArray> array = EnsureArrayOfSize(3);
+  Handle<Map> receiver_map = IC::TypeToMap(*type, GetIsolate());
+  if (name.is_null()) {
+    array->set(0, Smi::FromInt(0));
+  } else {
+    array->set(0, *name);
+  }
+  Handle<WeakCell> cell = Map::WeakCellForMap(receiver_map);
+  array->set(1, *cell);
+  array->set(2, *handler);
+}
+
+
+void LoadICNexus::ConfigurePolymorphic(TypeHandleList* types,
+                                       CodeHandleList* handlers) {
+  int receiver_count = types->length();
+  EnsureArrayOfSize(receiver_count * 2);
+  InstallHandlers(0, types, handlers);
+}
+
+
+void KeyedLoadICNexus::ConfigurePolymorphic(Handle<Name> name,
+                                            TypeHandleList* types,
+                                            CodeHandleList* handlers) {
+  int receiver_count = types->length();
+  Handle<FixedArray> array = EnsureArrayOfSize(1 + receiver_count * 2);
+  if (name.is_null()) {
+    array->set(0, Smi::FromInt(0));
+  } else {
+    array->set(0, *name);
+  }
+  InstallHandlers(1, types, handlers);
+}
+
+
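
The Configure* methods above and the extraction helpers below agree on one
array layout: an optional name in slot 0 (keyed loads only; Smi 0 marks
element access) followed by (weak cell, handler) pairs, where a cleared weak
cell means the map died and the pair must be skipped. Hypothetical index
arithmetic for that layout, assuming the start_index convention used below:

    #include <cassert>

    // Illustration only: the keyed variant reserves slot 0 for the name, so
    // its map/handler pairs start at index 1 instead of 0.
    int FirstPairIndex(bool keyed) { return keyed ? 1 : 0; }
    int MapIndex(bool keyed, int pair) {
      return FirstPairIndex(keyed) + 2 * pair;
    }
    int HandlerIndex(bool keyed, int pair) { return MapIndex(keyed, pair) + 1; }

    int main() {
      assert(MapIndex(false, 0) == 0 && HandlerIndex(false, 0) == 1);
      assert(MapIndex(true, 1) == 3 && HandlerIndex(true, 1) == 4);
    }
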
 int FeedbackNexus::ExtractMaps(int start_index, MapHandleList* maps) const {
   Isolate* isolate = GetIsolate();
   Object* feedback = GetFeedback();
   if (feedback->IsFixedArray()) {
+    int found = 0;
     FixedArray* array = FixedArray::cast(feedback);
     // The array should be of the form [<optional name>], then
     // [weak cell (map), handler, weak cell, handler, ...].
     DCHECK(array->length() >= (2 + start_index));
     for (int i = start_index; i < array->length(); i += 2) {
-      Map* map = Map::cast(array->get(i));
-      maps->Add(handle(map, isolate));
+      WeakCell* cell = WeakCell::cast(array->get(i));
+      if (!cell->cleared()) {
+        Map* map = Map::cast(cell->value());
+        maps->Add(handle(map, isolate));
+        found++;
+      }
     }
-    return (array->length() - start_index) / 2;
+    return found;
   }
 
   return 0;
@@ -258,11 +423,14 @@ MaybeHandle<Code> FeedbackNexus::FindHandlerForMap(int start_index,
   if (feedback->IsFixedArray()) {
     FixedArray* array = FixedArray::cast(feedback);
     for (int i = start_index; i < array->length(); i += 2) {
-      Map* array_map = Map::cast(array->get(i));
-      if (array_map == *map) {
-        Code* code = Code::cast(array->get(i + 1));
-        DCHECK(code->kind() == Code::HANDLER);
-        return handle(code);
+      WeakCell* cell = WeakCell::cast(array->get(i));
+      if (!cell->cleared()) {
+        Map* array_map = Map::cast(cell->value());
+        if (array_map == *map) {
+          Code* code = Code::cast(array->get(i + 1));
+          DCHECK(code->kind() == Code::HANDLER);
+          return handle(code);
+        }
       }
     }
   }
@@ -278,16 +446,71 @@ bool FeedbackNexus::FindHandlers(int start_index, CodeHandleList* code_list,
   if (feedback->IsFixedArray()) {
     FixedArray* array = FixedArray::cast(feedback);
     // The array should be of the form [<optional name>], then
-    // [map, handler, map, handler, ... ]
+    // [weak cell (map), handler, weak cell, handler, ...]. Be sure to skip
+    // handlers whose maps have been cleared.
     DCHECK(array->length() >= (2 + start_index));
     for (int i = start_index; i < array->length(); i += 2) {
-      Code* code = Code::cast(array->get(i + 1));
-      DCHECK(code->kind() == Code::HANDLER);
-      code_list->Add(handle(code));
-      count++;
+      WeakCell* cell = WeakCell::cast(array->get(i));
+      if (!cell->cleared()) {
+        Code* code = Code::cast(array->get(i + 1));
+        DCHECK(code->kind() == Code::HANDLER);
+        code_list->Add(handle(code));
+        count++;
+      }
     }
   }
   return count == length;
 }
+
+
+int LoadICNexus::ExtractMaps(MapHandleList* maps) const {
+  return FeedbackNexus::ExtractMaps(0, maps);
+}
+
+
+void LoadICNexus::Clear(Code* host) { LoadIC::Clear(GetIsolate(), host, this); }
+
+
+void KeyedLoadICNexus::Clear(Code* host) {
+  KeyedLoadIC::Clear(GetIsolate(), host, this);
+}
+
+
+int KeyedLoadICNexus::ExtractMaps(MapHandleList* maps) const {
+  return FeedbackNexus::ExtractMaps(1, maps);
+}
+
+
+MaybeHandle<Code> LoadICNexus::FindHandlerForMap(Handle<Map> map) const {
+  return FeedbackNexus::FindHandlerForMap(0, map);
+}
+
+
+MaybeHandle<Code> KeyedLoadICNexus::FindHandlerForMap(Handle<Map> map) const {
+  return FeedbackNexus::FindHandlerForMap(1, map);
+}
+
+
+bool LoadICNexus::FindHandlers(CodeHandleList* code_list, int length) const {
+  return FeedbackNexus::FindHandlers(0, code_list, length);
+}
+
+
+bool KeyedLoadICNexus::FindHandlers(CodeHandleList* code_list,
+                                    int length) const {
+  return FeedbackNexus::FindHandlers(1, code_list, length);
+}
+
+
+Name* KeyedLoadICNexus::FindFirstName() const {
+  Object* feedback = GetFeedback();
+  if (feedback->IsFixedArray()) {
+    FixedArray* array = FixedArray::cast(feedback);
+    DCHECK(array->length() >= 3);
+    Object* name = array->get(0);
+    if (name->IsName()) return Name::cast(name);
+  }
+  return NULL;
+}
 }
 }  // namespace v8::internal
index de5a7e1..864f336 100644 (file)
@@ -5,6 +5,8 @@
 #ifndef V8_TYPE_FEEDBACK_VECTOR_H_
 #define V8_TYPE_FEEDBACK_VECTOR_H_
 
+#include <vector>
+
 #include "src/checks.h"
 #include "src/elements-kind.h"
 #include "src/heap/heap.h"
 namespace v8 {
 namespace internal {
 
+class FeedbackVectorSpec {
+ public:
+  FeedbackVectorSpec() : slots_(0), ic_slots_(0) {}
+  FeedbackVectorSpec(int slots, int ic_slots)
+      : slots_(slots), ic_slots_(ic_slots) {
+    if (FLAG_vector_ics) ic_slot_kinds_.resize(ic_slots);
+  }
+
+  int slots() const { return slots_; }
+  void increase_slots(int count) { slots_ += count; }
+
+  int ic_slots() const { return ic_slots_; }
+  void increase_ic_slots(int count) {
+    ic_slots_ += count;
+    if (FLAG_vector_ics) ic_slot_kinds_.resize(ic_slots_);
+  }
+
+  void SetKind(int ic_slot, Code::Kind kind) {
+    DCHECK(FLAG_vector_ics);
+    ic_slot_kinds_[ic_slot] = kind;
+  }
+
+  Code::Kind GetKind(int ic_slot) const {
+    DCHECK(FLAG_vector_ics);
+    return static_cast<Code::Kind>(ic_slot_kinds_.at(ic_slot));
+  }
+
+ private:
+  int slots_;
+  int ic_slots_;
+  std::vector<unsigned char> ic_slot_kinds_;
+};
+
+
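
FeedbackVectorSpec replaces the old raw (slot_count, ic_slot_count) pair and,
when --vector-ics is enabled, records one Code::Kind byte per IC slot before
allocation. A stand-alone sketch of its bookkeeping (Kind stands in for
Code::Kind; the FLAG_vector_ics gating is dropped for brevity):

    #include <cassert>
    #include <vector>

    enum class Kind : unsigned char { kNone, kCall, kLoad, kKeyedLoad };

    class SpecSketch {
     public:
      SpecSketch(int slots, int ic_slots) : slots_(slots), kinds_(ic_slots) {}
      void increase_ic_slots(int count) { kinds_.resize(kinds_.size() + count); }
      void SetKind(int ic_slot, Kind kind) { kinds_.at(ic_slot) = kind; }
      Kind GetKind(int ic_slot) const { return kinds_.at(ic_slot); }
      int slots() const { return slots_; }

     private:
      int slots_;
      std::vector<Kind> kinds_;  // one kind byte per IC slot
    };

    int main() {
      SpecSketch spec(1, 2);
      spec.SetKind(0, Kind::kLoad);
      spec.increase_ic_slots(1);  // the kind table grows with the slot count
      spec.SetKind(2, Kind::kCall);
      assert(spec.GetKind(2) == Kind::kCall);
    }
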
 // The shape of the TypeFeedbackVector is an array with:
 // 0: first_ic_slot_index (== length() if no ic slots are present)
 // 1: ics_with_types
@@ -118,17 +154,11 @@ class TypeFeedbackVector : public FixedArray {
     set(GetIndex(slot), value, mode);
   }
 
-  // IC slots need metadata to recognize the type of IC. Set a Kind for every
-  // slot. If GetKind() returns Code::NUMBER_OF_KINDS, then there is
-  // no kind associated with this slot. This may happen in the current design
-  // if a decision is made at compile time not to emit an IC that was planned
-  // for at parse time. This can be eliminated if we encode kind at parse
-  // time.
+  // IC slots need metadata to recognize the type of IC.
   Code::Kind GetKind(FeedbackVectorICSlot slot) const;
-  void SetKind(FeedbackVectorICSlot slot, Code::Kind kind);
 
-  static Handle<TypeFeedbackVector> Allocate(Isolate* isolate, int slot_count,
-                                             int ic_slot_count);
+  static Handle<TypeFeedbackVector> Allocate(Isolate* isolate,
+                                             const FeedbackVectorSpec& spec);
 
   static Handle<TypeFeedbackVector> Copy(Isolate* isolate,
                                          Handle<TypeFeedbackVector> vector);
@@ -168,6 +198,8 @@ class TypeFeedbackVector : public FixedArray {
   static const int kVectorICKindBits = 2;
   static VectorICKind FromCodeKind(Code::Kind kind);
   static Code::Kind FromVectorICKind(VectorICKind kind);
+  void SetKind(FeedbackVectorICSlot slot, Code::Kind kind);
+
   typedef BitSetComputer<VectorICKind, kVectorICKindBits, kSmiValueSize,
                          uint32_t> VectorICComputer;
 
@@ -202,6 +234,9 @@ class FeedbackNexus {
     return NULL;
   }
 
+  // TODO(mvstanton): remove FindAllMaps; it didn't survive a code review.
+  void FindAllMaps(MapHandleList* maps) const { ExtractMaps(maps); }
+
   virtual InlineCacheState StateFromFeedback() const = 0;
   virtual int ExtractMaps(MapHandleList* maps) const = 0;
   virtual MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const = 0;
@@ -250,18 +285,20 @@ class CallICNexus : public FeedbackNexus {
     DCHECK(vector->GetKind(slot) == Code::CALL_IC);
   }
 
+  void Clear(Code* host);
+
   void ConfigureUninitialized();
   void ConfigureGeneric();
   void ConfigureMonomorphicArray();
   void ConfigureMonomorphic(Handle<JSFunction> function);
 
-  virtual InlineCacheState StateFromFeedback() const OVERRIDE;
+  InlineCacheState StateFromFeedback() const OVERRIDE;
 
-  virtual int ExtractMaps(MapHandleList* maps) const OVERRIDE {
+  int ExtractMaps(MapHandleList* maps) const OVERRIDE {
     // CallICs don't record map feedback.
     return 0;
   }
-  virtual MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const OVERRIDE {
+  MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const OVERRIDE {
     return MaybeHandle<Code>();
   }
   virtual bool FindHandlers(CodeHandleList* code_list,
@@ -269,6 +306,64 @@ class CallICNexus : public FeedbackNexus {
     return length == 0;
   }
 };
+
+
+class LoadICNexus : public FeedbackNexus {
+ public:
+  LoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->GetKind(slot) == Code::LOAD_IC);
+  }
+  LoadICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->GetKind(slot) == Code::LOAD_IC);
+  }
+
+  void Clear(Code* host);
+
+  void ConfigureMegamorphic();
+  void ConfigurePremonomorphic();
+  void ConfigureMonomorphic(Handle<HeapType> type, Handle<Code> handler);
+
+  void ConfigurePolymorphic(TypeHandleList* types, CodeHandleList* handlers);
+
+  InlineCacheState StateFromFeedback() const OVERRIDE;
+  int ExtractMaps(MapHandleList* maps) const OVERRIDE;
+  MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const OVERRIDE;
+  virtual bool FindHandlers(CodeHandleList* code_list,
+                            int length = -1) const OVERRIDE;
+};
+
+
+class KeyedLoadICNexus : public FeedbackNexus {
+ public:
+  KeyedLoadICNexus(Handle<TypeFeedbackVector> vector, FeedbackVectorICSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->GetKind(slot) == Code::KEYED_LOAD_IC);
+  }
+  KeyedLoadICNexus(TypeFeedbackVector* vector, FeedbackVectorICSlot slot)
+      : FeedbackNexus(vector, slot) {
+    DCHECK(vector->GetKind(slot) == Code::KEYED_LOAD_IC);
+  }
+
+  void Clear(Code* host);
+
+  void ConfigureGeneric();
+  void ConfigurePremonomorphic();
+  // name can be a null handle for element loads.
+  void ConfigureMonomorphic(Handle<Name> name, Handle<HeapType> type,
+                            Handle<Code> handler);
+  // name can be null.
+  void ConfigurePolymorphic(Handle<Name> name, TypeHandleList* types,
+                            CodeHandleList* handlers);
+
+  InlineCacheState StateFromFeedback() const OVERRIDE;
+  int ExtractMaps(MapHandleList* maps) const OVERRIDE;
+  MaybeHandle<Code> FindHandlerForMap(Handle<Map> map) const OVERRIDE;
+  virtual bool FindHandlers(CodeHandleList* code_list,
+                            int length = -1) const OVERRIDE;
+  Name* FindFirstName() const OVERRIDE;
+};
 }
 }  // namespace v8::internal
 
index 2c033b0..611373f 100644 (file)
@@ -81,6 +81,24 @@ bool TypeFeedbackOracle::LoadIsUninitialized(TypeFeedbackId id) {
 }
 
 
+bool TypeFeedbackOracle::LoadIsUninitialized(FeedbackVectorICSlot slot) {
+  Code::Kind kind = feedback_vector_->GetKind(slot);
+  if (kind == Code::LOAD_IC) {
+    LoadICNexus nexus(feedback_vector_, slot);
+    return nexus.StateFromFeedback() == UNINITIALIZED;
+  } else if (kind == Code::KEYED_LOAD_IC) {
+    KeyedLoadICNexus nexus(feedback_vector_, slot);
+    return nexus.StateFromFeedback() == UNINITIALIZED;
+  } else if (kind == Code::NUMBER_OF_KINDS) {
+    // Code::NUMBER_OF_KINDS indicates a slot that was never even compiled
+    // in full code.
+    return true;
+  }
+
+  return false;
+}
+
+
 bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
   Handle<Object> maybe_code = GetInfo(ast_id);
   if (!maybe_code->IsCode()) return false;
@@ -89,6 +107,14 @@ bool TypeFeedbackOracle::StoreIsUninitialized(TypeFeedbackId ast_id) {
 }
 
 
+bool TypeFeedbackOracle::CallIsUninitialized(FeedbackVectorICSlot slot) {
+  Handle<Object> value = GetInfo(slot);
+  return value->IsUndefined() ||
+         value.is_identical_to(
+             TypeFeedbackVector::UninitializedSentinel(isolate()));
+}
+
+
 bool TypeFeedbackOracle::CallIsMonomorphic(FeedbackVectorICSlot slot) {
   Handle<Object> value = GetInfo(slot);
   return value->IsAllocationSite() || value->IsJSFunction();
@@ -130,6 +156,21 @@ void TypeFeedbackOracle::GetStoreModeAndKeyType(
 }
 
 
+void TypeFeedbackOracle::GetLoadKeyType(
+    TypeFeedbackId ast_id, IcCheckType* key_type) {
+  Handle<Object> maybe_code = GetInfo(ast_id);
+  if (maybe_code->IsCode()) {
+    Handle<Code> code = Handle<Code>::cast(maybe_code);
+    if (code->kind() == Code::KEYED_LOAD_IC) {
+      ExtraICState extra_ic_state = code->extra_ic_state();
+      *key_type = KeyedLoadIC::GetKeyType(extra_ic_state);
+      return;
+    }
+  }
+  *key_type = ELEMENT;
+}
+
+
 Handle<JSFunction> TypeFeedbackOracle::GetCallTarget(
     FeedbackVectorICSlot slot) {
   Handle<Object> info = GetInfo(slot);
@@ -269,17 +310,45 @@ void TypeFeedbackOracle::PropertyReceiverTypes(TypeFeedbackId id,
 }
 
 
-void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
-    TypeFeedbackId id, SmallMapList* receiver_types, bool* is_string) {
-  receiver_types->Clear();
-  CollectReceiverTypes(id, receiver_types);
-
-  // Are all the receiver maps string maps?
+bool TypeFeedbackOracle::HasOnlyStringMaps(SmallMapList* receiver_types) {
   bool all_strings = receiver_types->length() > 0;
   for (int i = 0; i < receiver_types->length(); i++) {
     all_strings &= receiver_types->at(i)->IsStringMap();
   }
-  *is_string = all_strings;
+  return all_strings;
+}
+
+
+void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
+    TypeFeedbackId id,
+    SmallMapList* receiver_types,
+    bool* is_string,
+    IcCheckType* key_type) {
+  receiver_types->Clear();
+  CollectReceiverTypes(id, receiver_types);
+  *is_string = HasOnlyStringMaps(receiver_types);
+  GetLoadKeyType(id, key_type);
+}
+
+
+void TypeFeedbackOracle::PropertyReceiverTypes(FeedbackVectorICSlot slot,
+                                               Handle<String> name,
+                                               SmallMapList* receiver_types) {
+  receiver_types->Clear();
+  LoadICNexus nexus(feedback_vector_, slot);
+  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
+  CollectReceiverTypes(&nexus, name, flags, receiver_types);
+}
+
+
+void TypeFeedbackOracle::KeyedPropertyReceiverTypes(
+    FeedbackVectorICSlot slot, SmallMapList* receiver_types, bool* is_string,
+    IcCheckType* key_type) {
+  receiver_types->Clear();
+  KeyedLoadICNexus nexus(feedback_vector_, slot);
+  CollectReceiverTypes<FeedbackNexus>(&nexus, receiver_types);
+  *is_string = HasOnlyStringMaps(receiver_types);
+  *key_type = nexus.FindFirstName() != NULL ? PROPERTY : ELEMENT;
 }
 
 
@@ -316,14 +385,21 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
 
   DCHECK(object->IsCode());
   Handle<Code> code(Handle<Code>::cast(object));
+  CollectReceiverTypes<Code>(*code, name, flags, types);
+}
 
+
+template <class T>
+void TypeFeedbackOracle::CollectReceiverTypes(T* obj, Handle<String> name,
+                                              Code::Flags flags,
+                                              SmallMapList* types) {
   if (FLAG_collect_megamorphic_maps_from_stub_cache &&
-      code->ic_state() == MEGAMORPHIC) {
+      obj->ic_state() == MEGAMORPHIC) {
     types->Reserve(4, zone());
     isolate()->stub_cache()->CollectMatchingMaps(
         types, name, flags, native_context_, zone());
   } else {
-    CollectReceiverTypes(ast_id, types);
+    CollectReceiverTypes<T>(obj, types);
   }
 }
 
@@ -367,12 +443,18 @@ void TypeFeedbackOracle::CollectReceiverTypes(TypeFeedbackId ast_id,
   Handle<Object> object = GetInfo(ast_id);
   if (!object->IsCode()) return;
   Handle<Code> code = Handle<Code>::cast(object);
+  CollectReceiverTypes<Code>(*code, types);
+}
+
+
+template <class T>
+void TypeFeedbackOracle::CollectReceiverTypes(T* obj, SmallMapList* types) {
   MapHandleList maps;
-  if (code->ic_state() == MONOMORPHIC) {
-    Map* map = code->FindFirstMap();
+  if (obj->ic_state() == MONOMORPHIC) {
+    Map* map = obj->FindFirstMap();
     if (map != NULL) maps.Add(handle(map));
-  } else if (code->ic_state() == POLYMORPHIC) {
-    code->FindAllMaps(&maps);
+  } else if (obj->ic_state() == POLYMORPHIC) {
+    obj->FindAllMaps(&maps);
   } else {
     return;
   }
index 8fe5818..60f156f 100644 (file)
@@ -24,7 +24,9 @@ class TypeFeedbackOracle: public ZoneObject {
                      Handle<Context> native_context, Zone* zone);
 
   bool LoadIsUninitialized(TypeFeedbackId id);
+  bool LoadIsUninitialized(FeedbackVectorICSlot slot);
   bool StoreIsUninitialized(TypeFeedbackId id);
+  bool CallIsUninitialized(FeedbackVectorICSlot slot);
   bool CallIsMonomorphic(FeedbackVectorICSlot slot);
   bool KeyedArrayCallIsHoley(TypeFeedbackId id);
   bool CallNewIsMonomorphic(FeedbackVectorSlot slot);
@@ -38,12 +40,19 @@ class TypeFeedbackOracle: public ZoneObject {
   void GetStoreModeAndKeyType(TypeFeedbackId id,
                               KeyedAccessStoreMode* store_mode,
                               IcCheckType* key_type);
+  void GetLoadKeyType(TypeFeedbackId id, IcCheckType* key_type);
 
   void PropertyReceiverTypes(TypeFeedbackId id, Handle<String> name,
                              SmallMapList* receiver_types);
+  void PropertyReceiverTypes(FeedbackVectorICSlot slot, Handle<String> name,
+                             SmallMapList* receiver_types);
   void KeyedPropertyReceiverTypes(TypeFeedbackId id,
                                   SmallMapList* receiver_types,
-                                  bool* is_string);
+                                  bool* is_string,
+                                  IcCheckType* key_type);
+  void KeyedPropertyReceiverTypes(FeedbackVectorICSlot slot,
+                                  SmallMapList* receiver_types, bool* is_string,
+                                  IcCheckType* key_type);
   void AssignmentReceiverTypes(TypeFeedbackId id,
                                Handle<String> name,
                                SmallMapList* receiver_types);
@@ -56,6 +65,8 @@ class TypeFeedbackOracle: public ZoneObject {
 
   void CollectReceiverTypes(TypeFeedbackId id,
                             SmallMapList* types);
+  template <class T>
+  void CollectReceiverTypes(T* obj, SmallMapList* types);
 
   static bool CanRetainOtherContext(Map* map, Context* native_context);
   static bool CanRetainOtherContext(JSFunction* function,
@@ -97,6 +108,13 @@ class TypeFeedbackOracle: public ZoneObject {
                             Handle<String> name,
                             Code::Flags flags,
                             SmallMapList* types);
+  template <class T>
+  void CollectReceiverTypes(T* obj, Handle<String> name, Code::Flags flags,
+                            SmallMapList* types);
+
+  // Returns true if the list contains at least one map and all of its maps
+  // are string maps.
+  bool HasOnlyStringMaps(SmallMapList* receiver_types);
 
   void SetInfo(TypeFeedbackId id, Object* target);
 
index 162c35a..c4f1bae 100644 (file)
@@ -2,6 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include <iomanip>
+
 #include "src/types.h"
 
 #include "src/ostreams.h"
@@ -151,9 +153,9 @@ TypeImpl<Config>::BitsetType::Lub(TypeImpl* type) {
   }
   if (type->IsConstant()) return type->AsConstant()->Bound()->AsBitset();
   if (type->IsRange()) return type->AsRange()->BitsetLub();
-  if (type->IsContext()) return kInternal & kTaggedPtr;
+  if (type->IsContext()) return kInternal & kTaggedPointer;
   if (type->IsArray()) return kArray;
-  if (type->IsFunction()) return kFunction;
+  if (type->IsFunction()) return kOtherObject;  // TODO(rossberg): kFunction
   UNREACHABLE();
   return kNone;
 }
@@ -198,10 +200,10 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
              map == heap->no_interceptor_result_sentinel_map() ||
              map == heap->termination_exception_map() ||
              map == heap->arguments_marker_map());
-      return kInternal & kTaggedPtr;
+      return kInternal & kTaggedPointer;
     }
     case HEAP_NUMBER_TYPE:
-      return kNumber & kTaggedPtr;
+      return kNumber & kTaggedPointer;
     case JS_VALUE_TYPE:
     case JS_DATE_TYPE:
     case JS_OBJECT_TYPE:
@@ -225,9 +227,9 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
     case JS_ARRAY_TYPE:
       return kArray;
     case JS_FUNCTION_TYPE:
-      return kFunction;
+      return kOtherObject;  // TODO(rossberg): there should be a Function type.
     case JS_REGEXP_TYPE:
-      return kRegExp;
+      return kOtherObject;  // TODO(rossberg): there should be a RegExp type.
     case JS_PROXY_TYPE:
     case JS_FUNCTION_PROXY_TYPE:
       return kProxy;
@@ -250,7 +252,7 @@ TypeImpl<Config>::BitsetType::Lub(i::Map* map) {
     case BYTE_ARRAY_TYPE:
     case FOREIGN_TYPE:
     case CODE_TYPE:
-      return kInternal & kTaggedPtr;
+      return kInternal & kTaggedPointer;
     default:
       UNREACHABLE();
       return kNone;
@@ -263,7 +265,8 @@ typename TypeImpl<Config>::bitset
 TypeImpl<Config>::BitsetType::Lub(i::Object* value) {
   DisallowHeapAllocation no_allocation;
   if (value->IsNumber()) {
-    return Lub(value->Number()) & (value->IsSmi() ? kTaggedInt : kTaggedPtr);
+    return Lub(value->Number()) &
+        (value->IsSmi() ? kTaggedSigned : kTaggedPointer);
   }
   return Lub(i::HeapObject::cast(value)->map());
 }
@@ -276,35 +279,33 @@ TypeImpl<Config>::BitsetType::Lub(double value) {
   if (i::IsMinusZero(value)) return kMinusZero;
   if (std::isnan(value)) return kNaN;
   if (IsUint32Double(value) || IsInt32Double(value)) return Lub(value, value);
-  return kOtherNumber;
+  return kPlainNumber;
 }
 
 
 // Minimum values of regular numeric bitsets when SmiValuesAre31Bits.
-template<class Config>
+template <class Config>
 const typename TypeImpl<Config>::BitsetType::BitsetMin
-TypeImpl<Config>::BitsetType::BitsetMins31[] = {
-    {kOtherNumber, -V8_INFINITY},
-    {kOtherSigned32, kMinInt},
-    {kOtherSignedSmall, -0x40000000},
-    {kUnsignedSmall, 0},
-    {kOtherUnsigned31, 0x40000000},
-    {kOtherUnsigned32, 0x80000000},
-    {kOtherNumber, static_cast<double>(kMaxUInt32) + 1}
-};
+    TypeImpl<Config>::BitsetType::BitsetMins31[] = {
+        {kOtherNumber, -V8_INFINITY},
+        {kOtherSigned32, kMinInt},
+        {kNegativeSignedSmall, -0x40000000},
+        {kUnsignedSmall, 0},
+        {kOtherUnsigned31, 0x40000000},
+        {kOtherUnsigned32, 0x80000000},
+        {kOtherNumber, static_cast<double>(kMaxUInt32) + 1}};
 
 
 // Minimum values of regular numeric bitsets when SmiValuesAre32Bits.
 // OtherSigned32 and OtherUnsigned31 are empty (see the diagrams in types.h).
-template<class Config>
+template <class Config>
 const typename TypeImpl<Config>::BitsetType::BitsetMin
-TypeImpl<Config>::BitsetType::BitsetMins32[] = {
-    {kOtherNumber, -V8_INFINITY},
-    {kOtherSignedSmall, kMinInt},
-    {kUnsignedSmall, 0},
-    {kOtherUnsigned32, 0x80000000},
-    {kOtherNumber, static_cast<double>(kMaxUInt32) + 1}
-};
+    TypeImpl<Config>::BitsetType::BitsetMins32[] = {
+        {kOtherNumber, -V8_INFINITY},
+        {kNegativeSignedSmall, kMinInt},
+        {kUnsignedSmall, 0},
+        {kOtherUnsigned32, 0x80000000},
+        {kOtherNumber, static_cast<double>(kMaxUInt32) + 1}};
 
 
 template<class Config>
@@ -314,6 +315,11 @@ TypeImpl<Config>::BitsetType::Lub(double min, double max) {
   int lub = kNone;
   const BitsetMin* mins = BitsetMins();
 
+  // Make sure the min-max range touches 0, so we are guaranteed no holes
+  // in unions of valid bitsets.
+  if (max < -1) max = -1;
+  if (min > 0) min = 0;
+
   for (size_t i = 1; i < BitsetMinsSize(); ++i) {
     if (min < mins[i].min) {
       lub |= mins[i-1].bits;
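
The clamp above widens every queried range so that it touches zero before the
minimum table is walked, which keeps unions of valid bitsets free of holes
between a positive (or negative) range and zero. A runnable sketch of the
walk using the 32-bit Smi table (bit values are illustrative stand-ins for
the real constants):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    enum Bits : uint32_t {
      kNegativeSignedSmall = 1, kUnsignedSmall = 2,
      kOtherUnsigned32 = 4, kOtherNumber = 8,
    };
    struct BitsetMin { uint32_t bits; double min; };
    const BitsetMin kMins32[] = {
        {kOtherNumber, -std::numeric_limits<double>::infinity()},
        {kNegativeSignedSmall, -2147483648.0},
        {kUnsignedSmall, 0},
        {kOtherUnsigned32, 2147483648.0},
        {kOtherNumber, 4294967296.0}};

    uint32_t LubSketch(double min, double max) {
      uint32_t lub = 0;
      if (max < -1) max = -1;  // clamp so the range touches zero
      if (min > 0) min = 0;
      const size_t n = sizeof(kMins32) / sizeof(kMins32[0]);
      for (size_t i = 1; i < n; ++i) {
        if (min < kMins32[i].min) lub |= kMins32[i - 1].bits;
        if (max < kMins32[i].min) return lub;
      }
      return lub | kMins32[n - 1].bits;
    }

    int main() {
      // [100, 200] is widened to [0, 200], so UnsignedSmall is included and
      // no hole is left between 0 and 100.
      assert(LubSketch(100, 200) == kUnsignedSmall);
    }
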
@@ -984,6 +990,7 @@ void TypeImpl<Config>::BitsetType::Print(std::ostream& os,  // NOLINT
 #undef BITSET_CONSTANT
 
 #define BITSET_CONSTANT(type, value) SEMANTIC(k##type),
+      INTERNAL_BITSET_TYPE_LIST(BITSET_CONSTANT)
       SEMANTIC_BITSET_TYPE_LIST(BITSET_CONSTANT)
 #undef BITSET_CONSTANT
   };
@@ -1017,8 +1024,12 @@ void TypeImpl<Config>::PrintTo(std::ostream& os, PrintDimension dim) {
     } else if (this->IsConstant()) {
       os << "Constant(" << Brief(*this->AsConstant()->Value()) << ")";
     } else if (this->IsRange()) {
-      os << "Range(" << this->AsRange()->Min()->Number()
-         << ", " << this->AsRange()->Max()->Number() << ")";
+      std::ostream::fmtflags saved_flags = os.setf(std::ios::fixed);
+      std::streamsize saved_precision = os.precision(0);
+      os << "Range(" << this->AsRange()->Min()->Number() << ", "
+         << this->AsRange()->Max()->Number() << ")";
+      os.flags(saved_flags);
+      os.precision(saved_precision);
     } else if (this->IsContext()) {
       os << "Context(";
       this->AsContext()->Outer()->PrintTo(os, dim);
index 1d506d0..4b7d8ba 100644 (file)
@@ -154,76 +154,90 @@ namespace internal {
 // Values for bitset types
 
 #define MASK_BITSET_TYPE_LIST(V) \
-  V(Representation, 0xff800000u) \
-  V(Semantic,       0x007ffffeu)
+  V(Representation, 0xfff00000u) \
+  V(Semantic,       0x000ffffeu)
 
 #define REPRESENTATION(k) ((k) & BitsetType::kRepresentation)
 #define SEMANTIC(k)       ((k) & BitsetType::kSemantic)
 
-#define REPRESENTATION_BITSET_TYPE_LIST(V) \
-  V(None,             0)                   \
-  V(UntaggedInt1,     1u << 23 | kSemantic) \
-  V(UntaggedInt8,     1u << 24 | kSemantic) \
-  V(UntaggedInt16,    1u << 25 | kSemantic) \
-  V(UntaggedInt32,    1u << 26 | kSemantic) \
-  V(UntaggedFloat32,  1u << 27 | kSemantic) \
-  V(UntaggedFloat64,  1u << 28 | kSemantic) \
-  V(UntaggedPtr,      1u << 29 | kSemantic) \
-  V(TaggedInt,        1u << 30 | kSemantic) \
-  V(TaggedPtr,        1u << 31 | kSemantic) \
+#define REPRESENTATION_BITSET_TYPE_LIST(V)    \
+  V(None,               0)                    \
+  V(UntaggedBit,        1u << 20 | kSemantic) \
+  V(UntaggedSigned8,    1u << 21 | kSemantic) \
+  V(UntaggedSigned16,   1u << 22 | kSemantic) \
+  V(UntaggedSigned32,   1u << 23 | kSemantic) \
+  V(UntaggedUnsigned8,  1u << 24 | kSemantic) \
+  V(UntaggedUnsigned16, 1u << 25 | kSemantic) \
+  V(UntaggedUnsigned32, 1u << 26 | kSemantic) \
+  V(UntaggedFloat32,    1u << 27 | kSemantic) \
+  V(UntaggedFloat64,    1u << 28 | kSemantic) \
+  V(UntaggedPointer,    1u << 29 | kSemantic) \
+  V(TaggedSigned,       1u << 30 | kSemantic) \
+  V(TaggedPointer,      1u << 31 | kSemantic) \
   \
-  V(UntaggedInt,      kUntaggedInt1 | kUntaggedInt8 |      \
-                      kUntaggedInt16 | kUntaggedInt32)     \
-  V(UntaggedFloat,    kUntaggedFloat32 | kUntaggedFloat64) \
-  V(UntaggedNumber,   kUntaggedInt | kUntaggedFloat)       \
-  V(Untagged,         kUntaggedNumber | kUntaggedPtr)      \
-  V(Tagged,           kTaggedInt | kTaggedPtr)
+  V(UntaggedSigned,     kUntaggedSigned8 | kUntaggedSigned16 |              \
+                        kUntaggedSigned32)                                  \
+  V(UntaggedUnsigned,   kUntaggedUnsigned8 | kUntaggedUnsigned16 |          \
+                        kUntaggedUnsigned32)                                \
+  V(UntaggedIntegral8,  kUntaggedSigned8 | kUntaggedUnsigned8)              \
+  V(UntaggedIntegral16, kUntaggedSigned16 | kUntaggedUnsigned16)            \
+  V(UntaggedIntegral32, kUntaggedSigned32 | kUntaggedUnsigned32)            \
+  V(UntaggedIntegral,   kUntaggedBit | kUntaggedSigned | kUntaggedUnsigned) \
+  V(UntaggedFloat,      kUntaggedFloat32 | kUntaggedFloat64)                \
+  V(UntaggedNumber,     kUntaggedIntegral | kUntaggedFloat)                 \
+  V(Untagged,           kUntaggedNumber | kUntaggedPointer)                 \
+  V(Tagged,             kTaggedSigned | kTaggedPointer)
+
+#define INTERNAL_BITSET_TYPE_LIST(V)                                      \
+  V(OtherUnsigned31, 1u << 1 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherUnsigned32, 1u << 2 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherSigned32,   1u << 3 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(OtherNumber,     1u << 4 | REPRESENTATION(kTagged | kUntaggedNumber))
 
 #define SEMANTIC_BITSET_TYPE_LIST(V) \
-  V(Null,                1u << 1  | REPRESENTATION(kTaggedPtr)) \
-  V(Undefined,           1u << 2  | REPRESENTATION(kTaggedPtr)) \
-  V(Boolean,             1u << 3  | REPRESENTATION(kTaggedPtr)) \
-  V(UnsignedSmall,       1u << 4  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherSignedSmall,    1u << 5  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherUnsigned31,     1u << 6  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherUnsigned32,     1u << 7  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherSigned32,       1u << 8  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(MinusZero,           1u << 9  | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(NaN,                 1u << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(OtherNumber,         1u << 11 | REPRESENTATION(kTagged | kUntaggedNumber)) \
-  V(Symbol,              1u << 12 | REPRESENTATION(kTaggedPtr)) \
-  V(InternalizedString,  1u << 13 | REPRESENTATION(kTaggedPtr)) \
-  V(OtherString,         1u << 14 | REPRESENTATION(kTaggedPtr)) \
-  V(Undetectable,        1u << 15 | REPRESENTATION(kTaggedPtr)) \
-  V(Array,               1u << 16 | REPRESENTATION(kTaggedPtr)) \
-  V(Buffer,              1u << 17 | REPRESENTATION(kTaggedPtr)) \
-  V(Function,            1u << 18 | REPRESENTATION(kTaggedPtr)) \
-  V(RegExp,              1u << 19 | REPRESENTATION(kTaggedPtr)) \
-  V(OtherObject,         1u << 20 | REPRESENTATION(kTaggedPtr)) \
-  V(Proxy,               1u << 21 | REPRESENTATION(kTaggedPtr)) \
-  V(Internal,            1u << 22 | REPRESENTATION(kTagged | kUntagged)) \
+  V(NegativeSignedSmall, 1u << 5  | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(Null,                1u << 6  | REPRESENTATION(kTaggedPointer)) \
+  V(Undefined,           1u << 7  | REPRESENTATION(kTaggedPointer)) \
+  V(Boolean,             1u << 8  | REPRESENTATION(kTaggedPointer)) \
+  V(UnsignedSmall,       1u << 9  | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(MinusZero,           1u << 10 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(NaN,                 1u << 11 | REPRESENTATION(kTagged | kUntaggedNumber)) \
+  V(Symbol,              1u << 12 | REPRESENTATION(kTaggedPointer)) \
+  V(InternalizedString,  1u << 13 | REPRESENTATION(kTaggedPointer)) \
+  V(OtherString,         1u << 14 | REPRESENTATION(kTaggedPointer)) \
+  V(Undetectable,        1u << 15 | REPRESENTATION(kTaggedPointer)) \
+  V(Array,               1u << 16 | REPRESENTATION(kTaggedPointer)) \
+  V(OtherObject,         1u << 17 | REPRESENTATION(kTaggedPointer)) \
+  V(Proxy,               1u << 18 | REPRESENTATION(kTaggedPointer)) \
+  V(Internal,            1u << 19 | REPRESENTATION(kTagged | kUntagged)) \
   \
-  V(SignedSmall,         kUnsignedSmall | kOtherSignedSmall) \
+  V(SignedSmall,         kUnsignedSmall | kNegativeSignedSmall) \
   V(Signed32,            kSignedSmall | kOtherUnsigned31 | kOtherSigned32) \
+  V(NegativeSigned32,    kNegativeSignedSmall | kOtherSigned32) \
+  V(NonNegativeSigned32, kUnsignedSmall | kOtherUnsigned31) \
   V(Unsigned32,          kUnsignedSmall | kOtherUnsigned31 | kOtherUnsigned32) \
   V(Integral32,          kSigned32 | kUnsigned32) \
-  V(OrderedNumber,       kIntegral32 | kMinusZero | kOtherNumber) \
+  V(PlainNumber,         kIntegral32 | kOtherNumber) \
+  V(OrderedNumber,       kPlainNumber | kMinusZero) \
   V(Number,              kOrderedNumber | kNaN) \
   V(String,              kInternalizedString | kOtherString) \
   V(UniqueName,          kSymbol | kInternalizedString) \
   V(Name,                kSymbol | kString) \
   V(NumberOrString,      kNumber | kString) \
-  V(Primitive,           kNumber | kName | kBoolean | kNull | kUndefined) \
-  V(DetectableObject,    kArray | kFunction | kRegExp | kOtherObject) \
+  V(PlainPrimitive,      kNumberOrString | kBoolean | kNull | kUndefined) \
+  V(Primitive,           kSymbol | kPlainPrimitive) \
+  V(DetectableObject,    kArray | kOtherObject) \
   V(DetectableReceiver,  kDetectableObject | kProxy) \
   V(Detectable,          kDetectableReceiver | kNumber | kName) \
   V(Object,              kDetectableObject | kUndetectable) \
   V(Receiver,            kObject | kProxy) \
+  V(StringOrReceiver,    kString | kReceiver) \
   V(Unique,              kBoolean | kUniqueName | kNull | kUndefined | \
                          kReceiver) \
   V(NonNumber,           kUnique | kString | kInternal) \
   V(Any,                 0xfffffffeu)
 
+
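
With the widened masks, each 32-bit bitset now splits into a 12-bit
representation part (bits 20-31) and a semantic part (bits 1-19); the
INTERNAL list holds semantic bits that never appear alone, only combined into
larger types such as Signed32. A small check of the split using two constants
from the lists above (the semantic bit is shown in isolation; the real
kUnsignedSmall also carries representation bits):

    #include <cassert>
    #include <cstdint>

    constexpr uint32_t kRepresentationMask = 0xfff00000u;
    constexpr uint32_t kSemanticMask = 0x000ffffeu;
    constexpr uint32_t kTaggedSigned = 1u << 30;     // a representation bit
    constexpr uint32_t kUnsignedSmallBit = 1u << 9;  // a semantic bit

    int main() {
      // The two halves are disjoint, so a type decomposes cleanly.
      assert((kRepresentationMask & kSemanticMask) == 0);
      uint32_t bits = kTaggedSigned | kUnsignedSmallBit;
      assert((bits & kRepresentationMask) == kTaggedSigned);
      assert((bits & kSemanticMask) == kUnsignedSmallBit);
    }
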
 /*
  * The following diagrams show how integers (in the mathematical sense) are
  * divided among the different atomic numerical types.
@@ -252,9 +266,11 @@ namespace internal {
   REPRESENTATION_BITSET_TYPE_LIST(V) \
   SEMANTIC_BITSET_TYPE_LIST(V)
 
-#define BITSET_TYPE_LIST(V) \
-  MASK_BITSET_TYPE_LIST(V) \
-  PROPER_BITSET_TYPE_LIST(V)
+#define BITSET_TYPE_LIST(V)          \
+  MASK_BITSET_TYPE_LIST(V)           \
+  REPRESENTATION_BITSET_TYPE_LIST(V) \
+  INTERNAL_BITSET_TYPE_LIST(V)       \
+  SEMANTIC_BITSET_TYPE_LIST(V)
 
 
 // -----------------------------------------------------------------------------
@@ -464,6 +480,11 @@ class TypeImpl : public Config::Base {
   double Min();
   double Max();
 
+  // Extracts a range from the type. If the type is a range, it just
+  // returns it; if it is a union, it returns the range component.
+  // Note that the ranges of constant types are not included.
+  RangeType* GetRange();
+
   int NumClasses();
   int NumConstants();
 
@@ -551,7 +572,6 @@ class TypeImpl : public Config::Base {
   static bool Contains(RangeType* lhs, RangeType* rhs);
   static bool Contains(RangeType* range, i::Object* val);
 
-  RangeType* GetRange();
   static int UpdateRange(
       RangeHandle type, UnionHandle result, int size, Region* region);
 
@@ -587,6 +607,20 @@ class TypeImpl<Config>::BitsetType : public TypeImpl<Config> {
 
   static TypeImpl* New(bitset bits) {
     DCHECK(bits == kNone || IsInhabited(bits));
+
+    if (FLAG_enable_slow_asserts) {
+      // Check that the bitset does not contain any holes in number ranges.
+      bitset mask = kSemantic;
+      if (!i::SmiValuesAre31Bits()) {
+        mask &= ~(kOtherUnsigned31 | kOtherSigned32);
+      }
+      bitset number_bits = bits & kPlainNumber & mask;
+      if (number_bits != 0) {
+        bitset lub = Lub(Min(number_bits), Max(number_bits)) & mask;
+        CHECK(lub == number_bits);
+      }
+    }
+
     return Config::from_bitset(bits);
   }
   static TypeHandle New(bitset bits, Region* region) {
@@ -1031,7 +1065,7 @@ struct BoundsImpl {
     TypeHandle lower = Type::Union(b1.lower, b2.lower, region);
     TypeHandle upper = Type::Intersect(b1.upper, b2.upper, region);
     // Lower bounds are considered approximate; correct as necessary.
-    lower = Type::Intersect(lower, upper, region);
+    if (!lower->Is(upper)) lower = upper;
     return BoundsImpl(lower, upper);
   }
 
@@ -1043,14 +1077,16 @@ struct BoundsImpl {
   }
 
   static BoundsImpl NarrowLower(BoundsImpl b, TypeHandle t, Region* region) {
-    // Lower bounds are considered approximate, correct as necessary.
-    t = Type::Intersect(t, b.upper, region);
     TypeHandle lower = Type::Union(b.lower, t, region);
+    // Lower bounds are considered approximate; correct as necessary.
+    if (!lower->Is(b.upper)) lower = b.upper;
     return BoundsImpl(lower, b.upper);
   }
   static BoundsImpl NarrowUpper(BoundsImpl b, TypeHandle t, Region* region) {
-    TypeHandle lower = Type::Intersect(b.lower, t, region);
+    TypeHandle lower = b.lower;
     TypeHandle upper = Type::Intersect(b.upper, t, region);
+    // Lower bounds are considered approximate; correct as necessary.
+    if (!lower->Is(upper)) lower = upper;
     return BoundsImpl(lower, upper);
   }
 
index 1cfaf64..17deb6d 100644 (file)
@@ -394,7 +394,8 @@ void AstTyper::VisitLiteral(Literal* expr) {
 
 
 void AstTyper::VisitRegExpLiteral(RegExpLiteral* expr) {
-  NarrowType(expr, Bounds(Type::RegExp(zone())));
+  // TODO(rossberg): Reintroduce RegExp type.
+  NarrowType(expr, Bounds(Type::Object(zone())));
 }
 
 
@@ -484,19 +485,38 @@ void AstTyper::VisitThrow(Throw* expr) {
 
 void AstTyper::VisitProperty(Property* expr) {
   // Collect type feedback.
-  TypeFeedbackId id = expr->PropertyFeedbackId();
-  expr->set_is_uninitialized(oracle()->LoadIsUninitialized(id));
+  FeedbackVectorICSlot slot(FeedbackVectorICSlot::Invalid());
+  TypeFeedbackId id(TypeFeedbackId::None());
+  if (FLAG_vector_ics) {
+    slot = expr->PropertyFeedbackSlot();
+    expr->set_is_uninitialized(oracle()->LoadIsUninitialized(slot));
+  } else {
+    id = expr->PropertyFeedbackId();
+    expr->set_is_uninitialized(oracle()->LoadIsUninitialized(id));
+  }
+
   if (!expr->IsUninitialized()) {
     if (expr->key()->IsPropertyName()) {
       Literal* lit_key = expr->key()->AsLiteral();
       DCHECK(lit_key != NULL && lit_key->value()->IsString());
       Handle<String> name = Handle<String>::cast(lit_key->value());
-      oracle()->PropertyReceiverTypes(id, name, expr->GetReceiverTypes());
+      if (FLAG_vector_ics) {
+        oracle()->PropertyReceiverTypes(slot, name, expr->GetReceiverTypes());
+      } else {
+        oracle()->PropertyReceiverTypes(id, name, expr->GetReceiverTypes());
+      }
     } else {
       bool is_string;
-      oracle()->KeyedPropertyReceiverTypes(
-          id, expr->GetReceiverTypes(), &is_string);
+      IcCheckType key_type;
+      if (FLAG_vector_ics) {
+        oracle()->KeyedPropertyReceiverTypes(slot, expr->GetReceiverTypes(),
+                                             &is_string, &key_type);
+      } else {
+        oracle()->KeyedPropertyReceiverTypes(id, expr->GetReceiverTypes(),
+                                             &is_string, &key_type);
+      }
       expr->set_is_string_access(is_string);
+      expr->set_key_type(key_type);
     }
   }
 
@@ -510,15 +530,20 @@ void AstTyper::VisitProperty(Property* expr) {
 void AstTyper::VisitCall(Call* expr) {
   // Collect type feedback.
   RECURSE(Visit(expr->expression()));
-  if (!expr->expression()->IsProperty() &&
-      expr->IsUsingCallFeedbackSlot(isolate()) &&
-      oracle()->CallIsMonomorphic(expr->CallFeedbackSlot())) {
-    expr->set_target(oracle()->GetCallTarget(expr->CallFeedbackSlot()));
-    Handle<AllocationSite> site =
-        oracle()->GetCallAllocationSite(expr->CallFeedbackSlot());
-    expr->set_allocation_site(site);
+  bool is_uninitialized = true;
+  if (expr->IsUsingCallFeedbackSlot(isolate())) {
+    FeedbackVectorICSlot slot = expr->CallFeedbackSlot();
+    is_uninitialized = oracle()->CallIsUninitialized(slot);
+    if (!expr->expression()->IsProperty() &&
+        oracle()->CallIsMonomorphic(slot)) {
+      expr->set_target(oracle()->GetCallTarget(slot));
+      Handle<AllocationSite> site = oracle()->GetCallAllocationSite(slot);
+      expr->set_allocation_site(site);
+    }
   }
 
+  expr->set_is_uninitialized(is_uninitialized);
+
   ZoneList<Expression*>* args = expr->arguments();
   for (int i = 0; i < args->length(); ++i) {
     Expression* arg = args->at(i);
index d5685c9..525c6f8 100644 (file)
@@ -863,7 +863,7 @@ class SimpleStringBuilder {
   // compute the length of the input string.
   void AddString(const char* s);
 
-  // Add the first 'n' characters of the given string 's' to the
+  // Add the first 'n' characters of the given 0-terminated string 's' to the
   // builder. The input string must have enough characters.
   void AddSubstring(const char* s, int n);
 
index 62c3da4..b04ccc0 100644 (file)
 #include "src/hydrogen.h"
 #include "src/isolate.h"
 #include "src/lithium-allocator.h"
+#include "src/natives.h"
 #include "src/objects.h"
 #include "src/runtime-profiler.h"
 #include "src/sampler.h"
 #include "src/serialize.h"
+#include "src/snapshot.h"
 
 
 namespace v8 {
@@ -29,6 +31,11 @@ namespace internal {
 
 V8_DECLARE_ONCE(init_once);
 
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+V8_DECLARE_ONCE(init_natives_once);
+V8_DECLARE_ONCE(init_snapshot_once);
+#endif
+
 v8::ArrayBuffer::Allocator* V8::array_buffer_allocator_ = NULL;
 v8::Platform* V8::platform_ = NULL;
 
@@ -117,4 +124,21 @@ v8::Platform* V8::GetCurrentPlatform() {
   return platform_;
 }
 
+
+void V8::SetNativesBlob(StartupData* natives_blob) {
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+  base::CallOnce(&init_natives_once, &SetNativesFromFile, natives_blob);
+#else
+  CHECK(false);
+#endif
+}
+
+
+void V8::SetSnapshotBlob(StartupData* snapshot_blob) {
+#ifdef V8_USE_EXTERNAL_STARTUP_DATA
+  base::CallOnce(&init_snapshot_once, &SetSnapshotFromFile, snapshot_blob);
+#else
+  CHECK(false);
+#endif
+}
 } }  // namespace v8::internal
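
SetNativesBlob and SetSnapshotBlob let an embedder built with
V8_USE_EXTERNAL_STARTUP_DATA supply the natives and snapshot blobs from
external files rather than linking them into the binary; the CallOnce guards
make each setter take effect exactly once. A hypothetical embedder-side
loader (StartupDataSketch, its fields, and the file name are assumptions; the
real StartupData layout is not shown in this diff):

    #include <cstdio>
    #include <cstdlib>

    // Hypothetical stand-in for v8::StartupData; field names are assumed.
    struct StartupDataSketch {
      const char* data;
      int raw_size;
    };

    // Read a blob file into memory; the result would then be handed to
    // V8::SetNativesBlob() or V8::SetSnapshotBlob() before initialization.
    StartupDataSketch LoadBlob(const char* path) {
      StartupDataSketch result = {nullptr, 0};
      std::FILE* file = std::fopen(path, "rb");
      if (file == nullptr) return result;
      std::fseek(file, 0, SEEK_END);
      long size = std::ftell(file);
      std::rewind(file);
      if (size <= 0) {
        std::fclose(file);
        return result;
      }
      char* data = static_cast<char*>(std::malloc(size));
      if (data != nullptr &&
          std::fread(data, 1, size, file) == static_cast<size_t>(size)) {
        result.data = data;
        result.raw_size = static_cast<int>(size);
      }
      std::fclose(file);
      return result;
    }

    int main() {
      StartupDataSketch natives = LoadBlob("natives_blob.bin");
      return natives.data == nullptr;  // nonzero exit if the blob is missing
    }
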
index 13c33e1..4267434 100644 (file)
@@ -9,8 +9,8 @@
 #ifndef V8_V8_H_
 #define V8_V8_H_
 
-#if defined(GOOGLE3)
-// Google3 special flag handling.
+#if defined(GOOGLE3) || defined(DCHECK_ALWAYS_ON)
+// Google3 and Chromium special flag handling.
 #if defined(DEBUG) && defined(NDEBUG)
 // V8 only uses DEBUG and whenever it is set we are building a debug
 // version of V8. We do not use NDEBUG and simply undef it here for
@@ -82,6 +82,9 @@ class V8 : public AllStatic {
   static void ShutdownPlatform();
   static v8::Platform* GetCurrentPlatform();
 
+  static void SetNativesBlob(StartupData* natives_blob);
+  static void SetSnapshotBlob(StartupData* snapshot_blob);
+
  private:
   static void InitializeOncePerProcessImpl();
   static void InitializeOncePerProcess();
index 6215ab0..ac96f63 100644 (file)
@@ -163,19 +163,9 @@ function GlobalParseFloat(string) {
 function GlobalEval(x) {
   if (!IS_STRING(x)) return x;
 
-  // For consistency with JSC we require the global object passed to
-  // eval to be the global object from which 'eval' originated. This
-  // is not mandated by the spec.
-  // We only throw if the global has been detached, since we need the
-  // receiver as this-value for the call.
-  if (!%IsAttachedGlobal(global)) {
-    throw new $EvalError('The "this" value passed to eval must ' +
-                         'be the global object from which eval originated');
-  }
-
   var global_proxy = %GlobalProxy(global);
 
-  var f = %CompileString(x, false);
+  var f = %CompileString(x, false, 0);
   if (!IS_FUNCTION(f)) return f;
 
   return %_CallFunction(global_proxy, f);
@@ -897,15 +887,6 @@ function DefineArrayProperty(obj, p, desc, should_throw) {
         break;
       }
     }
-    // Make sure the below call to DefineObjectProperty() doesn't overwrite
-    // any magic "length" property by removing the value.
-    // TODO(mstarzinger): This hack should be removed once we have addressed the
-    // respective TODO in Runtime_DefineDataPropertyUnchecked.
-    // For the time being, we need a hack to prevent Object.observe from
-    // generating two change records.
-    obj.length = new_length;
-    desc.value_ = UNDEFINED;
-    desc.hasValue_ = false;
     threw = !DefineObjectProperty(obj, "length", desc, should_throw) || threw;
     if (emit_splice) {
       EndPerformSplice(obj);
@@ -983,7 +964,7 @@ function ObjectGetPrototypeOf(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("called_on_non_object", ["Object.getPrototypeOf"]);
   }
-  return %GetPrototype(obj);
+  return %_GetPrototype(obj);
 }
 
 // ES6 section 19.1.2.19.
@@ -1041,6 +1022,7 @@ function ToNameArray(obj, trap, includeSymbols) {
 function ObjectGetOwnPropertyKeys(obj, filter) {
   var nameArrays = new InternalArray();
   filter |= PROPERTY_ATTRIBUTES_PRIVATE_SYMBOL;
+  var interceptorInfo = %GetInterceptorInfo(obj);
 
   // Find all the indexed properties.
 
@@ -1051,9 +1033,7 @@ function ObjectGetOwnPropertyKeys(obj, filter) {
       ownElementNames[i] = %_NumberToString(ownElementNames[i]);
     }
     nameArrays.push(ownElementNames);
-
     // Get names for indexed interceptor properties.
-    var interceptorInfo = %GetInterceptorInfo(obj);
     if ((interceptorInfo & 1) != 0) {
       var indexedInterceptorNames = %GetIndexedInterceptorElementNames(obj);
       if (!IS_UNDEFINED(indexedInterceptorNames)) {
@@ -1243,23 +1223,30 @@ function ProxyFix(obj) {
 
 
 // ES5 section 15.2.3.8.
-function ObjectSeal(obj) {
+function ObjectSealJS(obj) {
   if (!IS_SPEC_OBJECT(obj)) {
     throw MakeTypeError("called_on_non_object", ["Object.seal"]);
   }
-  if (%_IsJSProxy(obj)) {
-    ProxyFix(obj);
-  }
-  var names = ObjectGetOwnPropertyNames(obj);
-  for (var i = 0; i < names.length; i++) {
-    var name = names[i];
-    var desc = GetOwnPropertyJS(obj, name);
-    if (desc.isConfigurable()) {
-      desc.setConfigurable(false);
-      DefineOwnProperty(obj, name, desc, true);
+  var isProxy = %_IsJSProxy(obj);
+  if (isProxy || %HasSloppyArgumentsElements(obj) || %IsObserved(obj)) {
+    if (isProxy) {
+      ProxyFix(obj);
+    }
+    var names = ObjectGetOwnPropertyNames(obj);
+    for (var i = 0; i < names.length; i++) {
+      var name = names[i];
+      var desc = GetOwnPropertyJS(obj, name);
+      if (desc.isConfigurable()) {
+        desc.setConfigurable(false);
+        DefineOwnProperty(obj, name, desc, true);
+      }
     }
+    %PreventExtensions(obj);
+  } else {
+    // TODO(adamk): Is it worth going to this fast path if the
+    // object's properties are already in dictionary mode?
+    %ObjectSeal(obj);
   }
-  %PreventExtensions(obj);
   return obj;
 }
 
@@ -1372,7 +1359,7 @@ function ObjectIs(obj1, obj2) {
 
 // ECMA-262, Edition 6, section B.2.2.1.1
 function ObjectGetProto() {
-  return %GetPrototype(ToObject(this));
+  return %_GetPrototype(ToObject(this));
 }
 
 
@@ -1441,7 +1428,7 @@ function SetUpObject() {
     "isFrozen", ObjectIsFrozen,
     "isSealed", ObjectIsSealed,
     "preventExtensions", ObjectPreventExtension,
-    "seal", ObjectSeal
+    "seal", ObjectSealJS
     // deliverChangeRecords, getNotifier, observe and unobserve are added
     // in object-observe.js.
   ));
@@ -1829,7 +1816,7 @@ function FunctionBind(this_arg) { // Length is 1.
 }
 
 
-function NewFunctionString(arguments, function_token) {
+function NewFunctionFromString(arguments, function_token) {
   var n = arguments.length;
   var p = '';
   if (n > 1) {
@@ -1846,21 +1833,20 @@ function NewFunctionString(arguments, function_token) {
     // If the formal parameters include an unbalanced block comment, the
     // function must be rejected. Since JavaScript does not allow nested
     // comments, we can include a trailing block comment to catch this.
-    p += '\n/' + '**/';
+    p += '\n\x2f**\x2f';
   }
   var body = (n > 0) ? ToString(arguments[n - 1]) : '';
-  return '(' + function_token + '(' + p + ') {\n' + body + '\n})';
+  var head = '(' + function_token + '(' + p + ') {\n';
+  var src = head + body + '\n})';
+  var global_proxy = %GlobalProxy(global);
+  var f = %_CallFunction(global_proxy, %CompileString(src, true, head.length));
+  %FunctionMarkNameShouldPrintAsAnonymous(f);
+  return f;
 }
 
 
 function FunctionConstructor(arg1) {  // length == 1
-  var source = NewFunctionString(arguments, 'function');
-  var global_proxy = %GlobalProxy(global);
-  // Compile the string in the constructor and not a helper so that errors
-  // appear to come from here.
-  var f = %_CallFunction(global_proxy, %CompileString(source, true));
-  %FunctionMarkNameShouldPrintAsAnonymous(f);
-  return f;
+  return NewFunctionFromString(arguments, 'function');
 }
 
 
index b799126..e2d9d49 100644 (file)
@@ -27,7 +27,7 @@ base::Atomic32 g_locker_was_ever_used_ = 0;
 // the lock for a given isolate.
 void Locker::Initialize(v8::Isolate* isolate) {
   DCHECK(isolate != NULL);
-  has_lock_= false;
+  has_lock_ = false;
   top_level_ = true;
   isolate_ = reinterpret_cast<i::Isolate*>(isolate);
   // Record that the Locker has been used at least once.
index 6588312..3b1559d 100644 (file)
@@ -60,7 +60,7 @@ bool Variable::IsGlobalObjectProperty() const {
   // activation frame.
   return (IsDynamicVariableMode(mode_) ||
           (IsDeclaredVariableMode(mode_) && !IsLexicalVariableMode(mode_)))
-      && scope_ != NULL && scope_->is_global_scope();
+      && scope_ != NULL && scope_->is_script_scope();
 }
 
 
index a8cf5e3..93bfb4a 100644 (file)
@@ -64,7 +64,7 @@ class Variable: public ZoneObject {
 
   // The source code for an eval() call may refer to a variable that is
   // in an outer scope about which we don't know anything (it may not
-  // be the global scope). scope() is NULL in that case. Currently the
+  // be the script scope). scope() is NULL in that case. Currently the
   // scope is only used to follow the context chain length.
   Scope* scope() const { return scope_; }
 
index 601a599..e84633d 100644 (file)
@@ -33,9 +33,9 @@
 // NOTE these macros are used by some of the tool scripts and the build
 // system so their names cannot be changed without changing the scripts.
 #define MAJOR_VERSION     3
-#define MINOR_VERSION     30
-#define BUILD_NUMBER      37
-#define PATCH_LEVEL       0
+#define MINOR_VERSION     31
+#define BUILD_NUMBER      74
+#define PATCH_LEVEL       1
 // Use 1 for candidates and 0 otherwise.
 // (Boolean macro values are not supported by all preprocessors.)
 #define IS_CANDIDATE_VERSION 0
index 4f60005..008ed27 100644 (file)
@@ -5,6 +5,8 @@
 #ifndef V8_VERSION_H_
 #define V8_VERSION_H_
 
+#include "src/base/functional.h"
+
 namespace v8 {
 namespace internal {
 
@@ -16,7 +18,9 @@ class Version {
   static int GetBuild() { return build_; }
   static int GetPatch() { return patch_; }
   static bool IsCandidate() { return candidate_; }
-  static int Hash() { return (major_ << 20) ^ (minor_ << 10) ^ patch_; }
+  static int Hash() {
+    return static_cast<int>(base::hash_combine(major_, minor_, build_, patch_));
+  }
 
   // Calculate the V8 version string.
   static void GetString(Vector<char> str);
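
The old hash, (major_ << 20) ^ (minor_ << 10) ^ patch_, never looked at the
build number, so consecutive builds on the same branch hashed identically;
base::hash_combine folds in all four components. A sketch of the effect, with
a Boost-style combine step assumed similar in spirit to base::hash_combine
(whose exact mixing is not shown here):

    #include <cassert>
    #include <cstddef>
    #include <functional>
    #include <initializer_list>

    // Boost-style hash combine; the constant is the usual golden-ratio mix.
    std::size_t HashCombine(std::initializer_list<int> values) {
      std::size_t seed = 0;
      for (int v : values)
        seed ^= std::hash<int>()(v) + 0x9e3779b9u + (seed << 6) + (seed >> 2);
      return seed;
    }

    int main() {
      // The build number now participates: 3.31.74.1 and 3.31.75.1 differ
      // (with any reasonable std::hash<int>).
      assert(HashCombine({3, 31, 74, 1}) != HashCombine({3, 31, 75, 1}));
    }
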
index 273060a..a44c3d7 100644 (file)
@@ -96,16 +96,6 @@ function WeakMapDelete(key) {
 }
 
 
-function WeakMapClear() {
-  if (!IS_WEAKMAP(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakMap.prototype.clear', this]);
-  }
-  // Replace the internal table with a new empty table.
-  %WeakCollectionInitialize(this);
-}
-
-
 // -------------------------------------------------------------------
 
 function SetUpWeakMap() {
@@ -122,8 +112,7 @@ function SetUpWeakMap() {
     "get", WeakMapGet,
     "set", WeakMapSet,
     "has", WeakMapHas,
-    "delete", WeakMapDelete,
-    "clear", WeakMapClear
+    "delete", WeakMapDelete
   ));
 }
 
@@ -198,16 +187,6 @@ function WeakSetDelete(value) {
 }
 
 
-function WeakSetClear() {
-  if (!IS_WEAKSET(this)) {
-    throw MakeTypeError('incompatible_method_receiver',
-                        ['WeakSet.prototype.clear', this]);
-  }
-  // Replace the internal table with a new empty table.
-  %WeakCollectionInitialize(this);
-}
-
-
 // -------------------------------------------------------------------
 
 function SetUpWeakSet() {
@@ -223,8 +202,7 @@ function SetUpWeakSet() {
   InstallFunctions($WeakSet.prototype, DONT_ENUM, $Array(
     "add", WeakSetAdd,
     "has", WeakSetHas,
-    "delete", WeakSetDelete,
-    "clear", WeakSetClear
+    "delete", WeakSetDelete
   ));
 }
 
index c3d2cdf..caf7af6 100644 (file)
@@ -189,6 +189,65 @@ void Assembler::emit_optional_rex_32(const Operand& op) {
 }
 
 
+// byte 1 of 3-byte VEX
+void Assembler::emit_vex3_byte1(XMMRegister reg, XMMRegister rm,
+                                LeadingOpcode m) {
+  byte rxb = ~((reg.high_bit() << 2) | rm.high_bit()) << 5;
+  emit(rxb | m);
+}
+
+
+// byte 1 of 3-byte VEX
+void Assembler::emit_vex3_byte1(XMMRegister reg, const Operand& rm,
+                                LeadingOpcode m) {
+  byte rxb = ~((reg.high_bit() << 2) | rm.rex_) << 5;
+  emit(rxb | m);
+}
+
+
+// byte 1 of 2-byte VEX
+void Assembler::emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
+                                SIMDPrefix pp) {
+  byte rv = ~((reg.high_bit() << 4) | v.code()) << 3;
+  emit(rv | l | pp);
+}
+
+
+// byte 2 of 3-byte VEX
+void Assembler::emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
+                                SIMDPrefix pp) {
+  emit(w | ((~v.code() & 0xf) << 3) | l | pp);
+}
+
+
+void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
+                                XMMRegister rm, VectorLength l, SIMDPrefix pp,
+                                LeadingOpcode mm, VexW w) {
+  if (rm.high_bit() || mm != k0F || w != kW0) {
+    emit_vex3_byte0();
+    emit_vex3_byte1(reg, rm, mm);
+    emit_vex3_byte2(w, vreg, l, pp);
+  } else {
+    emit_vex2_byte0();
+    emit_vex2_byte1(reg, vreg, l, pp);
+  }
+}
+
+
+void Assembler::emit_vex_prefix(XMMRegister reg, XMMRegister vreg,
+                                const Operand& rm, VectorLength l,
+                                SIMDPrefix pp, LeadingOpcode mm, VexW w) {
+  if (rm.rex_ || mm != k0F || w != kW0) {
+    emit_vex3_byte0();
+    emit_vex3_byte1(reg, rm, mm);
+    emit_vex3_byte2(w, vreg, l, pp);
+  } else {
+    emit_vex2_byte0();
+    emit_vex2_byte1(reg, vreg, l, pp);
+  }
+}
+
+
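
emit_vex_prefix chooses between the two VEX forms: the compact 2-byte prefix
(C5) is legal only when the rm operand's extension bit is clear, the leading
opcode map is 0F, and W is 0; anything else requires the 3-byte form (C4).
A stand-alone sketch of the packing (field layout per the Intel SDM;
parameter names are illustrative, not V8's):

    #include <cstdint>
    #include <vector>

    void EmitVexPrefixSketch(std::vector<uint8_t>* out, unsigned reg_high_bit,
                             unsigned rm_high_bit, unsigned vreg_code,
                             unsigned l, unsigned pp, unsigned mm, unsigned w) {
      if (rm_high_bit || mm != 1 /* 0F */ || w != 0) {
        out->push_back(0xC4);  // 3-byte VEX
        out->push_back(static_cast<uint8_t>(
            (~((reg_high_bit << 2) | rm_high_bit) << 5) | mm));
        out->push_back(static_cast<uint8_t>(
            (w << 7) | ((~vreg_code & 0xFu) << 3) | (l << 2) | pp));
      } else {
        out->push_back(0xC5);  // 2-byte VEX
        out->push_back(static_cast<uint8_t>(
            (~((reg_high_bit << 4) | vreg_code) << 3) | (l << 2) | pp));
      }
    }

    int main() {
      std::vector<uint8_t> code;
      // vaddsd xmm0, xmm1, xmm2: low registers, 0F map, W0 -> C5 F3 58 C2.
      EmitVexPrefixSketch(&code, 0, 0, 1, 0, 3 /* F2 */, 1 /* 0F */, 0);
      code.push_back(0x58);  // addsd
      code.push_back(0xC2);  // ModRM: xmm0, xmm2
      return !(code[0] == 0xC5 && code[1] == 0xF3);
    }
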
 Address Assembler::target_address_at(Address pc,
                                      ConstantPoolArray* constant_pool) {
   return Memory::int32_at(pc) + pc + 4;
index dfd51a4..469ebe9 100644 (file)
@@ -27,12 +27,19 @@ void CpuFeatures::ProbeImpl(bool cross_compile) {
   if (cpu.has_sse41() && FLAG_enable_sse4_1) supported_ |= 1u << SSE4_1;
   if (cpu.has_sse3() && FLAG_enable_sse3) supported_ |= 1u << SSE3;
   // SAHF is not generally available in long mode.
-  if (cpu.has_sahf() && FLAG_enable_sahf) supported_|= 1u << SAHF;
+  if (cpu.has_sahf() && FLAG_enable_sahf) supported_ |= 1u << SAHF;
+  if (cpu.has_avx() && FLAG_enable_avx) supported_ |= 1u << AVX;
+  if (cpu.has_fma3() && FLAG_enable_fma3) supported_ |= 1u << FMA3;
 }
 
 
 void CpuFeatures::PrintTarget() { }
-void CpuFeatures::PrintFeatures() { }
+void CpuFeatures::PrintFeatures() {
+  printf("SSE3=%d SSE4_1=%d SAHF=%d AVX=%d FMA3=%d\n",
+         CpuFeatures::IsSupported(SSE3), CpuFeatures::IsSupported(SSE4_1),
+         CpuFeatures::IsSupported(SAHF), CpuFeatures::IsSupported(AVX),
+         CpuFeatures::IsSupported(FMA3));
+}
 
 
 // -----------------------------------------------------------------------------
@@ -1383,6 +1390,20 @@ void Assembler::movl(const Operand& dst, Label* src) {
 }
 
 
+void Assembler::movsxbl(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  if (!src.is_byte_register()) {
+    // Register is not one of al, bl, cl, dl.  Its encoding needs REX.
+    emit_rex_32(dst, src);
+  } else {
+    emit_optional_rex_32(dst, src);
+  }
+  emit(0x0F);
+  emit(0xBE);
+  emit_modrm(dst, src);
+}
+
+
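
The register-register movsxbl above needs the explicit REX escape because,
without a REX prefix, byte-operand register codes 4-7 select the legacy
high-byte registers ah/ch/dh/bh instead of spl/bpl/sil/dil. A one-line
predicate capturing the rule (x64 register numbering: codes 0-3 are
al/cl/dl/bl):

    #include <cassert>

    // Any byte register other than al/cl/dl/bl (codes 0-3) must be encoded
    // with REX, or the encoding would address ah/ch/dh/bh instead.
    bool NeedsRexForByteRegister(int reg_code) { return reg_code > 3; }

    int main() {
      assert(!NeedsRexForByteRegister(0));  // al encodes without REX
      assert(NeedsRexForByteRegister(4));   // spl without REX would mean ah
    }
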
 void Assembler::movsxbl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(dst, src);
@@ -1401,6 +1422,15 @@ void Assembler::movsxbq(Register dst, const Operand& src) {
 }
 
 
+void Assembler::movsxwl(Register dst, Register src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0xBF);
+  emit_modrm(dst, src);
+}
+
+
 void Assembler::movsxwl(Register dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   emit_optional_rex_32(dst, src);
@@ -2615,6 +2645,104 @@ void Assembler::movapd(XMMRegister dst, XMMRegister src) {
 }
 
 
+void Assembler::addss(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::addss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x58);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subss(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::subss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5C);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulss(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::mulss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x59);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divss(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::divss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit(0xF3);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x5E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2E);
+  emit_sse_operand(dst, src);
+}
+
+
+void Assembler::ucomiss(XMMRegister dst, const Operand& src) {
+  EnsureSpace ensure_space(this);
+  emit_optional_rex_32(dst, src);
+  emit(0x0F);
+  emit(0x2E);
+  emit_sse_operand(dst, src);
+}
+
+
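The four arithmetic pairs above share one pattern: a mandatory F3 prefix selects the scalar-single form of a 0F-map opcode that the F2-prefixed double variants also use (58 = add, 5C = sub, 59 = mul, 5E = div), while ucomiss (0F 2E) takes no mandatory prefix at all. For example, addss(xmm0, xmm1) assembles to F3 0F 58 C1.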
 void Assembler::movss(XMMRegister dst, const Operand& src) {
   EnsureSpace ensure_space(this);
   emit(0xF3);  // single
@@ -3054,6 +3182,67 @@ void Assembler::pcmpeqd(XMMRegister dst, XMMRegister src) {
 }
 
 
+// AVX instructions
+void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
+                       XMMRegister src2) {
+  DCHECK(IsEnabled(FMA3));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW1);
+  emit(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vfmasd(byte op, XMMRegister dst, XMMRegister src1,
+                       const Operand& src2) {
+  DCHECK(IsEnabled(FMA3));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW1);
+  emit(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
+                       XMMRegister src2) {
+  DCHECK(IsEnabled(FMA3));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW0);
+  emit(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vfmass(byte op, XMMRegister dst, XMMRegister src1,
+                       const Operand& src2) {
+  DCHECK(IsEnabled(FMA3));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(dst, src1, src2, kLIG, k66, k0F38, kW0);
+  emit(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
+                    XMMRegister src2) {
+  DCHECK(IsEnabled(AVX));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(dst, src1, src2, kLIG, kF2, k0F, kWIG);
+  emit(op);
+  emit_sse_operand(dst, src2);
+}
+
+
+void Assembler::vsd(byte op, XMMRegister dst, XMMRegister src1,
+                    const Operand& src2) {
+  DCHECK(IsEnabled(AVX));
+  EnsureSpace ensure_space(this);
+  emit_vex_prefix(dst, src1, src2, kLIG, kF2, k0F, kWIG);
+  emit(op);
+  emit_sse_operand(dst, src2);
+}
+
+
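vfmasd and vfmass differ only in the VEX.W bit they pass (kW1 versus kW0); in the FMA3 encoding, W selects double versus single precision. vsd wraps the F2-prefixed 0F map, i.e. the legacy scalar-double encodings behind a VEX prefix, so the opcode bytes handed in from the header (0x58, 0x5c, 0x59, 0x5e) are exactly those of addsd/subsd/mulsd/divsd. For instance, vaddsd(xmm0, xmm1, xmm2) emits c5 f3 58 c2.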
 void Assembler::emit_sse_operand(XMMRegister reg, const Operand& adr) {
   Register ireg = { reg.code() };
   emit_operand(ireg, adr);
index 3b55396..2ebae3b 100644 (file)
@@ -731,8 +731,10 @@ class Assembler : public AssemblerBase {
   void movq(Register dst, int64_t value);
   void movq(Register dst, uint64_t value);
 
+  void movsxbl(Register dst, Register src);
   void movsxbl(Register dst, const Operand& src);
   void movsxbq(Register dst, const Operand& src);
+  void movsxwl(Register dst, Register src);
   void movsxwl(Register dst, const Operand& src);
   void movsxwq(Register dst, const Operand& src);
   void movsxlq(Register dst, Register src);
@@ -1012,6 +1014,17 @@ class Assembler : public AssemblerBase {
   void sahf();
 
   // SSE instructions
+  void addss(XMMRegister dst, XMMRegister src);
+  void addss(XMMRegister dst, const Operand& src);
+  void subss(XMMRegister dst, XMMRegister src);
+  void subss(XMMRegister dst, const Operand& src);
+  void mulss(XMMRegister dst, XMMRegister src);
+  void mulss(XMMRegister dst, const Operand& src);
+  void divss(XMMRegister dst, XMMRegister src);
+  void divss(XMMRegister dst, const Operand& src);
+
+  void ucomiss(XMMRegister dst, XMMRegister src);
+  void ucomiss(XMMRegister dst, const Operand& src);
   void movaps(XMMRegister dst, XMMRegister src);
   void movss(XMMRegister dst, const Operand& src);
   void movss(const Operand& dst, XMMRegister src);
@@ -1121,6 +1134,184 @@ class Assembler : public AssemblerBase {
 
   void roundsd(XMMRegister dst, XMMRegister src, RoundingMode mode);
 
+  // AVX instructions
+  void vfmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0x99, dst, src1, src2);
+  }
+  void vfmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0xa9, dst, src1, src2);
+  }
+  void vfmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0xb9, dst, src1, src2);
+  }
+  void vfmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0x99, dst, src1, src2);
+  }
+  void vfmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xa9, dst, src1, src2);
+  }
+  void vfmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xb9, dst, src1, src2);
+  }
+  void vfmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0x9b, dst, src1, src2);
+  }
+  void vfmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0xab, dst, src1, src2);
+  }
+  void vfmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0xbb, dst, src1, src2);
+  }
+  void vfmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0x9b, dst, src1, src2);
+  }
+  void vfmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xab, dst, src1, src2);
+  }
+  void vfmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xbb, dst, src1, src2);
+  }
+  void vfnmadd132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0x9d, dst, src1, src2);
+  }
+  void vfnmadd213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0xad, dst, src1, src2);
+  }
+  void vfnmadd231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0xbd, dst, src1, src2);
+  }
+  void vfnmadd132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0x9d, dst, src1, src2);
+  }
+  void vfnmadd213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xad, dst, src1, src2);
+  }
+  void vfnmadd231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xbd, dst, src1, src2);
+  }
+  void vfnmsub132sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0x9f, dst, src1, src2);
+  }
+  void vfnmsub213sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0xaf, dst, src1, src2);
+  }
+  void vfnmsub231sd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmasd(0xbf, dst, src1, src2);
+  }
+  void vfnmsub132sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0x9f, dst, src1, src2);
+  }
+  void vfnmsub213sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xaf, dst, src1, src2);
+  }
+  void vfnmsub231sd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmasd(0xbf, dst, src1, src2);
+  }
+  void vfmasd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+  void vfmasd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
+  void vfmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0x99, dst, src1, src2);
+  }
+  void vfmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0xa9, dst, src1, src2);
+  }
+  void vfmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0xb9, dst, src1, src2);
+  }
+  void vfmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0x99, dst, src1, src2);
+  }
+  void vfmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xa9, dst, src1, src2);
+  }
+  void vfmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xb9, dst, src1, src2);
+  }
+  void vfmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0x9b, dst, src1, src2);
+  }
+  void vfmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0xab, dst, src1, src2);
+  }
+  void vfmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0xbb, dst, src1, src2);
+  }
+  void vfmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0x9b, dst, src1, src2);
+  }
+  void vfmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xab, dst, src1, src2);
+  }
+  void vfmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xbb, dst, src1, src2);
+  }
+  void vfnmadd132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0x9d, dst, src1, src2);
+  }
+  void vfnmadd213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0xad, dst, src1, src2);
+  }
+  void vfnmadd231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0xbd, dst, src1, src2);
+  }
+  void vfnmadd132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0x9d, dst, src1, src2);
+  }
+  void vfnmadd213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xad, dst, src1, src2);
+  }
+  void vfnmadd231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xbd, dst, src1, src2);
+  }
+  void vfnmsub132ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0x9f, dst, src1, src2);
+  }
+  void vfnmsub213ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0xaf, dst, src1, src2);
+  }
+  void vfnmsub231ss(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vfmass(0xbf, dst, src1, src2);
+  }
+  void vfnmsub132ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0x9f, dst, src1, src2);
+  }
+  void vfnmsub213ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xaf, dst, src1, src2);
+  }
+  void vfnmsub231ss(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vfmass(0xbf, dst, src1, src2);
+  }
+  void vfmass(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+  void vfmass(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
+  void vaddsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsd(0x58, dst, src1, src2);
+  }
+  void vaddsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x58, dst, src1, src2);
+  }
+  void vsubsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsd(0x5c, dst, src1, src2);
+  }
+  void vsubsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x5c, dst, src1, src2);
+  }
+  void vmulsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsd(0x59, dst, src1, src2);
+  }
+  void vmulsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x59, dst, src1, src2);
+  }
+  void vdivsd(XMMRegister dst, XMMRegister src1, XMMRegister src2) {
+    vsd(0x5e, dst, src1, src2);
+  }
+  void vdivsd(XMMRegister dst, XMMRegister src1, const Operand& src2) {
+    vsd(0x5e, dst, src1, src2);
+  }
+  void vsd(byte op, XMMRegister dst, XMMRegister src1, XMMRegister src2);
+  void vsd(byte op, XMMRegister dst, XMMRegister src1, const Operand& src2);
+
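The 132/213/231 suffixes follow the Intel naming scheme: numbering the operands 1 = dst, 2 = src1, 3 = src2, the digits a, b, c mean dst = op_a * op_b + op_c. For the add forms:

    vfmadd132sd(dst, src1, src2);  // dst = dst  * src2 + src1
    vfmadd213sd(dst, src1, src2);  // dst = src1 * dst  + src2
    vfmadd231sd(dst, src1, src2);  // dst = src1 * src2 + dst

The fmsub forms subtract the addend, fnmadd negates the product, fnmsub does both, and the ss family is identical at single precision.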
   // Debugging
   void Print();
 
@@ -1314,6 +1505,28 @@ class Assembler : public AssemblerBase {
     }
   }
 
+  // Emit a VEX prefix.
+  enum SIMDPrefix { kNone = 0x0, k66 = 0x1, kF3 = 0x2, kF2 = 0x3 };
+  enum VectorLength { kL128 = 0x0, kL256 = 0x4, kLIG = kL128 };
+  enum VexW { kW0 = 0x0, kW1 = 0x80, kWIG = kW0 };
+  enum LeadingOpcode { k0F = 0x1, k0F38 = 0x2, k0F3A = 0x3 };
+
+  void emit_vex2_byte0() { emit(0xc5); }
+  inline void emit_vex2_byte1(XMMRegister reg, XMMRegister v, VectorLength l,
+                              SIMDPrefix pp);
+  void emit_vex3_byte0() { emit(0xc4); }
+  inline void emit_vex3_byte1(XMMRegister reg, XMMRegister rm, LeadingOpcode m);
+  inline void emit_vex3_byte1(XMMRegister reg, const Operand& rm,
+                              LeadingOpcode m);
+  inline void emit_vex3_byte2(VexW w, XMMRegister v, VectorLength l,
+                              SIMDPrefix pp);
+  inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, XMMRegister rm,
+                              VectorLength l, SIMDPrefix pp, LeadingOpcode m,
+                              VexW w);
+  inline void emit_vex_prefix(XMMRegister reg, XMMRegister v, const Operand& rm,
+                              VectorLength l, SIMDPrefix pp, LeadingOpcode m,
+                              VexW w);
+
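For reference, the fields these helpers pack (register fields are stored one's-complemented, which is why the emitters and the disassembler below take ~ of the codes):

    two-byte VEX:    c5 | ~R ~vvvv L pp
    three-byte VEX:  c4 | ~R ~X ~B mmmmm | W ~vvvv L pp

Hence kW1 = 0x80 (bit 7 of the final byte), kL256 = 0x4 (bit 2), and pp occupies the low two bits.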
   // Emit the ModR/M byte, and optionally the SIB byte and
   // 1- or 4-byte offset for a memory operand.  Also encodes
   // the second operand of the operation, a register or operation
index 64ba351..ef3df65 100644 (file)
@@ -158,22 +158,20 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       // rax: initial map
       __ CmpInstanceType(rax, JS_FUNCTION_TYPE);
       __ j(equal, &rt_call);
-
       if (!is_api_function) {
         Label allocate;
         // The code below relies on these assumptions.
-        STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
-        STATIC_ASSERT(Map::ConstructionCount::kShift +
-                      Map::ConstructionCount::kSize == 32);
+        STATIC_ASSERT(Map::Counter::kShift + Map::Counter::kSize == 32);
         // Check if slack tracking is enabled.
         __ movl(rsi, FieldOperand(rax, Map::kBitField3Offset));
-        __ shrl(rsi, Immediate(Map::ConstructionCount::kShift));
-        __ j(zero, &allocate);  // JSFunction::kNoSlackTracking
+        __ shrl(rsi, Immediate(Map::Counter::kShift));
+        __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
+        __ j(less, &allocate);
         // Decrease generous allocation count.
         __ subl(FieldOperand(rax, Map::kBitField3Offset),
-                Immediate(1 << Map::ConstructionCount::kShift));
+                Immediate(1 << Map::Counter::kShift));
 
-        __ cmpl(rsi, Immediate(JSFunction::kFinishSlackTracking));
+        __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
         __ j(not_equal, &allocate);
 
         __ Push(rax);
@@ -184,7 +182,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
 
         __ Pop(rdi);
         __ Pop(rax);
-        __ xorl(rsi, rsi);  // JSFunction::kNoSlackTracking
+        __ movl(rsi, Immediate(Map::kSlackTrackingCounterEnd - 1));
 
         __ bind(&allocate);
       }
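The sentinel encoding (JSFunction::kNoSlackTracking == 0) is replaced by a counter range packed into the top bits of the map's bit field 3, which is what the STATIC_ASSERT above pins down. Roughly, in C++ terms (a sketch using the names from the patch):

    int counter = map->bit_field3() >> Map::Counter::kShift;  // top bits of the word
    if (counter >= Map::kSlackTrackingCounterEnd) {
      // Still tracking: decrement by 1 << Map::Counter::kShift; when the
      // pre-decrement value equals the end marker, finalize the instance size.
    }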
@@ -222,8 +220,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         Label no_inobject_slack_tracking;
 
         // Check if slack tracking is enabled.
-        __ cmpl(rsi, Immediate(JSFunction::kNoSlackTracking));
-        __ j(equal, &no_inobject_slack_tracking);
+        __ cmpl(rsi, Immediate(Map::kSlackTrackingCounterEnd));
+        __ j(less, &no_inobject_slack_tracking);
 
         // Allocate object with a slack.
         __ movzxbp(rsi,
index 5ea5f72..f327b50 100644 (file)
@@ -524,6 +524,11 @@ void MathPowStub::Generate(MacroAssembler* masm) {
 void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
+  // Ensure that the vector and slot registers won't be clobbered before
+  // calling the miss handler.
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(r8, r9, VectorLoadICDescriptor::VectorRegister(),
+                     VectorLoadICDescriptor::SlotRegister()));
 
   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r8,
                                                           r9, &miss);
@@ -873,10 +878,16 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
 
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register index = LoadDescriptor::NameRegister();
-  Register scratch = rbx;
+  Register scratch = rdi;
   Register result = rax;
   DCHECK(!scratch.is(receiver) && !scratch.is(index));
+  DCHECK(!FLAG_vector_ics ||
+         (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+          result.is(VectorLoadICDescriptor::SlotRegister())));
 
+  // StringCharAtGenerator doesn't use the result register until it has
+  // passed the different miss cases. If it did, we would have a conflict
+  // when FLAG_vector_ics is true.
   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
@@ -2087,6 +2098,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
   // rdi - function
   // rdx - slot id
   Isolate* isolate = masm->isolate();
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -2128,34 +2143,64 @@ void CallICStub::Generate(MacroAssembler* masm) {
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ movp(rcx, FieldOperand(rbx, rdx, times_pointer_size,
                             FixedArray::kHeaderSize));
   __ Cmp(rcx, TypeFeedbackVector::MegamorphicSentinel(isolate));
   __ j(equal, &slow_start);
+
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ jmp(&miss);
+  }
+
   __ Cmp(rcx, TypeFeedbackVector::UninitializedSentinel(isolate));
+  __ j(equal, &uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(rcx);
+  __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &miss);
+  __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+          TypeFeedbackVector::MegamorphicSentinel(isolate));
+  // We have to update statistics for runtime profiling.
+  __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(-1));
+  __ SmiAddConstant(FieldOperand(rbx, generic_offset), Smi::FromInt(1));
+  __ jmp(&slow_start);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(rdi, &miss);
+
+  // Goto miss case if we do not have a function.
+  __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx);
+  __ j(not_equal, &miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, rcx);
+  __ cmpp(rdi, rcx);
   __ j(equal, &miss);
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(rcx);
-    __ CmpObjectType(rcx, JS_FUNCTION_TYPE, rcx);
-    __ j(not_equal, &miss);
-    __ Move(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
-            TypeFeedbackVector::MegamorphicSentinel(isolate));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(-1));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ SmiAddConstant(FieldOperand(rbx, generic_offset), Smi::FromInt(1));
-    __ jmp(&slow_start);
-  }
+  // Update stats.
+  __ SmiAddConstant(FieldOperand(rbx, with_types_offset), Smi::FromInt(1));
+
+  // Store the function.
+  __ movp(FieldOperand(rbx, rdx, times_pointer_size, FixedArray::kHeaderSize),
+          rdi);
+
+  // Update the write barrier.
+  __ movp(rax, rdi);
+  __ RecordWriteArray(rbx, rax, rdx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&have_js_function);
 
-  // We are here because tracing is on or we are going monomorphic.
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
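Taken together, the rewritten sequence handles feedback transitions inline that previously always bounced to the runtime. The dispatch, in the order checked (sketch):

    slot == megamorphic sentinel    -> slow_start (generic call path)
    FLAG_trace_ic                   -> miss (let the runtime trace transitions)
    slot == uninitialized sentinel  -> if the callee is a JSFunction other than
                                       Array(), go monomorphic: store it in the
                                       slot, with_types++, write barrier, call
    slot holds some other function  -> go megamorphic: overwrite the slot,
                                       with_types--, generic++, then slow_start
    anything else                   -> miss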
 
@@ -3124,20 +3169,47 @@ void SubStringStub::Generate(MacroAssembler* masm) {
 
 void ToNumberStub::Generate(MacroAssembler* masm) {
   // The ToNumber stub takes one argument in rax.
-  Label check_heap_number, call_builtin;
-  __ JumpIfNotSmi(rax, &check_heap_number, Label::kNear);
+  Label not_smi;
+  __ JumpIfNotSmi(rax, &not_smi, Label::kNear);
   __ Ret();
+  __ bind(&not_smi);
 
-  __ bind(&check_heap_number);
+  Label not_heap_number;
   __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset),
                  Heap::kHeapNumberMapRootIndex);
-  __ j(not_equal, &call_builtin, Label::kNear);
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  Label not_string, slow_string;
+  __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, rdi);
+  // rax: object
+  // rdi: object map
+  __ j(above_equal, &not_string, Label::kNear);
+  // Check if string has a cached array index.
+  __ testl(FieldOperand(rax, String::kHashFieldOffset),
+           Immediate(String::kContainsCachedArrayIndexMask));
+  __ j(not_zero, &slow_string, Label::kNear);
+  __ movl(rax, FieldOperand(rax, String::kHashFieldOffset));
+  __ IndexFromHash(rax, rax);
+  __ Ret();
+  __ bind(&slow_string);
+  __ PopReturnAddressTo(rcx);     // Pop return address.
+  __ Push(rax);                   // Push argument.
+  __ PushReturnAddressFrom(rcx);  // Push return address.
+  __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+  __ bind(&not_string);
+
+  Label not_oddball;
+  __ CmpInstanceType(rdi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ movp(rax, FieldOperand(rax, Oddball::kToNumberOffset));
   __ Ret();
+  __ bind(&not_oddball);
 
-  __ bind(&call_builtin);
-  __ popq(rcx);  // Pop return address.
-  __ pushq(rax);
-  __ pushq(rcx);  // Push return address.
+  __ PopReturnAddressTo(rcx);     // Pop return address.
+  __ Push(rax);                   // Push argument.
+  __ PushReturnAddressFrom(rcx);  // Push return address.
   __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
 }
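The stub now implements most of ToNumber inline rather than always deferring to the builtin. Equivalent control flow (a sketch, not V8 code):

    if (IsSmi(arg)) return arg;                     // already a number
    if (HasHeapNumberMap(arg)) return arg;
    if (IsString(arg)) {
      if (HasCachedArrayIndex(arg))                 // hash field caches small indices
        return IndexFromHash(arg->hash_field());
      return Runtime::StringToNumber(arg);          // tail call for the slow case
    }
    if (IsOddball(arg)) return arg->to_number();    // undefined/null/true/false
    return Builtins::TO_NUMBER(arg);                // generic fallback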
 
index d17fa1b..0efcf7f 100644 (file)
@@ -71,7 +71,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
                                      Register r0,
                                      Register r1);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
  private:
   static const int kInlinedProbes = 4;
@@ -134,7 +134,7 @@ class RecordWriteStub: public PlatformCodeStub {
     INCREMENTAL_COMPACTION
   };
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
   static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
   static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
@@ -313,9 +313,9 @@ class RecordWriteStub: public PlatformCodeStub {
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  virtual Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+  Major MajorKey() const FINAL { return RecordWrite; }
 
-  virtual void Generate(MacroAssembler* masm) OVERRIDE;
+  void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -323,7 +323,7 @@ class RecordWriteStub: public PlatformCodeStub {
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  void Activate(Code* code) {
+  void Activate(Code* code) OVERRIDE {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
index 16b0cdc..54fe9b0 100644 (file)
@@ -23,6 +23,12 @@ int Deoptimizer::patch_size() {
 }
 
 
+void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
+  // Empty because there is no need for relocation information for the code
+  // patching in Deoptimizer::PatchCodeForDeoptimization below.
+}
+
+
 void Deoptimizer::PatchCodeForDeoptimization(Isolate* isolate, Code* code) {
   // Invalidate the relocation information, as it will become invalid by the
   // code patching below, and is not needed any more.
index 837da27..75adc89 100644 (file)
@@ -148,6 +148,8 @@ enum Prefixes {
   ESCAPE_PREFIX = 0x0F,
   OPERAND_SIZE_OVERRIDE_PREFIX = 0x66,
   ADDRESS_SIZE_OVERRIDE_PREFIX = 0x67,
+  VEX3_PREFIX = 0xC4,
+  VEX2_PREFIX = 0xC5,
   REPNE_PREFIX = 0xF2,
   REP_PREFIX = 0xF3,
   REPEQ_PREFIX = REP_PREFIX
@@ -290,11 +292,14 @@ class DisassemblerX64 {
                       ABORT_ON_UNIMPLEMENTED_OPCODE)
       : converter_(converter),
         tmp_buffer_pos_(0),
-        abort_on_unimplemented_(
-            unimplemented_action == ABORT_ON_UNIMPLEMENTED_OPCODE),
+        abort_on_unimplemented_(unimplemented_action ==
+                                ABORT_ON_UNIMPLEMENTED_OPCODE),
         rex_(0),
         operand_size_(0),
         group_1_prefix_(0),
+        vex_byte0_(0),
+        vex_byte1_(0),
+        vex_byte2_(0),
         byte_size_operand_(false),
         instruction_table_(instruction_table.Pointer()) {
     tmp_buffer_[0] = '\0';
@@ -323,6 +328,9 @@ class DisassemblerX64 {
   byte rex_;
   byte operand_size_;  // 0x66 or (if no group 3 prefix is present) 0x0.
   byte group_1_prefix_;  // 0xF2, 0xF3, or (if no group 1 prefix is present) 0.
+  byte vex_byte0_;       // 0xc4 or 0xc5
+  byte vex_byte1_;
+  byte vex_byte2_;  // only for the 3-byte VEX prefix
   // Byte size operand override.
   bool byte_size_operand_;
   const InstructionTable* const instruction_table_;
@@ -345,6 +353,51 @@ class DisassemblerX64 {
 
   bool rex_w() { return (rex_ & 0x08) != 0; }
 
+  bool vex_128() {
+    DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
+    byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
+    return (checked & 4) == 0;
+  }
+
+  bool vex_66() {
+    DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
+    byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
+    return (checked & 3) == 1;
+  }
+
+  bool vex_f3() {
+    DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
+    byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
+    return (checked & 3) == 2;
+  }
+
+  bool vex_f2() {
+    DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
+    byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
+    return (checked & 3) == 3;
+  }
+
+  bool vex_0f() {
+    if (vex_byte0_ == VEX2_PREFIX) return true;
+    return (vex_byte1_ & 3) == 1;
+  }
+
+  bool vex_0f38() {
+    if (vex_byte0_ == VEX2_PREFIX) return false;
+    return (vex_byte1_ & 3) == 2;
+  }
+
+  bool vex_0f3a() {
+    if (vex_byte0_ == VEX2_PREFIX) return false;
+    return (vex_byte1_ & 3) == 3;
+  }
+
+  int vex_vreg() {
+    DCHECK(vex_byte0_ == VEX3_PREFIX || vex_byte0_ == VEX2_PREFIX);
+    byte checked = vex_byte0_ == VEX3_PREFIX ? vex_byte2_ : vex_byte1_;
+    return ~(checked >> 3) & 0xf;
+  }
+
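vex_vreg() undoes the one's-complement storage of vvvv (bits 6:3 of the final prefix byte). Worked example: for vaddsd xmm1,xmm2,xmm3 the prefix is c5 eb; (0xeb >> 3) & 0xf is 0xd, and ~0xd & 0xf yields 2, recovering xmm2 as the second source.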
   OperandSize operand_size() {
     if (byte_size_operand_) return OPERAND_BYTE_SIZE;
     if (rex_w()) return OPERAND_QUADWORD_SIZE;
@@ -356,6 +409,8 @@ class DisassemblerX64 {
     return "bwlq"[operand_size()];
   }
 
+  char float_size_code() { return "sd"[rex_w()]; }
+
   const char* NameOfCPURegister(int reg) const {
     return converter_.NameOfCPURegister(reg);
   }
@@ -414,6 +469,7 @@ class DisassemblerX64 {
   int FPUInstruction(byte* data);
   int MemoryFPUInstruction(int escape_opcode, int regop, byte* modrm_start);
   int RegisterFPUInstruction(int escape_opcode, byte modrm_byte);
+  int AVXInstruction(byte* data);
   void AppendToBuffer(const char* format, ...);
 
   void UnimplementedInstruction() {
@@ -811,6 +867,111 @@ int DisassemblerX64::SetCC(byte* data) {
 }
 
 
+int DisassemblerX64::AVXInstruction(byte* data) {
+  byte opcode = *data;
+  byte* current = data + 1;
+  if (vex_66() && vex_0f38()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x99:
+        AppendToBuffer("vfmadd132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xa9:
+        AppendToBuffer("vfmadd213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xb9:
+        AppendToBuffer("vfmadd231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x9b:
+        AppendToBuffer("vfmsub132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xab:
+        AppendToBuffer("vfmsub213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xbb:
+        AppendToBuffer("vfmsub231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x9d:
+        AppendToBuffer("vfnmadd132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xad:
+        AppendToBuffer("vfnmadd213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xbd:
+        AppendToBuffer("vfnmadd231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x9f:
+        AppendToBuffer("vfnmsub132s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xaf:
+        AppendToBuffer("vfnmsub213s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0xbf:
+        AppendToBuffer("vfnmsub231s%c %s,%s,", float_size_code(),
+                       NameOfXMMRegister(regop), NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else if (vex_f2() && vex_0f()) {
+    int mod, regop, rm, vvvv = vex_vreg();
+    get_modrm(*current, &mod, &regop, &rm);
+    switch (opcode) {
+      case 0x58:
+        AppendToBuffer("vaddsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x59:
+        AppendToBuffer("vmulsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5c:
+        AppendToBuffer("vsubsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      case 0x5e:
+        AppendToBuffer("vdivsd %s,%s,", NameOfXMMRegister(regop),
+                       NameOfXMMRegister(vvvv));
+        current += PrintRightXMMOperand(current);
+        break;
+      default:
+        UnimplementedInstruction();
+    }
+  } else {
+    UnimplementedInstruction();
+  }
+
+  return static_cast<int>(current - data);
+}
+
+
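With this, the new emitters round-trip through the disassembler: c5 eb 58 cb prints as vaddsd xmm1,xmm2,xmm3 (the F2/0F branch), and c4 e2 f1 99 c2 prints as vfmadd132sd xmm0,xmm1,xmm2 (the 66/0F38 branch, where W = 1 makes float_size_code() report 'd').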
 // Returns number of bytes used, including *data.
 int DisassemblerX64::FPUInstruction(byte* data) {
   byte escape_opcode = *data;
@@ -1189,6 +1350,16 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
       AppendToBuffer("cvttss2si%c %s,",
           operand_size_code(), NameOfCPURegister(regop));
       current += PrintRightXMMOperand(current);
+    } else if (opcode == 0x58) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("addss %s,", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
+    } else if (opcode == 0x59) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("mulss %s,", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
     } else if (opcode == 0x5A) {
       // CVTSS2SD:
       // Convert scalar single-precision FP to scalar double-precision FP.
@@ -1196,6 +1367,16 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
       get_modrm(*current, &mod, &regop, &rm);
       AppendToBuffer("cvtss2sd %s,", NameOfXMMRegister(regop));
       current += PrintRightXMMOperand(current);
+    } else if (opcode == 0x5c) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("subss %s,", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
+    } else if (opcode == 0x5e) {
+      int mod, regop, rm;
+      get_modrm(*current, &mod, &regop, &rm);
+      AppendToBuffer("divss %s,", NameOfXMMRegister(regop));
+      current += PrintRightXMMOperand(current);
     } else if (opcode == 0x7E) {
       int mod, regop, rm;
       get_modrm(*current, &mod, &regop, &rm);
@@ -1234,6 +1415,11 @@ int DisassemblerX64::TwoByteOpcodeInstruction(byte* data) {
     current += PrintRightXMMOperand(current);
     AppendToBuffer(",%s", NameOfXMMRegister(regop));
 
+  } else if (opcode == 0x2e) {
+    int mod, regop, rm;
+    get_modrm(*current, &mod, &regop, &rm);
+    AppendToBuffer("ucomiss %s,", NameOfXMMRegister(regop));
+    current += PrintRightXMMOperand(current);
   } else if (opcode == 0xA2) {
     // CPUID
     AppendToBuffer("%s", mnemonic);
@@ -1387,99 +1573,114 @@ int DisassemblerX64::InstructionDecode(v8::internal::Vector<char> out_buffer,
       if (rex_w()) AppendToBuffer("REX.W ");
     } else if ((current & 0xFE) == 0xF2) {  // Group 1 prefix (0xF2 or 0xF3).
       group_1_prefix_ = current;
+    } else if (current == VEX3_PREFIX) {
+      vex_byte0_ = current;
+      vex_byte1_ = *(data + 1);
+      vex_byte2_ = *(data + 2);
+      setRex(0x40 | (~(vex_byte1_ >> 5) & 7) | ((vex_byte2_ >> 4) & 8));
+      data += 2;
+    } else if (current == VEX2_PREFIX) {
+      vex_byte0_ = current;
+      vex_byte1_ = *(data + 1);
+      setRex(0x40 | (~(vex_byte1_ >> 5) & 4));
+      data++;
     } else {  // Not a prefix - an opcode.
       break;
     }
     data++;
   }
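A VEX prefix thus consumes two or three bytes here and, via setRex(), synthesizes the equivalent REX bits so the existing register-naming helpers keep working: the one's-complemented R/X/B bits sit in the top three bits of the byte after c4 (hence ~(vex_byte1_ >> 5) & 7), W is bit 7 of the byte after that (hence (vex_byte2_ >> 4) & 8 landing in REX.W's slot), and the two-byte form carries only R.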
 
-  const InstructionDesc& idesc = instruction_table_->Get(current);
-  byte_size_operand_ = idesc.byte_size_operation;
-  switch (idesc.type) {
-    case ZERO_OPERANDS_INSTR:
-      if (current >= 0xA4 && current <= 0xA7) {
-        // String move or compare operations.
-        if (group_1_prefix_ == REP_PREFIX) {
-          // REP.
-          AppendToBuffer("rep ");
+  // Decode AVX instructions.
+  if (vex_byte0_ != 0) {
+    processed = true;
+    data += AVXInstruction(data);
+  } else {
+    const InstructionDesc& idesc = instruction_table_->Get(current);
+    byte_size_operand_ = idesc.byte_size_operation;
+    switch (idesc.type) {
+      case ZERO_OPERANDS_INSTR:
+        if (current >= 0xA4 && current <= 0xA7) {
+          // String move or compare operations.
+          if (group_1_prefix_ == REP_PREFIX) {
+            // REP.
+            AppendToBuffer("rep ");
+          }
+          if (rex_w()) AppendToBuffer("REX.W ");
+          AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
+        } else {
+          AppendToBuffer("%s", idesc.mnem, operand_size_code());
         }
-        if (rex_w()) AppendToBuffer("REX.W ");
-        AppendToBuffer("%s%c", idesc.mnem, operand_size_code());
-      } else {
-        AppendToBuffer("%s", idesc.mnem, operand_size_code());
-      }
-      data++;
-      break;
+        data++;
+        break;
 
-    case TWO_OPERANDS_INSTR:
-      data++;
-      data += PrintOperands(idesc.mnem, idesc.op_order_, data);
-      break;
+      case TWO_OPERANDS_INSTR:
+        data++;
+        data += PrintOperands(idesc.mnem, idesc.op_order_, data);
+        break;
 
-    case JUMP_CONDITIONAL_SHORT_INSTR:
-      data += JumpConditionalShort(data);
-      break;
+      case JUMP_CONDITIONAL_SHORT_INSTR:
+        data += JumpConditionalShort(data);
+        break;
 
-    case REGISTER_INSTR:
-      AppendToBuffer("%s%c %s",
-                     idesc.mnem,
-                     operand_size_code(),
-                     NameOfCPURegister(base_reg(current & 0x07)));
-      data++;
-      break;
-    case PUSHPOP_INSTR:
-      AppendToBuffer("%s %s",
-                     idesc.mnem,
-                     NameOfCPURegister(base_reg(current & 0x07)));
-      data++;
-      break;
-    case MOVE_REG_INSTR: {
-      byte* addr = NULL;
-      switch (operand_size()) {
-        case OPERAND_WORD_SIZE:
-          addr = reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
-          data += 3;
-          break;
-        case OPERAND_DOUBLEWORD_SIZE:
-          addr =
-              reinterpret_cast<byte*>(*reinterpret_cast<uint32_t*>(data + 1));
-          data += 5;
-          break;
-        case OPERAND_QUADWORD_SIZE:
-          addr = reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
-          data += 9;
-          break;
-        default:
-          UNREACHABLE();
+      case REGISTER_INSTR:
+        AppendToBuffer("%s%c %s", idesc.mnem, operand_size_code(),
+                       NameOfCPURegister(base_reg(current & 0x07)));
+        data++;
+        break;
+      case PUSHPOP_INSTR:
+        AppendToBuffer("%s %s", idesc.mnem,
+                       NameOfCPURegister(base_reg(current & 0x07)));
+        data++;
+        break;
+      case MOVE_REG_INSTR: {
+        byte* addr = NULL;
+        switch (operand_size()) {
+          case OPERAND_WORD_SIZE:
+            addr =
+                reinterpret_cast<byte*>(*reinterpret_cast<int16_t*>(data + 1));
+            data += 3;
+            break;
+          case OPERAND_DOUBLEWORD_SIZE:
+            addr =
+                reinterpret_cast<byte*>(*reinterpret_cast<uint32_t*>(data + 1));
+            data += 5;
+            break;
+          case OPERAND_QUADWORD_SIZE:
+            addr =
+                reinterpret_cast<byte*>(*reinterpret_cast<int64_t*>(data + 1));
+            data += 9;
+            break;
+          default:
+            UNREACHABLE();
+        }
+        AppendToBuffer("mov%c %s,%s", operand_size_code(),
+                       NameOfCPURegister(base_reg(current & 0x07)),
+                       NameOfAddress(addr));
+        break;
       }
-      AppendToBuffer("mov%c %s,%s",
-                     operand_size_code(),
-                     NameOfCPURegister(base_reg(current & 0x07)),
-                     NameOfAddress(addr));
-      break;
-    }
 
-    case CALL_JUMP_INSTR: {
-      byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
-      AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
-      data += 5;
-      break;
-    }
+      case CALL_JUMP_INSTR: {
+        byte* addr = data + *reinterpret_cast<int32_t*>(data + 1) + 5;
+        AppendToBuffer("%s %s", idesc.mnem, NameOfAddress(addr));
+        data += 5;
+        break;
+      }
 
-    case SHORT_IMMEDIATE_INSTR: {
-      byte* addr =
-          reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
-      AppendToBuffer("%s rax,%s", idesc.mnem, NameOfAddress(addr));
-      data += 5;
-      break;
-    }
+      case SHORT_IMMEDIATE_INSTR: {
+        byte* addr =
+            reinterpret_cast<byte*>(*reinterpret_cast<int32_t*>(data + 1));
+        AppendToBuffer("%s rax,%s", idesc.mnem, NameOfAddress(addr));
+        data += 5;
+        break;
+      }
 
-    case NO_INSTR:
-      processed = false;
-      break;
+      case NO_INSTR:
+        processed = false;
+        break;
 
-    default:
-      UNIMPLEMENTED();  // This type is not implemented.
+      default:
+        UNIMPLEMENTED();  // This type is not implemented.
+    }
   }
 
   // The first byte didn't match any of the simple opcodes, so we
index 25bfd34..24747ee 100644 (file)
@@ -187,10 +187,10 @@ void FullCodeGenerator::Generate() {
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in rdi.
-    if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+    if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
       __ Push(rdi);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewScriptContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -895,7 +895,7 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   EmitDebugCheckDeclarationContext(variable);
 
   // Load instance object.
-  __ LoadContext(rax, scope_->ContextChainLength(scope_->GlobalScope()));
+  __ LoadContext(rax, scope_->ContextChainLength(scope_->ScriptScope()));
   __ movp(rax, ContextOperand(rax, variable->interface()->Index()));
   __ movp(rax, ContextOperand(rax, Context::EXTENSION_INDEX));
 
@@ -1067,6 +1067,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 
   // Get the object to enumerate over. If the object is null or undefined, skip
   // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  SetExpressionPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
   __ CompareRoot(rax, Heap::kUndefinedValueRootIndex);
   __ j(equal, &exit);
@@ -1170,6 +1171,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // Generate code for doing the condition check.
   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
+  SetExpressionPosition(stmt->each());
+
   __ movp(rax, Operand(rsp, 0 * kPointerSize));  // Get the current index.
   __ cmpp(rax, Operand(rsp, 1 * kPointerSize));  // Compare to the array length.
   __ j(above_equal, loop_statement.break_label());
@@ -1239,48 +1242,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 }
 
 
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
-  Comment cmnt(masm_, "[ ForOfStatement");
-  SetStatementPosition(stmt);
-
-  Iteration loop_statement(this, stmt);
-  increment_loop_depth();
-
-  // var iterator = iterable[Symbol.iterator]();
-  VisitForEffect(stmt->assign_iterator());
-
-  // Loop entry.
-  __ bind(loop_statement.continue_label());
-
-  // result = iterator.next()
-  VisitForEffect(stmt->next_result());
-
-  // if (result.done) break;
-  Label result_not_done;
-  VisitForControl(stmt->result_done(),
-                  loop_statement.break_label(),
-                  &result_not_done,
-                  &result_not_done);
-  __ bind(&result_not_done);
-
-  // each = result.value
-  VisitForEffect(stmt->assign_each());
-
-  // Generate code for the body of the loop.
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
-  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
-  __ jmp(loop_statement.continue_label());
-
-  // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ bind(loop_statement.break_label());
-  decrement_loop_depth();
-}
-
-
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
   // Use the fast case closure allocation code that allocates in new
@@ -1341,6 +1302,19 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
 }
 
 
+void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
+                                                  int offset) {
+  if (NeedsHomeObject(initializer)) {
+    __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
+    __ Move(StoreDescriptor::NameRegister(),
+            isolate()->factory()->home_object_symbol());
+    __ movp(StoreDescriptor::ValueRegister(),
+            Operand(rsp, offset * kPointerSize));
+    CallStoreIC();
+  }
+}
+
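This helper supports methods that reference super: such a method needs its defining object recorded under the symbol-named [[HomeObject]] property. At each call site below, the freshly defined method closure is on top of the stack (so it becomes the StoreIC receiver), and offset locates the object literal or class that becomes its home object (2 or 3 below, depending on how much the call site has pushed).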
+
 void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
@@ -1704,6 +1678,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
             __ movp(StoreDescriptor::ReceiverRegister(), Operand(rsp, 0));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+            if (NeedsHomeObject(value)) {
+              __ movp(StoreDescriptor::ReceiverRegister(), rax);
+              __ Move(StoreDescriptor::NameRegister(),
+                      isolate()->factory()->home_object_symbol());
+              __ movp(StoreDescriptor::ValueRegister(), Operand(rsp, 0));
+              CallStoreIC();
+            }
           } else {
             VisitForEffect(value);
           }
@@ -1713,6 +1695,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
+          EmitSetHomeObjectIfNeeded(value, 2);
           __ Push(Smi::FromInt(SLOPPY));  // Language mode
           __ CallRuntime(Runtime::kSetProperty, 4);
         } else {
@@ -1745,7 +1728,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
     __ Push(Operand(rsp, 0));  // Duplicate receiver.
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
+    EmitSetHomeObjectIfNeeded(it->second->getter, 2);
     EmitAccessor(it->second->setter);
+    EmitSetHomeObjectIfNeeded(it->second->setter, 3);
     __ Push(Smi::FromInt(NONE));
     __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
   }
@@ -2172,15 +2157,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   VisitForAccumulatorValue(value);
   __ Pop(rbx);
 
-  // Check generator state.
-  Label wrong_state, closed_state, done;
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
-  __ SmiCompare(FieldOperand(rbx, JSGeneratorObject::kContinuationOffset),
-                Smi::FromInt(0));
-  __ j(equal, &closed_state);
-  __ j(less, &wrong_state);
-
   // Load suspended function and context.
   __ movp(rsi, FieldOperand(rbx, JSGeneratorObject::kContextOffset));
   __ movp(rdi, FieldOperand(rbx, JSGeneratorObject::kFunctionOffset));
@@ -2202,7 +2178,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
 
   // Enter a new JavaScript frame, and initialize its slots as they were when
   // the generator was suspended.
-  Label resume_frame;
+  Label resume_frame, done;
   __ bind(&push_frame);
   __ call(&resume_frame);
   __ jmp(&done);
@@ -2249,25 +2225,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   // Not reached: the runtime call returns elsewhere.
   __ Abort(kGeneratorFailedToResume);
 
-  // Reach here when generator is closed.
-  __ bind(&closed_state);
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    // Return completed iterator result when generator is closed.
-    __ PushRoot(Heap::kUndefinedValueRootIndex);
-    // Pop value from top-of-stack slot; box result into result register.
-    EmitCreateIteratorResult(true);
-  } else {
-    // Throw the provided value.
-    __ Push(rax);
-    __ CallRuntime(Runtime::kThrow, 1);
-  }
-  __ jmp(&done);
-
-  // Throw error if we attempt to operate on a running generator.
-  __ bind(&wrong_state);
-  __ Push(rbx);
-  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
   __ bind(&done);
   context()->Plug(result_register());
 }
@@ -2447,6 +2404,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
     }
     VisitForStackValue(key);
     VisitForStackValue(value);
+    EmitSetHomeObjectIfNeeded(value, 2);
 
     switch (property->kind()) {
       case ObjectLiteral::Property::CONSTANT:
@@ -2638,8 +2596,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
+  } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+    __ CallRuntime(Runtime::kThrowConstAssignError, 0);
   }
-  // Non-initializing assignments to consts are ignored.
 }
 
 
@@ -5043,7 +5002,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope() ||
+  if (declaration_scope->is_script_scope() ||
       declaration_scope->is_module_scope()) {
     // Contexts nested in the native context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
index 7e482ee..10f2bb8 100644 (file)
@@ -30,9 +30,9 @@ class SafepointGenerator FINAL : public CallWrapper {
         deopt_mode_(mode) { }
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const OVERRIDE {}
+  void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const OVERRIDE {
+  void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
@@ -2676,10 +2676,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
     DeferredInstanceOfKnownGlobal(LCodeGen* codegen,
                                   LInstanceOfKnownGlobal* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
    private:
     LInstanceOfKnownGlobal* instr_;
@@ -2822,6 +2822,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
     __ Ret((ToInteger32(instr->constant_parameter_count()) + 1) * kPointerSize,
            rcx);
   } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
     Register reg = ToRegister(instr->parameter_count());
     // The argument count parameter is a smi
     __ SmiToInteger32(reg, reg);
@@ -2851,13 +2852,17 @@ template <class T>
 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   DCHECK(FLAG_vector_ics);
   Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = VectorLoadICDescriptor::SlotRegister();
   DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(rax));
+
+  AllowDeferredHandleDereference vector_structure_check;
   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   __ Move(vector_register, vector);
   // No need to allocate this register.
-  DCHECK(VectorLoadICDescriptor::SlotRegister().is(rax));
-  int index = vector->GetIndex(instr->hydrogen()->slot());
-  __ Move(VectorLoadICDescriptor::SlotRegister(), Smi::FromInt(index));
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ Move(slot_register, Smi::FromInt(index));
 }
 
 
@@ -2975,6 +2980,7 @@ void LCodeGen::DoLoadNamedField(LLoadNamedField* instr) {
 
   Register object = ToRegister(instr->object());
   if (instr->hydrogen()->representation().IsDouble()) {
+    DCHECK(access.IsInobject());
     XMMRegister result = ToDoubleRegister(instr->result());
     __ movsd(result, FieldOperand(object, offset));
     return;
@@ -3274,11 +3280,9 @@ Operand LCodeGen::BuildFastArrayOperand(
     return Operand(elements_pointer_reg,
                    (constant_value << shift_size) + offset);
   } else {
-    // Take the tag bit into account while computing the shift size.
-    if (key_representation.IsSmi() && (shift_size >= 1)) {
-      DCHECK(SmiValuesAre31Bits());
-      shift_size -= kSmiTagSize;
-    }
+    // Guaranteed by ArrayInstructionInterface::KeyedAccessIndexRequirement().
+    DCHECK(key_representation.IsInteger32());
+
     ScaleFactor scale_factor = static_cast<ScaleFactor>(shift_size);
     return Operand(elements_pointer_reg,
                    ToRegister(key),
@@ -3537,43 +3541,67 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
   Register name = ToRegister(instr->name());
   DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(name.is(LoadDescriptor::NameRegister()));
-
-  Register scratch = rbx;
+  Register scratch = rdi;
   DCHECK(!scratch.is(receiver) && !scratch.is(name));
+  DCHECK(!FLAG_vector_ics ||
+         !AreAliased(ToRegister(instr->slot()), ToRegister(instr->vector()),
+                     scratch));
 
   // Important for the tail-call.
   bool must_teardown_frame = NeedsEagerFrame();
 
-  // The probe will tail call to a handler if found.
-  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
-                                         must_teardown_frame, receiver, name,
-                                         scratch, no_reg);
+  if (!instr->hydrogen()->is_just_miss()) {
+    // The probe will tail call to a handler if found.
+    DCHECK(!instr->hydrogen()->is_keyed_load());
+    isolate()->stub_cache()->GenerateProbe(
+        masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+        receiver, name, scratch, no_reg);
+  }
 
   // Tail call to miss if we ended up here.
   if (must_teardown_frame) __ leave();
-  LoadIC::GenerateMiss(masm());
+  if (instr->hydrogen()->is_keyed_load()) {
+    KeyedLoadIC::GenerateMiss(masm());
+  } else {
+    LoadIC::GenerateMiss(masm());
+  }
 }
 
 
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   DCHECK(ToRegister(instr->result()).is(rax));
 
-  LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ leave();
 
-  if (instr->target()->IsConstantOperand()) {
-    LConstantOperand* target = LConstantOperand::cast(instr->target());
-    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-    generator.BeforeCall(__ CallSize(code));
-    __ call(code, RelocInfo::CODE_TARGET);
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ jmp(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ jmp(target);
+    }
   } else {
-    DCHECK(instr->target()->IsRegister());
-    Register target = ToRegister(instr->target());
-    generator.BeforeCall(__ CallSize(target));
-    __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ call(target);
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code));
+      __ call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(target));
+      __ addp(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ call(target);
+    }
+    generator.AfterCall();
   }
-  generator.AfterCall();
 }
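
The rewritten DoCallWithDescriptor above now splits genuine tail calls from ordinary calls: a tail call tears down the eager frame and jumps, recording no safepoint because control never returns, while an ordinary call still runs through the SafepointGenerator for lazy deoptimization. A minimal standalone sketch of that split, using illustrative stand-in types rather than the Lithium API:

    #include <cstdio>

    // Illustrative stand-ins; not V8 types.
    struct CallSite {
      bool is_tail_call;       // instr->hydrogen()->IsTailCall() in the patch
      bool needs_eager_frame;  // NeedsEagerFrame() in the patch
    };

    void EmitCall(const CallSite& site) {
      if (site.is_tail_call) {
        // Control never returns, so no safepoint is recorded; the caller's
        // frame is reused after tearing down our own.
        if (site.needs_eager_frame) std::puts("leave");
        std::puts("jmp  target");
      } else {
        // A normal call records a safepoint so lazy deopt can walk the frame.
        std::puts("call target   ; safepoint recorded after the call");
      }
    }

    int main() {
      EmitCall({true, true});    // tail call from a framed function
      EmitCall({false, false});  // ordinary call
      return 0;
    }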
 
 
@@ -3682,10 +3710,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
    public:
     DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LMathAbs* instr_;
   };
@@ -4121,7 +4150,7 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
   DCHECK(!representation.IsSmi() ||
          !instr->value()->IsConstantOperand() ||
          IsInteger32Constant(LConstantOperand::cast(instr->value())));
-  if (representation.IsDouble()) {
+  if (!FLAG_unbox_double_fields && representation.IsDouble()) {
     DCHECK(access.IsInobject());
     DCHECK(!hinstr->has_transition());
     DCHECK(!hinstr->NeedsWriteBarrier());
@@ -4171,7 +4200,12 @@ void LCodeGen::DoStoreNamedField(LStoreNamedField* instr) {
 
   Operand operand = FieldOperand(write_register, offset);
 
-  if (instr->value()->IsRegister()) {
+  if (FLAG_unbox_double_fields && representation.IsDouble()) {
+    DCHECK(access.IsInobject());
+    XMMRegister value = ToDoubleRegister(instr->value());
+    __ movsd(operand, value);
+
+  } else if (instr->value()->IsRegister()) {
     Register value = ToRegister(instr->value());
     __ Store(operand, value, representation);
   } else {
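
With FLAG_unbox_double_fields enabled, the store above writes the raw 8-byte double straight into the object with a single movsd instead of going through a boxed HeapNumber. A rough model of the two field layouts, with made-up struct names:

    #include <cstdio>

    struct HeapNumber { double value; };    // boxed: the field is a pointer

    struct BoxedObject { HeapNumber* x; };  // store = write through the box
    struct UnboxedObject { double x; };     // store = one 8-byte move in place

    int main() {
      HeapNumber box{0.0};
      BoxedObject b{&box};
      b.x->value = 1.5;  // boxed path: extra indirection through the number
      UnboxedObject u{0.0};
      u.x = 1.5;         // unboxed path: the patch's single movsd
      std::printf("%f %f\n", b.x->value, u.x);
      return 0;
    }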
@@ -4545,10 +4579,9 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
    public:
     DeferredStringCharCodeAt(LCodeGen* codegen, LStringCharCodeAt* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStringCharCodeAt(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4600,10 +4633,11 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
    public:
     DeferredStringCharFromCode(LCodeGen* codegen, LStringCharFromCode* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharFromCode* instr_;
   };
@@ -4672,11 +4706,12 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
    public:
     DeferredNumberTagI(LCodeGen* codegen, LNumberTagI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                        instr_->temp2(), SIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagI* instr_;
   };
@@ -4702,11 +4737,12 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
    public:
     DeferredNumberTagU(LCodeGen* codegen, LNumberTagU* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp1(),
                                        instr_->temp2(), UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagU* instr_;
   };
@@ -4789,10 +4825,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
    public:
     DeferredNumberTagD(LCodeGen* codegen, LNumberTagD* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredNumberTagD(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagD* instr_;
   };
@@ -4985,10 +5020,9 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
    public:
     DeferredTaggedToI(LCodeGen* codegen, LTaggedToI* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredTaggedToI(instr_, done());
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_, done()); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LTaggedToI* instr_;
   };
@@ -5175,11 +5209,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
         : LDeferredCode(codegen), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -5304,10 +5339,9 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
    public:
     DeferredAllocate(LCodeGen* codegen, LAllocate* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredAllocate(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LAllocate* instr_;
   };
@@ -5677,10 +5711,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
    public:
     DeferredStackCheck(LCodeGen* codegen, LStackCheck* instr)
         : LDeferredCode(codegen), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStackCheck(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStackCheck* instr_;
   };
@@ -5824,10 +5857,11 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
           object_(object),
           index_(index) {
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LLoadFieldByIndex* instr_;
     Register object_;
index 541d37a..c1831af 100644
@@ -1118,9 +1118,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
       UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
   LOperand* name_register =
       UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+    vector =
+        UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+  }
+
   // Not marked as call. It can't deoptimize, and it never returns.
   return new (zone()) LTailCallThroughMegamorphicCache(
-      context, receiver_register, name_register);
+      context, receiver_register, name_register, slot, vector);
 }
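
When FLAG_vector_ics is on, the builder now pins two extra inputs, the feedback slot and the feedback vector, to their descriptor registers so they stay live up to the tail call; both stay NULL otherwise. A toy model of that fixed-register constraint list (register names are placeholders, not the real descriptor assignment):

    #include <cstdio>
    #include <string>
    #include <vector>

    struct Constraint { std::string value, reg; };

    int main() {
      const bool vector_ics = true;  // FLAG_vector_ics in the patch
      std::vector<Constraint> fixed = {{"receiver", "r_recv"},
                                       {"name", "r_name"}};
      if (vector_ics) {
        // Two more pinned operands; NULL/absent otherwise, as in the patch.
        fixed.push_back({"slot", "r_slot"});
        fixed.push_back({"vector", "r_vec"});
      }
      for (const Constraint& c : fixed)
        std::printf("%-8s -> %s\n", c.value.c_str(), c.reg.c_str());
      return 0;
    }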
 
 
@@ -2067,7 +2075,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object =
       UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
@@ -2140,7 +2148,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
   LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
@@ -2233,7 +2241,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
index 30b994e..8b7a5bc 100644
@@ -163,17 +163,13 @@ class LCodeGen;
   V(WrapReceiver)
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const FINAL OVERRIDE {                      \
-    return LInstruction::k##type;                                           \
-  }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;   \
-  virtual const char* Mnemonic() const FINAL OVERRIDE {               \
-    return mnemonic;                                                        \
-  }                                                                         \
-  static L##type* cast(LInstruction* instr) {                               \
-    DCHECK(instr->Is##type());                                              \
-    return reinterpret_cast<L##type*>(instr);                               \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const FINAL { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) FINAL;              \
+  const char* Mnemonic() const FINAL { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
   }
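
This header sweep leans on a C++11 rule: override and final may only annotate a member that overrides a virtual, so spelling virtual again adds nothing, and the OVERRIDE/FINAL macros expand to those keywords where the toolchain supports them. A self-contained illustration:

    struct LBase {
      virtual int opcode() const { return 0; }
      virtual ~LBase() = default;
    };

    struct LConcrete final : LBase {
      // `final` here already implies the function is virtual and overrides
      // LBase::opcode(); adding `virtual` would change nothing.
      int opcode() const final { return 1; }
    };

    int main() {
      LConcrete instr;
      const LBase& base = instr;
      return base.opcode() == 1 ? 0 : 1;  // dynamic dispatch still works
    }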
 
 
@@ -292,14 +288,11 @@ class LTemplateResultInstruction : public LInstruction {
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const FINAL OVERRIDE {
-    return R != 0 && result() != NULL;
-  }
+  bool HasResult() const FINAL { return R != 0 && result() != NULL; }
   void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() const { return results_[0]; }
+  LOperand* result() const OVERRIDE { return results_[0]; }
 
-  virtual bool MustSignExtendResult(
-      LPlatformChunk* chunk) const FINAL OVERRIDE;
+  bool MustSignExtendResult(LPlatformChunk* chunk) const FINAL;
 
  protected:
   EmbeddedContainer<LOperand*, R> results_;
@@ -317,11 +310,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
 
  private:
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return I; }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
+  int TempCount() FINAL { return T; }
+  LOperand* TempAt(int i) FINAL { return temps_[i]; }
 };
 
 
@@ -336,8 +329,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const FINAL OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsGap() const FINAL { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
     DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
@@ -378,7 +371,7 @@ class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -390,10 +383,10 @@ class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
-  virtual bool IsControl() const OVERRIDE { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
 
@@ -436,7 +429,7 @@ class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
 
 class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const OVERRIDE { return true; }
+  bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
@@ -447,12 +440,10 @@ class LLabel FINAL : public LGap {
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -470,9 +461,7 @@ class LLabel FINAL : public LGap {
 
 class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
@@ -491,19 +480,23 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
 
 
 class LTailCallThroughMegamorphicCache FINAL
-    : public LTemplateInstruction<0, 3, 0> {
+    : public LTemplateInstruction<0, 5, 0> {
  public:
   explicit LTailCallThroughMegamorphicCache(LOperand* context,
-                                            LOperand* receiver,
-                                            LOperand* name) {
+                                            LOperand* receiver, LOperand* name,
+                                            LOperand* slot, LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = receiver;
     inputs_[2] = name;
+    inputs_[3] = slot;
+    inputs_[4] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
   LOperand* name() { return inputs_[2]; }
+  LOperand* slot() { return inputs_[3]; }
+  LOperand* vector() { return inputs_[4]; }
 
   DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
                                "tail-call-through-megamorphic-cache")
@@ -513,9 +506,7 @@ class LTailCallThroughMegamorphicCache FINAL
 
 class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };
 
@@ -525,7 +516,7 @@ class LControlInstruction : public LTemplateInstruction<0, I, T> {
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const FINAL OVERRIDE { return true; }
+  bool IsControl() const FINAL { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -614,7 +605,7 @@ class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
 
   DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -855,7 +846,7 @@ class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1030,7 +1021,7 @@ class LIsObjectAndBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsObjectAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1047,7 +1038,7 @@ class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1062,7 +1053,7 @@ class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1080,7 +1071,7 @@ class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1102,7 +1093,7 @@ class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
                                "string-compare-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Token::Value op() const { return hydrogen()->token(); }
 };
@@ -1120,7 +1111,7 @@ class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 0> {
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1150,7 +1141,7 @@ class LHasCachedArrayIndexAndBranch FINAL
                                "has-cached-array-index-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndexAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1170,7 +1161,7 @@ class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1376,7 +1367,7 @@ class LBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1528,11 +1519,9 @@ class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
   LOperand* left() { return inputs_[0]; }
   LOperand* right() { return inputs_[1]; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticD;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -1556,11 +1545,9 @@ class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* left() { return inputs_[1]; }
   LOperand* right() { return inputs_[2]; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticT;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -1686,7 +1673,7 @@ class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
   }
   LOperand* elements() { return inputs_[0]; }
   LOperand* key() { return inputs_[1]; }
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
   ElementsKind elements_kind() const {
     return hydrogen()->elements_kind();
@@ -1770,7 +1757,7 @@ class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1791,7 +1778,7 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1830,7 +1817,7 @@ class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
   LOperand* function() { return inputs_[0]; }
   LOperand* code_object() { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
   DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@@ -1847,7 +1834,7 @@ class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
@@ -1891,7 +1878,7 @@ class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1913,18 +1900,18 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return inputs_.length(); }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+  int TempCount() FINAL { return 0; }
+  LOperand* TempAt(int i) FINAL { return NULL; }
 };
 
 
@@ -1941,7 +1928,7 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1976,7 +1963,7 @@ class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1995,7 +1982,7 @@ class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -2012,7 +1999,7 @@ class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -2198,7 +2185,7 @@ class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 1> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Representation representation() const {
     return hydrogen()->field_representation();
@@ -2221,7 +2208,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
@@ -2251,7 +2238,7 @@ class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
 };
@@ -2277,7 +2264,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
@@ -2304,7 +2291,7 @@ class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2593,7 +2580,7 @@ class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -2615,9 +2602,7 @@ class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   LOsrEntry() {}
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
@@ -2827,7 +2812,7 @@ class LChunkBuilder FINAL : public LChunkBuilderBase {
 
   // An input operand in register, stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
index 21b0f9b..5b897de 100644
@@ -2852,6 +2852,21 @@ void MacroAssembler::LoadGlobalCell(Register dst, Handle<Cell> cell) {
 }
 
 
+void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
+                                  Register scratch) {
+  Move(scratch, cell, RelocInfo::EMBEDDED_OBJECT);
+  cmpp(value, FieldOperand(scratch, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+                                   Label* miss) {
+  Move(value, cell, RelocInfo::EMBEDDED_OBJECT);
+  movp(value, FieldOperand(value, WeakCell::kValueOffset));
+  JumpIfSmi(value, miss);
+}
+
+
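
CmpWeakValue and LoadWeakValue read through a WeakCell, whose payload the GC clears to a Smi once the referenced object dies, which is why LoadWeakValue branches to the miss label on JumpIfSmi. A minimal model of that contract, using nullptr where the real heap uses the cleared-cell Smi sentinel:

    #include <cstdio>

    struct WeakCell {
      const void* value;  // object pointer, or nullptr once the GC cleared it
    };

    // LoadWeakValue analogue: yields the value or takes the miss path.
    bool LoadWeakValue(const WeakCell& cell, const void** out) {
      if (cell.value == nullptr) return false;  // JumpIfSmi(value, miss)
      *out = cell.value;
      return true;
    }

    int main() {
      int object = 0;
      WeakCell live{&object}, cleared{nullptr};
      const void* v;
      std::printf("live: %d, cleared: %d\n",
                  LoadWeakValue(live, &v), LoadWeakValue(cleared, &v));
      return 0;
    }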
 void MacroAssembler::Drop(int stack_elements) {
   if (stack_elements > 0) {
     addp(rsp, Immediate(stack_elements * kPointerSize));
@@ -3615,18 +3630,17 @@ void MacroAssembler::EnumLength(Register dst, Register map) {
 }
 
 
-void MacroAssembler::DispatchMap(Register obj,
-                                 Register unused,
-                                 Handle<Map> map,
-                                 Handle<Code> success,
-                                 SmiCheckType smi_check_type) {
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+                                     Register scratch2, Handle<WeakCell> cell,
+                                     Handle<Code> success,
+                                     SmiCheckType smi_check_type) {
   Label fail;
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, &fail);
   }
-  Cmp(FieldOperand(obj, HeapObject::kMapOffset), map);
+  movq(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
+  CmpWeakValue(scratch1, cell, scratch2);
   j(equal, success, RelocInfo::CODE_TARGET);
-
   bind(&fail);
 }
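
DispatchWeakMap combines those helpers: load the object's map, compare it against the weakly held map, and jump to the success code only on a match; a cleared cell simply never matches. Sketched with illustrative types:

    #include <cstdio>

    struct Map {};
    struct HeapObject { const Map* map; };
    struct WeakCell { const Map* map; };  // nullptr once the map has died

    template <typename Handler>
    void DispatchWeakMap(const HeapObject& obj, const WeakCell& cell,
                         Handler success) {
      if (cell.map != nullptr && obj.map == cell.map)
        success();  // j(equal, success) in the patch
      // otherwise fall through to the stub's &fail label
    }

    int main() {
      Map m;
      HeapObject obj{&m};
      DispatchWeakMap(obj, WeakCell{&m}, [] { std::puts("hit handler"); });
      DispatchWeakMap(obj, WeakCell{nullptr}, [] { std::puts("never runs"); });
      return 0;
    }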
 
@@ -4367,10 +4381,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   }
 
   bind(&done);
-  // Check that the value is a normal propety.
+  // Check that the value is a field property.
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  DCHECK_EQ(NORMAL, 0);
+  DCHECK_EQ(FIELD, 0);
   Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
        Smi::FromInt(PropertyDetails::TypeField::kMask));
   j(not_zero, miss);
@@ -5113,18 +5127,6 @@ void MacroAssembler::CheckPageFlag(
 }
 
 
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
-                                        Register scratch,
-                                        Label* if_deprecated) {
-  if (map->CanBeDeprecated()) {
-    Move(scratch, map);
-    movl(scratch, FieldOperand(scratch, Map::kBitField3Offset));
-    andl(scratch, Immediate(Map::Deprecated::kMask));
-    j(not_zero, if_deprecated);
-  }
-}
-
-
 void MacroAssembler::JumpIfBlack(Register object,
                                  Register bitmap_scratch,
                                  Register mask_scratch,
index 24eea38..4656665 100644
@@ -175,10 +175,6 @@ class MacroAssembler: public Assembler {
                      Label* condition_met,
                      Label::Distance condition_met_distance = Label::kFar);
 
-  void CheckMapDeprecated(Handle<Map> map,
-                          Register scratch,
-                          Label* if_deprecated);
-
   // Check if object is in new space.  Jumps if the object is not in new space.
   // The register scratch can be object itself, but scratch will be clobbered.
   void JumpIfNotInNewSpace(Register object,
@@ -847,6 +843,13 @@ class MacroAssembler: public Assembler {
   // Load a global cell into a register.
   void LoadGlobalCell(Register dst, Handle<Cell> cell);
 
+  // Compare the given value with the value of the weak cell.
+  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+
+  // Load the value of the weak cell in the value register. Branch to the given
+  // miss label if the weak cell was cleared.
+  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
+
   // Emit code to discard a non-negative number of pointer-sized elements
   // from the stack, clobbering only the rsp register.
   void Drop(int stack_elements);
@@ -989,14 +992,12 @@ class MacroAssembler: public Assembler {
                 Label* fail,
                 SmiCheckType smi_check_type);
 
-  // Check if the map of an object is equal to a specified map and branch to a
-  // specified target if equal. Skip the smi check if not required (object is
-  // known to be a heap object)
-  void DispatchMap(Register obj,
-                   Register unused,
-                   Handle<Map> map,
-                   Handle<Code> success,
-                   SmiCheckType smi_check_type);
+  // Check if the map of an object is equal to a specified weak map and branch
+  // to a specified target if equal. Skip the smi check if not required
+  // (object is known to be a heap object)
+  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+                       Handle<WeakCell> cell, Handle<Code> success,
+                       SmiCheckType smi_check_type);
 
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
index 861ec7e..51bb3a7 100644
@@ -160,18 +160,17 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
       if (!is_api_function) {
         Label allocate;
         // The code below relies on these assumptions.
-        STATIC_ASSERT(JSFunction::kNoSlackTracking == 0);
-        STATIC_ASSERT(Map::ConstructionCount::kShift +
-                      Map::ConstructionCount::kSize == 32);
+        STATIC_ASSERT(Map::Counter::kShift + Map::Counter::kSize == 32);
         // Check if slack tracking is enabled.
         __ mov(esi, FieldOperand(eax, Map::kBitField3Offset));
-        __ shr(esi, Map::ConstructionCount::kShift);
-        __ j(zero, &allocate);  // JSFunction::kNoSlackTracking
+        __ shr(esi, Map::Counter::kShift);
+        __ cmp(esi, Map::kSlackTrackingCounterEnd);
+        __ j(less, &allocate);
         // Decrease generous allocation count.
         __ sub(FieldOperand(eax, Map::kBitField3Offset),
-               Immediate(1 << Map::ConstructionCount::kShift));
+               Immediate(1 << Map::Counter::kShift));
 
-        __ cmp(esi, JSFunction::kFinishSlackTracking);
+        __ cmp(esi, Map::kSlackTrackingCounterEnd);
         __ j(not_equal, &allocate);
 
         __ push(eax);
@@ -182,7 +181,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
 
         __ pop(edi);
         __ pop(eax);
-        __ xor_(esi, esi);  // JSFunction::kNoSlackTracking
+        __ mov(esi, Map::kSlackTrackingCounterEnd - 1);
 
         __ bind(&allocate);
       }
@@ -219,8 +218,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
         Label no_inobject_slack_tracking;
 
         // Check if slack tracking is enabled.
-        __ cmp(esi, JSFunction::kNoSlackTracking);
-        __ j(equal, &no_inobject_slack_tracking);
+        __ cmp(esi, Map::kSlackTrackingCounterEnd);
+        __ j(less, &no_inobject_slack_tracking);
 
         // Allocate object with a slack.
         __ movzx_b(esi,
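
Slack-tracking state now lives in the Map::Counter bit field at the top of bit_field3 (the STATIC_ASSERT pins it to the high bits), so the stub can shift the counter out for comparison and decrement it with a single sub of 1 << shift. A toy version with made-up field constants:

    #include <cstdint>
    #include <cstdio>

    // Made-up layout: counter in the top 8 bits, like Map::Counter.
    constexpr uint32_t kCounterShift = 24;
    constexpr uint32_t kSlackTrackingCounterEnd = 8;  // illustrative threshold

    int main() {
      uint32_t bit_field3 = (15u << kCounterShift) | 0x00ABCDEFu;
      uint32_t counter = bit_field3 >> kCounterShift;  // shr esi, kShift
      if (counter >= kSlackTrackingCounterEnd) {
        // Tracking still active: decrease the generous allocation count.
        // Safe while counter > 0 because no borrow leaves the top field.
        bit_field3 -= 1u << kCounterShift;
      }
      std::printf("counter now %u, low bits intact: %06X\n",
                  static_cast<unsigned>(bit_field3 >> kCounterShift),
                  static_cast<unsigned>(bit_field3 & 0x00FFFFFFu));
      return 0;
    }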
index 202dec6..1264dd9 100644
@@ -333,8 +333,19 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) {
   Label miss;
   Register receiver = LoadDescriptor::ReceiverRegister();
 
-  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
-                                                          ebx, &miss);
+  if (FLAG_vector_ics) {
+    // With careful management, we won't have to save slot and vector on
+    // the stack. Simply handle the possibly missing case first.
+    // TODO(mvstanton): this code can be more efficient.
+    __ cmp(FieldOperand(receiver, JSFunction::kPrototypeOrInitialMapOffset),
+           Immediate(isolate()->factory()->the_hole_value()));
+    __ j(equal, &miss);
+    __ TryGetFunctionPrototype(receiver, eax, ebx, &miss);
+    __ ret(0);
+  } else {
+    NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, eax,
+                                                            ebx, &miss);
+  }
   __ bind(&miss);
   PropertyAccessCompiler::TailCallBuiltin(
       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC));
@@ -377,10 +388,17 @@ void LoadIndexedStringStub::Generate(MacroAssembler* masm) {
 
   Register receiver = LoadDescriptor::ReceiverRegister();
   Register index = LoadDescriptor::NameRegister();
-  Register scratch = ebx;
+  Register scratch = edi;
   DCHECK(!scratch.is(receiver) && !scratch.is(index));
   Register result = eax;
   DCHECK(!result.is(scratch));
+  DCHECK(!FLAG_vector_ics ||
+         (!scratch.is(VectorLoadICDescriptor::VectorRegister()) &&
+          result.is(VectorLoadICDescriptor::SlotRegister())));
+
+  // StringCharAtGenerator doesn't use the result register until it's passed
+  // the different miss possibilities. If it did, we would have a conflict
+  // when FLAG_vector_ics is true.
 
   StringCharAtGenerator char_at_generator(receiver, index, scratch, result,
                                           &miss,  // When not a string.
@@ -1900,6 +1918,10 @@ void CallICStub::Generate(MacroAssembler* masm) {
   // edi - function
   // edx - slot id
   Isolate* isolate = masm->isolate();
+  const int with_types_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
+  const int generic_offset =
+      FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
   Label extra_checks_or_miss, slow_start;
   Label slow, non_function, wrap, cont;
   Label have_js_function;
@@ -1939,35 +1961,66 @@ void CallICStub::Generate(MacroAssembler* masm) {
   }
 
   __ bind(&extra_checks_or_miss);
-  Label miss;
+  Label uninitialized, miss;
 
   __ mov(ecx, FieldOperand(ebx, edx, times_half_pointer_size,
                            FixedArray::kHeaderSize));
   __ cmp(ecx, Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
   __ j(equal, &slow_start);
+
+  // The following cases attempt to handle MISS cases without going to the
+  // runtime.
+  if (FLAG_trace_ic) {
+    __ jmp(&miss);
+  }
+
   __ cmp(ecx, Immediate(TypeFeedbackVector::UninitializedSentinel(isolate)));
+  __ j(equal, &uninitialized);
+
+  // We are going megamorphic. If the feedback is a JSFunction, it is fine
+  // to handle it here. More complex cases are dealt with in the runtime.
+  __ AssertNotSmi(ecx);
+  __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &miss);
+  __ mov(
+      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+      Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
+  // We have to update statistics for runtime profiling.
+  __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+  __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
+  __ jmp(&slow_start);
+
+  __ bind(&uninitialized);
+
+  // We are going monomorphic, provided we actually have a JSFunction.
+  __ JumpIfSmi(edi, &miss);
+
+  // Go to the miss case if we do not have a function.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &miss);
+
+  // Make sure the function is not the Array() function, which requires special
+  // behavior on MISS.
+  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, ecx);
+  __ cmp(edi, ecx);
   __ j(equal, &miss);
 
-  if (!FLAG_trace_ic) {
-    // We are going megamorphic. If the feedback is a JSFunction, it is fine
-    // to handle it here. More complex cases are dealt with in the runtime.
-    __ AssertNotSmi(ecx);
-    __ CmpObjectType(ecx, JS_FUNCTION_TYPE, ecx);
-    __ j(not_equal, &miss);
-    __ mov(FieldOperand(ebx, edx, times_half_pointer_size,
-                        FixedArray::kHeaderSize),
-           Immediate(TypeFeedbackVector::MegamorphicSentinel(isolate)));
-    // We have to update statistics for runtime profiling.
-    const int with_types_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kWithTypesIndex);
-    __ sub(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
-    const int generic_offset =
-        FixedArray::OffsetOfElementAt(TypeFeedbackVector::kGenericCountIndex);
-    __ add(FieldOperand(ebx, generic_offset), Immediate(Smi::FromInt(1)));
-    __ jmp(&slow_start);
-  }
+  // Update stats.
+  __ add(FieldOperand(ebx, with_types_offset), Immediate(Smi::FromInt(1)));
+
+  // Store the function.
+  __ mov(
+      FieldOperand(ebx, edx, times_half_pointer_size, FixedArray::kHeaderSize),
+      edi);
+
+  // Update the write barrier.
+  __ mov(eax, edi);
+  __ RecordWriteArray(ebx, eax, edx, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+                      OMIT_SMI_CHECK);
+  __ jmp(&have_js_function);
 
-  // We are here because tracing is on or we are going monomorphic.
+  // We are here because tracing is on or we encountered a MISS case we can't
+  // handle here.
   __ bind(&miss);
   GenerateMiss(masm);
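
The expanded miss handling implements a small state machine over the feedback slot: an uninitialized slot goes monomorphic on a plain JSFunction (but not on Array(), which needs special MISS behavior), and a monomorphic slot whose cached target stops matching goes megamorphic, moving one count from with_types to generic. A compact model under those simplifications:

    #include <cstdio>

    enum class State { kUninitialized, kMonomorphic, kMegamorphic };

    struct FeedbackSlot {
      State state = State::kUninitialized;
      const void* target = nullptr;  // cached function when monomorphic
      int with_types = 0, generic = 0;
    };

    void OnCall(FeedbackSlot& slot, const void* fn, bool is_array_function) {
      switch (slot.state) {
        case State::kUninitialized:
          if (is_array_function) return;     // left to the real miss path
          slot.state = State::kMonomorphic;  // store fn + write barrier
          slot.target = fn;
          slot.with_types++;
          return;
        case State::kMonomorphic:
          if (slot.target == fn) return;     // fast path
          slot.state = State::kMegamorphic;
          slot.with_types--;  // sub with_types, add generic, as in the patch
          slot.generic++;
          return;
        case State::kMegamorphic:
          return;
      }
    }

    int main() {
      FeedbackSlot slot;
      int f, g;
      OnCall(slot, &f, false);  // uninitialized -> monomorphic
      OnCall(slot, &f, false);  // monomorphic hit
      OnCall(slot, &g, false);  // different target -> megamorphic
      std::printf("state=%d with_types=%d generic=%d\n",
                  static_cast<int>(slot.state), slot.with_types, slot.generic);
      return 0;
    }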
 
@@ -2473,18 +2526,18 @@ void InstanceofStub::Generate(MacroAssembler* masm) {
 
 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
   // If the receiver is a smi trigger the non-string case.
-  STATIC_ASSERT(kSmiTag == 0);
-  __ JumpIfSmi(object_, receiver_not_string_);
-
-  // Fetch the instance type of the receiver into result register.
-  __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
-  __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
-  // If the receiver is not a string trigger the non-string case.
-  __ test(result_, Immediate(kIsNotStringMask));
-  __ j(not_zero, receiver_not_string_);
+  if (check_mode_ == RECEIVER_IS_UNKNOWN) {
+    __ JumpIfSmi(object_, receiver_not_string_);
+
+    // Fetch the instance type of the receiver into result register.
+    __ mov(result_, FieldOperand(object_, HeapObject::kMapOffset));
+    __ movzx_b(result_, FieldOperand(result_, Map::kInstanceTypeOffset));
+    // If the receiver is not a string trigger the non-string case.
+    __ test(result_, Immediate(kIsNotStringMask));
+    __ j(not_zero, receiver_not_string_);
+  }
 
   // If the index is non-smi trigger the non-smi case.
-  STATIC_ASSERT(kSmiTag == 0);
   __ JumpIfNotSmi(index_, &index_not_smi_);
   __ bind(&got_smi_index_);
 
@@ -2862,6 +2915,52 @@ void SubStringStub::Generate(MacroAssembler* masm) {
 }
 
 
+void ToNumberStub::Generate(MacroAssembler* masm) {
+  // The ToNumber stub takes one argument in eax.
+  Label not_smi;
+  __ JumpIfNotSmi(eax, &not_smi, Label::kNear);
+  __ Ret();
+  __ bind(&not_smi);
+
+  Label not_heap_number;
+  __ CompareMap(eax, masm->isolate()->factory()->heap_number_map());
+  __ j(not_equal, &not_heap_number, Label::kNear);
+  __ Ret();
+  __ bind(&not_heap_number);
+
+  Label not_string, slow_string;
+  __ CmpObjectType(eax, FIRST_NONSTRING_TYPE, edi);
+  // eax: object
+  // edi: object map
+  __ j(above_equal, &not_string, Label::kNear);
+  // Check if string has a cached array index.
+  __ test(FieldOperand(eax, String::kHashFieldOffset),
+          Immediate(String::kContainsCachedArrayIndexMask));
+  __ j(not_zero, &slow_string, Label::kNear);
+  __ mov(eax, FieldOperand(eax, String::kHashFieldOffset));
+  __ IndexFromHash(eax, eax);
+  __ Ret();
+  __ bind(&slow_string);
+  __ pop(ecx);   // Pop return address.
+  __ push(eax);  // Push argument.
+  __ push(ecx);  // Push return address.
+  __ TailCallRuntime(Runtime::kStringToNumber, 1, 1);
+  __ bind(&not_string);
+
+  Label not_oddball;
+  __ CmpInstanceType(edi, ODDBALL_TYPE);
+  __ j(not_equal, &not_oddball, Label::kNear);
+  __ mov(eax, FieldOperand(eax, Oddball::kToNumberOffset));
+  __ Ret();
+  __ bind(&not_oddball);
+
+  __ pop(ecx);   // Pop return address.
+  __ push(eax);  // Push argument.
+  __ push(ecx);  // Push return address.
+  __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION);
+}
+
+
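
The new ToNumberStub tries the cheap conversions in a fixed order (Smi, HeapNumber, string with a cached array index, then the Oddball's to_number field) and only falls back to the runtime or the TO_NUMBER builtin at the end. The same ordered dispatch as a standalone C++17 sketch, with std::stod standing in for both string paths:

    #include <cstdio>
    #include <string>
    #include <variant>

    struct Oddball { double to_number; };  // e.g. true -> 1.0, null -> +0
    using Value = std::variant<int, double, std::string, Oddball>;

    double ToNumber(const Value& v) {
      if (const int* smi = std::get_if<int>(&v)) return *smi;    // smi path
      if (const double* n = std::get_if<double>(&v)) return *n;  // heap number
      if (const std::string* s = std::get_if<std::string>(&v))
        return std::stod(*s);  // cached-index fast path or runtime, merged
      return std::get<Oddball>(v).to_number;                     // oddball
    }

    int main() {
      std::printf("%g %g %g %g\n", ToNumber(7), ToNumber(2.5),
                  ToNumber(std::string("42")), ToNumber(Oddball{1.0}));
      return 0;
    }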
 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm,
                                                    Register left,
                                                    Register right,
index 03ff477..a079b6f 100644
@@ -76,7 +76,7 @@ class NameDictionaryLookupStub: public PlatformCodeStub {
                                      Register r0,
                                      Register r1);
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
  private:
   static const int kInlinedProbes = 4;
@@ -139,7 +139,7 @@ class RecordWriteStub: public PlatformCodeStub {
     INCREMENTAL_COMPACTION
   };
 
-  virtual bool SometimesSetsUpAFrame() { return false; }
+  bool SometimesSetsUpAFrame() OVERRIDE { return false; }
 
   static const byte kTwoByteNopInstruction = 0x3c;  // Cmpb al, #imm8.
   static const byte kTwoByteJumpInstruction = 0xeb;  // Jmp #imm8.
@@ -328,9 +328,9 @@ class RecordWriteStub: public PlatformCodeStub {
     kUpdateRememberedSetOnNoNeedToInformIncrementalMarker
   };
 
-  virtual inline Major MajorKey() const FINAL OVERRIDE { return RecordWrite; }
+  inline Major MajorKey() const FINAL { return RecordWrite; }
 
-  virtual void Generate(MacroAssembler* masm) OVERRIDE;
+  void Generate(MacroAssembler* masm) OVERRIDE;
   void GenerateIncremental(MacroAssembler* masm, Mode mode);
   void CheckNeedsToInformIncrementalMarker(
       MacroAssembler* masm,
@@ -338,7 +338,7 @@ class RecordWriteStub: public PlatformCodeStub {
       Mode mode);
   void InformIncrementalMarker(MacroAssembler* masm);
 
-  void Activate(Code* code) {
+  void Activate(Code* code) OVERRIDE {
     code->GetHeap()->incremental_marking()->ActivateGeneratedStub(code);
   }
 
index a76c7a7..0e53a0e 100644
@@ -27,7 +27,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
   HandleScope scope(isolate);
 
   // Compute the size of relocation information needed for the code
-  // patching in Deoptimizer::DeoptimizeFunction.
+  // patching in Deoptimizer::PatchCodeForDeoptimization below.
   int min_reloc_size = 0;
   int prev_pc_offset = 0;
   DeoptimizationInputData* deopt_data =
index 729655d..0f292cc 100644
@@ -188,10 +188,10 @@ void FullCodeGenerator::Generate() {
     Comment cmnt(masm_, "[ Allocate context");
     bool need_write_barrier = true;
     // Argument to NewContext is the function, which is still in edi.
-    if (FLAG_harmony_scoping && info->scope()->is_global_scope()) {
+    if (FLAG_harmony_scoping && info->scope()->is_script_scope()) {
       __ push(edi);
       __ Push(info->scope()->GetScopeInfo());
-      __ CallRuntime(Runtime::kNewGlobalContext, 2);
+      __ CallRuntime(Runtime::kNewScriptContext, 2);
     } else if (heap_slots <= FastNewContextStub::kMaximumSlots) {
       FastNewContextStub stub(isolate(), heap_slots);
       __ CallStub(&stub);
@@ -865,7 +865,7 @@ void FullCodeGenerator::VisitModuleDeclaration(ModuleDeclaration* declaration) {
   EmitDebugCheckDeclarationContext(variable);
 
   // Load instance object.
-  __ LoadContext(eax, scope_->ContextChainLength(scope_->GlobalScope()));
+  __ LoadContext(eax, scope_->ContextChainLength(scope_->ScriptScope()));
   __ mov(eax, ContextOperand(eax, variable->interface()->Index()));
   __ mov(eax, ContextOperand(eax, Context::EXTENSION_INDEX));
 
@@ -1034,6 +1034,7 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 
   // Get the object to enumerate over. If the object is null or undefined, skip
   // over the loop.  See ECMA-262 version 5, section 12.6.4.
+  SetExpressionPosition(stmt->enumerable());
   VisitForAccumulatorValue(stmt->enumerable());
   __ cmp(eax, isolate()->factory()->undefined_value());
   __ j(equal, &exit);
@@ -1128,6 +1129,8 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
   // Generate code for doing the condition check.
   PrepareForBailoutForId(stmt->BodyId(), NO_REGISTERS);
   __ bind(&loop);
+  SetExpressionPosition(stmt->each());
+
   __ mov(eax, Operand(esp, 0 * kPointerSize));  // Get the current index.
   __ cmp(eax, Operand(esp, 1 * kPointerSize));  // Compare to the array length.
   __ j(above_equal, loop_statement.break_label());
@@ -1194,48 +1197,6 @@ void FullCodeGenerator::VisitForInStatement(ForInStatement* stmt) {
 }
 
 
-void FullCodeGenerator::VisitForOfStatement(ForOfStatement* stmt) {
-  Comment cmnt(masm_, "[ ForOfStatement");
-  SetStatementPosition(stmt);
-
-  Iteration loop_statement(this, stmt);
-  increment_loop_depth();
-
-  // var iterator = iterable[Symbol.iterator]();
-  VisitForEffect(stmt->assign_iterator());
-
-  // Loop entry.
-  __ bind(loop_statement.continue_label());
-
-  // result = iterator.next()
-  VisitForEffect(stmt->next_result());
-
-  // if (result.done) break;
-  Label result_not_done;
-  VisitForControl(stmt->result_done(),
-                  loop_statement.break_label(),
-                  &result_not_done,
-                  &result_not_done);
-  __ bind(&result_not_done);
-
-  // each = result.value
-  VisitForEffect(stmt->assign_each());
-
-  // Generate code for the body of the loop.
-  Visit(stmt->body());
-
-  // Check stack before looping.
-  PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
-  EmitBackEdgeBookkeeping(stmt, loop_statement.continue_label());
-  __ jmp(loop_statement.continue_label());
-
-  // Exit and decrement the loop depth.
-  PrepareForBailoutForId(stmt->ExitId(), NO_REGISTERS);
-  __ bind(loop_statement.break_label());
-  decrement_loop_depth();
-}
-
-
 void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
                                        bool pretenure) {
   // Use the fast case closure allocation code that allocates in new
@@ -1295,6 +1256,19 @@ void FullCodeGenerator::EmitLoadHomeObject(SuperReference* expr) {
 }
 
 
+void FullCodeGenerator::EmitSetHomeObjectIfNeeded(Expression* initializer,
+                                                  int offset) {
+  if (NeedsHomeObject(initializer)) {
+    __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
+    __ mov(StoreDescriptor::NameRegister(),
+           Immediate(isolate()->factory()->home_object_symbol()));
+    __ mov(StoreDescriptor::ValueRegister(),
+           Operand(esp, offset * kPointerSize));
+    CallStoreIC();
+  }
+}
+
+
 void FullCodeGenerator::EmitLoadGlobalCheckExtensions(VariableProxy* proxy,
                                                       TypeofState typeof_state,
                                                       Label* slow) {
@@ -1659,6 +1633,14 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
             __ mov(StoreDescriptor::ReceiverRegister(), Operand(esp, 0));
             CallStoreIC(key->LiteralFeedbackId());
             PrepareForBailoutForId(key->id(), NO_REGISTERS);
+
+            if (NeedsHomeObject(value)) {
+              __ mov(StoreDescriptor::ReceiverRegister(), eax);
+              __ mov(StoreDescriptor::NameRegister(),
+                     Immediate(isolate()->factory()->home_object_symbol()));
+              __ mov(StoreDescriptor::ValueRegister(), Operand(esp, 0));
+              CallStoreIC();
+            }
           } else {
             VisitForEffect(value);
           }
@@ -1668,6 +1650,7 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
         VisitForStackValue(key);
         VisitForStackValue(value);
         if (property->emit_store()) {
+          EmitSetHomeObjectIfNeeded(value, 2);
           __ push(Immediate(Smi::FromInt(SLOPPY)));  // Strict mode
           __ CallRuntime(Runtime::kSetProperty, 4);
         } else {
@@ -1700,7 +1683,9 @@ void FullCodeGenerator::VisitObjectLiteral(ObjectLiteral* expr) {
     __ push(Operand(esp, 0));  // Duplicate receiver.
     VisitForStackValue(it->first);
     EmitAccessor(it->second->getter);
+    EmitSetHomeObjectIfNeeded(it->second->getter, 2);
     EmitAccessor(it->second->setter);
+    EmitSetHomeObjectIfNeeded(it->second->setter, 3);
     __ push(Immediate(Smi::FromInt(NONE)));
     __ CallRuntime(Runtime::kDefineAccessorPropertyUnchecked, 5);
   }
@@ -2127,15 +2112,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   VisitForAccumulatorValue(value);
   __ pop(ebx);
 
-  // Check generator state.
-  Label wrong_state, closed_state, done;
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorExecuting < 0);
-  STATIC_ASSERT(JSGeneratorObject::kGeneratorClosed == 0);
-  __ cmp(FieldOperand(ebx, JSGeneratorObject::kContinuationOffset),
-         Immediate(Smi::FromInt(0)));
-  __ j(equal, &closed_state);
-  __ j(less, &wrong_state);
-
   // Load suspended function and context.
   __ mov(esi, FieldOperand(ebx, JSGeneratorObject::kContextOffset));
   __ mov(edi, FieldOperand(ebx, JSGeneratorObject::kFunctionOffset));
@@ -2157,7 +2133,7 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
 
   // Enter a new JavaScript frame, and initialize its slots as they were when
   // the generator was suspended.
-  Label resume_frame;
+  Label resume_frame, done;
   __ bind(&push_frame);
   __ call(&resume_frame);
   __ jmp(&done);
@@ -2204,25 +2180,6 @@ void FullCodeGenerator::EmitGeneratorResume(Expression *generator,
   // Not reached: the runtime call returns elsewhere.
   __ Abort(kGeneratorFailedToResume);
 
-  // Reach here when generator is closed.
-  __ bind(&closed_state);
-  if (resume_mode == JSGeneratorObject::NEXT) {
-    // Return completed iterator result when generator is closed.
-    __ push(Immediate(isolate()->factory()->undefined_value()));
-    // Pop value from top-of-stack slot; box result into result register.
-    EmitCreateIteratorResult(true);
-  } else {
-    // Throw the provided value.
-    __ push(eax);
-    __ CallRuntime(Runtime::kThrow, 1);
-  }
-  __ jmp(&done);
-
-  // Throw error if we attempt to operate on a running generator.
-  __ bind(&wrong_state);
-  __ push(ebx);
-  __ CallRuntime(Runtime::kThrowGeneratorStateError, 1);
-
   __ bind(&done);
   context()->Plug(result_register());
 }
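
The deleted closed-state and wrong-state blocks relied on the continuation encoding documented by the removed STATIC_ASSERTs: zero means closed, any negative value means the generator is currently executing, and a non-negative value is the suspend point; those checks are now handled before this code is reached. The encoding spelled out (the executing value is only asserted to be negative):

    #include <cstdio>

    constexpr int kGeneratorClosed = 0;      // asserted == 0 in the old code
    constexpr int kGeneratorExecuting = -1;  // asserted only to be < 0

    const char* Classify(int continuation) {
      if (continuation == kGeneratorClosed) return "closed";
      if (continuation < 0) return "executing";
      return "suspended";  // value is the resume point
    }

    int main() {
      std::printf("%s %s %s\n", Classify(kGeneratorClosed),
                  Classify(kGeneratorExecuting), Classify(7));
      return 0;
    }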
@@ -2435,6 +2392,7 @@ void FullCodeGenerator::EmitClassDefineProperties(ClassLiteral* lit) {
     }
     VisitForStackValue(key);
     VisitForStackValue(value);
+    EmitSetHomeObjectIfNeeded(value, 2);
 
     switch (property->kind()) {
       case ObjectLiteral::Property::CONSTANT:
@@ -2604,7 +2562,6 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
     __ CallRuntime(Runtime::kThrowReferenceError, 1);
     __ bind(&assign);
     EmitStoreToStackLocalOrContextSlot(var, location);
-
   } else if (!var->is_const_mode() || op == Token::INIT_CONST) {
     if (var->IsLookupSlot()) {
       // Assignment to var.
@@ -2626,8 +2583,9 @@ void FullCodeGenerator::EmitVariableAssignment(Variable* var,
       }
       EmitStoreToStackLocalOrContextSlot(var, location);
     }
+  } else if (IsSignallingAssignmentToConst(var, op, strict_mode())) {
+    __ CallRuntime(Runtime::kThrowConstAssignError, 0);
   }
-  // Non-initializing assignments to consts are ignored.
 }
 
 
@@ -5017,7 +4975,7 @@ void FullCodeGenerator::LoadContextField(Register dst, int context_index) {
 
 void FullCodeGenerator::PushFunctionArgumentForContextAllocation() {
   Scope* declaration_scope = scope()->DeclarationScope();
-  if (declaration_scope->is_global_scope() ||
+  if (declaration_scope->is_script_scope() ||
       declaration_scope->is_module_scope()) {
     // Contexts nested in the native context have a canonical empty function
     // as their closure, not the anonymous closure containing the global
index f8872d7..c292bb9 100644
@@ -32,9 +32,9 @@ class SafepointGenerator FINAL : public CallWrapper {
         deopt_mode_(mode) {}
   virtual ~SafepointGenerator() {}
 
-  virtual void BeforeCall(int call_size) const OVERRIDE {}
+  void BeforeCall(int call_size) const OVERRIDE {}
 
-  virtual void AfterCall() const OVERRIDE {
+  void AfterCall() const OVERRIDE {
     codegen_->RecordSafepoint(pointers_, deopt_mode_);
   }
 
@@ -2924,10 +2924,10 @@ void LCodeGen::DoInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr) {
                                   LInstanceOfKnownGlobal* instr,
                                   const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceOfKnownGlobal(instr_, &map_check_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
     Label* map_check() { return &map_check_; }
    private:
     LInstanceOfKnownGlobal* instr_;
@@ -3050,6 +3050,7 @@ void LCodeGen::EmitReturn(LReturn* instr, bool dynamic_frame_alignment) {
     }
     __ Ret((parameter_count + extra_value_count) * kPointerSize, ecx);
   } else {
+    DCHECK(info()->IsStub());  // Functions would need to drop one more value.
     Register reg = ToRegister(instr->parameter_count());
     // The argument count parameter is a smi
     __ SmiUntag(reg);
@@ -3125,14 +3126,17 @@ template <class T>
 void LCodeGen::EmitVectorLoadICRegisters(T* instr) {
   DCHECK(FLAG_vector_ics);
   Register vector_register = ToRegister(instr->temp_vector());
+  Register slot_register = VectorLoadICDescriptor::SlotRegister();
   DCHECK(vector_register.is(VectorLoadICDescriptor::VectorRegister()));
+  DCHECK(slot_register.is(eax));
+
+  AllowDeferredHandleDereference vector_structure_check;
   Handle<TypeFeedbackVector> vector = instr->hydrogen()->feedback_vector();
   __ mov(vector_register, vector);
   // No need to allocate this register.
-  DCHECK(VectorLoadICDescriptor::SlotRegister().is(eax));
-  int index = vector->GetIndex(instr->hydrogen()->slot());
-  __ mov(VectorLoadICDescriptor::SlotRegister(),
-         Immediate(Smi::FromInt(index)));
+  FeedbackVectorICSlot slot = instr->hydrogen()->slot();
+  int index = vector->GetIndex(slot);
+  __ mov(slot_register, Immediate(Smi::FromInt(index)));
 }
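Naming the slot register and adding the second DCHECK makes the helper's contract visible at a glance: it fills both fixed registers required by the vector-IC calling convention. Summarized as a post-condition (descriptive comment only, no new behaviour):

    // Post-condition of EmitVectorLoadICRegisters:
    //   VectorLoadICDescriptor::VectorRegister() <- the TypeFeedbackVector
    //   VectorLoadICDescriptor::SlotRegister()   <- Smi::FromInt(index)
    // The vector-based IC then looks up its feedback entry by that index
    // instead of re-deriving the slot at runtime.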
 
 
@@ -3737,45 +3741,81 @@ void LCodeGen::DoTailCallThroughMegamorphicCache(
   Register name = ToRegister(instr->name());
   DCHECK(receiver.is(LoadDescriptor::ReceiverRegister()));
   DCHECK(name.is(LoadDescriptor::NameRegister()));
+  Register slot = FLAG_vector_ics ? ToRegister(instr->slot()) : no_reg;
+  Register vector = FLAG_vector_ics ? ToRegister(instr->vector()) : no_reg;
 
   Register scratch = ebx;
-  Register extra = eax;
+  Register extra = edi;
+  DCHECK(!extra.is(slot) && !extra.is(vector));
   DCHECK(!scratch.is(receiver) && !scratch.is(name));
   DCHECK(!extra.is(receiver) && !extra.is(name));
 
   // Important for the tail-call.
   bool must_teardown_frame = NeedsEagerFrame();
 
-  // The probe will tail call to a handler if found.
-  isolate()->stub_cache()->GenerateProbe(masm(), instr->hydrogen()->flags(),
-                                         must_teardown_frame, receiver, name,
-                                         scratch, extra);
+  if (!instr->hydrogen()->is_just_miss()) {
+    if (FLAG_vector_ics) {
+      __ push(slot);
+      __ push(vector);
+    }
+
+    // The probe will tail call to a handler if found.
+    // If --vector-ics is on, the handler knows to pop the two args first.

+    DCHECK(!instr->hydrogen()->is_keyed_load());
+    isolate()->stub_cache()->GenerateProbe(
+        masm(), Code::LOAD_IC, instr->hydrogen()->flags(), must_teardown_frame,
+        receiver, name, scratch, extra);
+
+    if (FLAG_vector_ics) {
+      __ pop(vector);
+      __ pop(slot);
+    }
+  }
 
   // Tail call to miss if we ended up here.
   if (must_teardown_frame) __ leave();
-  LoadIC::GenerateMiss(masm());
+  if (instr->hydrogen()->is_keyed_load()) {
+    KeyedLoadIC::GenerateMiss(masm());
+  } else {
+    LoadIC::GenerateMiss(masm());
+  }
 }
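The rewritten tail call now has three cooperating paths; the control flow, paraphrased from the code above:

    // 1. Unless hydrogen marked this a forced miss (is_just_miss), probe
    //    the stub cache; with --vector-ics the slot and vector are pushed
    //    first because a found handler is expected to pop them.
    // 2. If the probe falls through, tear down the eager frame (if any).
    // 3. Tail-call the generic miss handler, keyed or named depending on
    //    is_keyed_load().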
 
 
 void LCodeGen::DoCallWithDescriptor(LCallWithDescriptor* instr) {
   DCHECK(ToRegister(instr->result()).is(eax));
 
-  LPointerMap* pointers = instr->pointer_map();
-  SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+  if (instr->hydrogen()->IsTailCall()) {
+    if (NeedsEagerFrame()) __ leave();
 
-  if (instr->target()->IsConstantOperand()) {
-    LConstantOperand* target = LConstantOperand::cast(instr->target());
-    Handle<Code> code = Handle<Code>::cast(ToHandle(target));
-    generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
-    __ call(code, RelocInfo::CODE_TARGET);
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      __ jmp(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ jmp(target);
+    }
   } else {
-    DCHECK(instr->target()->IsRegister());
-    Register target = ToRegister(instr->target());
-    generator.BeforeCall(__ CallSize(Operand(target)));
-    __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
-    __ call(target);
+    LPointerMap* pointers = instr->pointer_map();
+    SafepointGenerator generator(this, pointers, Safepoint::kLazyDeopt);
+
+    if (instr->target()->IsConstantOperand()) {
+      LConstantOperand* target = LConstantOperand::cast(instr->target());
+      Handle<Code> code = Handle<Code>::cast(ToHandle(target));
+      generator.BeforeCall(__ CallSize(code, RelocInfo::CODE_TARGET));
+      __ call(code, RelocInfo::CODE_TARGET);
+    } else {
+      DCHECK(instr->target()->IsRegister());
+      Register target = ToRegister(instr->target());
+      generator.BeforeCall(__ CallSize(Operand(target)));
+      __ add(target, Immediate(Code::kHeaderSize - kHeapObjectTag));
+      __ call(target);
+    }
+    generator.AfterCall();
   }
-  generator.AfterCall();
 }
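The new IsTailCall() branch is the classic call/jump split. Paraphrasing the two shapes side by side (comments only; both fragments appear in the code above):

    // Regular call: keep the frame, record a lazy-deopt safepoint, and
    // return here afterwards:
    //   generator.BeforeCall(__ CallSize(Operand(target)));
    //   __ call(target);
    //   generator.AfterCall();
    // Tail call: tear down our frame first, then jump; the callee returns
    // straight to our caller, so no safepoint or pointer map is needed:
    //   if (NeedsEagerFrame()) __ leave();
    //   __ jmp(target);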
 
 
@@ -3872,10 +3912,11 @@ void LCodeGen::DoMathAbs(LMathAbs* instr) {
                                     LMathAbs* instr,
                                     const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LMathAbs* instr_;
   };
@@ -4063,8 +4104,8 @@ void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     __ push(temp_result);
     __ CallRuntimeSaveDoubles(Runtime::kMathSqrtRT);
-    RecordSafepointWithRegisters(
-        instr->pointer_map(), 0, Safepoint::kNoLazyDeopt);
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
+                                 Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(temp_result, eax);
   }
   X87PrepareToWrite(result_reg);
@@ -4278,7 +4319,7 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
     __ mov(esi, Operand(ebp, StandardFrameConstants::kContextOffset));
     __ push(temp_result);
     __ CallRuntimeSaveDoubles(Runtime::kMathExpRT);
-    RecordSafepointWithRegisters(instr->pointer_map(), 0,
+    RecordSafepointWithRegisters(instr->pointer_map(), 1,
                                  Safepoint::kNoLazyDeopt);
     __ StoreToSafepointRegisterSlot(temp_result, eax);
   }
@@ -4780,10 +4821,9 @@ void LCodeGen::DoStringCharCodeAt(LStringCharCodeAt* instr) {
                              LStringCharCodeAt* instr,
                              const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStringCharCodeAt(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStringCharCodeAt(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharCodeAt* instr_;
   };
@@ -4839,10 +4879,11 @@ void LCodeGen::DoStringCharFromCode(LStringCharFromCode* instr) {
                                LStringCharFromCode* instr,
                                const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredStringCharFromCode(instr_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStringCharFromCode* instr_;
   };
@@ -4928,11 +4969,12 @@ void LCodeGen::DoNumberTagI(LNumberTagI* instr) {
                        LNumberTagI* instr,
                        const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
                                        SIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagI* instr_;
   };
@@ -4956,11 +4998,12 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
                        LNumberTagU* instr,
                        const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredNumberTagIU(instr_, instr_->value(), instr_->temp(),
                                        UNSIGNED_INT32);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagU* instr_;
   };
@@ -5045,10 +5088,9 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
                        LNumberTagD* instr,
                        const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredNumberTagD(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredNumberTagD(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LNumberTagD* instr_;
   };
@@ -5291,10 +5333,9 @@ void LCodeGen::DoTaggedToI(LTaggedToI* instr) {
                       LTaggedToI* instr,
                       const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredTaggedToI(instr_, done());
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredTaggedToI(instr_, done()); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LTaggedToI* instr_;
   };
@@ -5356,9 +5397,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
     Label lost_precision, is_nan, minus_zero, done;
     X87Register input_reg = ToX87Register(input);
     X87Fxch(input_reg);
-    Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
     __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
-                 &lost_precision, &is_nan, &minus_zero, dist);
+                 &lost_precision, &is_nan, &minus_zero);
     __ jmp(&done);
     __ bind(&lost_precision);
     DeoptimizeIf(no_condition, instr, "lost precision");
@@ -5381,9 +5421,8 @@ void LCodeGen::DoDoubleToSmi(LDoubleToSmi* instr) {
   Label lost_precision, is_nan, minus_zero, done;
   X87Register input_reg = ToX87Register(input);
   X87Fxch(input_reg);
-  Label::Distance dist = DeoptEveryNTimes() ? Label::kFar : Label::kNear;
   __ X87TOSToI(result_reg, instr->hydrogen()->GetMinusZeroMode(),
-               &lost_precision, &is_nan, &minus_zero, dist);
+               &lost_precision, &is_nan, &minus_zero);
   __ jmp(&done);
   __ bind(&lost_precision);
   DeoptimizeIf(no_condition, instr, "lost precision");
@@ -5497,11 +5536,12 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
         : LDeferredCode(codegen, x87_stack), instr_(instr), object_(object) {
       SetExit(check_maps());
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredInstanceMigration(instr_, object_);
     }
     Label* check_maps() { return &check_maps_; }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LCheckMaps* instr_;
     Label check_maps_;
@@ -5720,10 +5760,9 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
                      LAllocate* instr,
                      const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredAllocate(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredAllocate(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LAllocate* instr_;
   };
@@ -6089,10 +6128,9 @@ void LCodeGen::DoStackCheck(LStackCheck* instr) {
                        LStackCheck* instr,
                        const X87Stack& x87_stack)
         : LDeferredCode(codegen, x87_stack), instr_(instr) { }
-    virtual void Generate() OVERRIDE {
-      codegen()->DoDeferredStackCheck(instr_);
-    }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    void Generate() OVERRIDE { codegen()->DoDeferredStackCheck(instr_); }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LStackCheck* instr_;
   };
@@ -6240,10 +6278,11 @@ void LCodeGen::DoLoadFieldByIndex(LLoadFieldByIndex* instr) {
           object_(object),
           index_(index) {
     }
-    virtual void Generate() OVERRIDE {
+    void Generate() OVERRIDE {
       codegen()->DoDeferredLoadMutableDouble(instr_, object_, index_);
     }
-    virtual LInstruction* instr() OVERRIDE { return instr_; }
+    LInstruction* instr() OVERRIDE { return instr_; }
+
    private:
     LLoadFieldByIndex* instr_;
     Register object_;
index b00a0d9..3a52dc4 100644
@@ -1157,9 +1157,17 @@ LInstruction* LChunkBuilder::DoTailCallThroughMegamorphicCache(
       UseFixed(instr->receiver(), LoadDescriptor::ReceiverRegister());
   LOperand* name_register =
       UseFixed(instr->name(), LoadDescriptor::NameRegister());
+  LOperand* slot = NULL;
+  LOperand* vector = NULL;
+  if (FLAG_vector_ics) {
+    slot = UseFixed(instr->slot(), VectorLoadICDescriptor::SlotRegister());
+    vector =
+        UseFixed(instr->vector(), VectorLoadICDescriptor::VectorRegister());
+  }
+
   // Not marked as call. It can't deoptimize, and it never returns.
   return new (zone()) LTailCallThroughMegamorphicCache(
-      context, receiver_register, name_register);
+      context, receiver_register, name_register, slot, vector);
 }
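UseFixed is what makes the push/pop in the code generator safe: each operand is pinned to the register the IC descriptor dictates, so the allocator emits any required moves before the instruction rather than at the call site. In outline:

    // slot   -> UseFixed(..., VectorLoadICDescriptor::SlotRegister())
    // vector -> UseFixed(..., VectorLoadICDescriptor::VectorRegister())
    // With --vector-ics off, both operands stay NULL, which the
    // five-input LTailCallThroughMegamorphicCache tolerates.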
 
 
@@ -2113,7 +2121,7 @@ LInstruction* LChunkBuilder::DoLoadGlobalGeneric(HLoadGlobalGeneric* instr) {
   LOperand* global_object =
       UseFixed(instr->global_object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
 
@@ -2174,7 +2182,7 @@ LInstruction* LChunkBuilder::DoLoadNamedGeneric(HLoadNamedGeneric* instr) {
   LOperand* object =
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
   LLoadNamedGeneric* result = new(zone()) LLoadNamedGeneric(
@@ -2239,7 +2247,7 @@ LInstruction* LChunkBuilder::DoLoadKeyedGeneric(HLoadKeyedGeneric* instr) {
       UseFixed(instr->object(), LoadDescriptor::ReceiverRegister());
   LOperand* key = UseFixed(instr->key(), LoadDescriptor::NameRegister());
   LOperand* vector = NULL;
-  if (FLAG_vector_ics) {
+  if (instr->HasVectorAndSlot()) {
     vector = FixedTemp(VectorLoadICDescriptor::VectorRegister());
   }
   LLoadKeyedGeneric* result =
index dbb18ec..5d27fcb 100644
@@ -168,17 +168,13 @@ class LCodeGen;
   V(WrapReceiver)
 
 
-#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)                        \
-  virtual Opcode opcode() const FINAL OVERRIDE {                      \
-    return LInstruction::k##type;                                           \
-  }                                                                         \
-  virtual void CompileToNative(LCodeGen* generator) FINAL OVERRIDE;   \
-  virtual const char* Mnemonic() const FINAL OVERRIDE {               \
-    return mnemonic;                                                        \
-  }                                                                         \
-  static L##type* cast(LInstruction* instr) {                               \
-    DCHECK(instr->Is##type());                                              \
-    return reinterpret_cast<L##type*>(instr);                               \
+#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic)            \
+  Opcode opcode() const FINAL { return LInstruction::k##type; } \
+  void CompileToNative(LCodeGen* generator) FINAL;              \
+  const char* Mnemonic() const FINAL { return mnemonic; }       \
+  static L##type* cast(LInstruction* instr) {                   \
+    DCHECK(instr->Is##type());                                  \
+    return reinterpret_cast<L##type*>(instr);                   \
   }
 
 
@@ -297,11 +293,9 @@ class LTemplateResultInstruction : public LInstruction {
  public:
   // Allow 0 or 1 output operands.
   STATIC_ASSERT(R == 0 || R == 1);
-  virtual bool HasResult() const FINAL OVERRIDE {
-    return R != 0 && result() != NULL;
-  }
+  bool HasResult() const FINAL { return R != 0 && result() != NULL; }
   void set_result(LOperand* operand) { results_[0] = operand; }
-  LOperand* result() const { return results_[0]; }
+  LOperand* result() const OVERRIDE { return results_[0]; }
 
  protected:
   EmbeddedContainer<LOperand*, R> results_;
@@ -319,11 +313,11 @@ class LTemplateInstruction : public LTemplateResultInstruction<R> {
 
  private:
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return I; }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return I; }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return T; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return temps_[i]; }
+  int TempCount() FINAL { return T; }
+  LOperand* TempAt(int i) FINAL { return temps_[i]; }
 };
 
 
@@ -337,8 +331,8 @@ class LGap : public LTemplateInstruction<0, 0, 0> {
   }
 
   // Can't use the DECLARE-macro here because of sub-classes.
-  virtual bool IsGap() const FINAL OVERRIDE { return true; }
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsGap() const FINAL { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   static LGap* cast(LInstruction* instr) {
     DCHECK(instr->IsGap());
     return reinterpret_cast<LGap*>(instr);
@@ -378,7 +372,7 @@ class LInstructionGap FINAL : public LGap {
  public:
   explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
     return !IsRedundant();
   }
 
@@ -390,9 +384,7 @@ class LClobberDoubles FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LClobberDoubles(Isolate* isolate) { }
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
-    return true;
-  }
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE { return true; }
 
   DECLARE_CONCRETE_INSTRUCTION(ClobberDoubles, "clobber-d")
 };
@@ -402,13 +394,13 @@ class LGoto FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
   explicit LGoto(HBasicBlock* block) : block_(block) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE;
   DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
-  virtual bool IsControl() const OVERRIDE { return true; }
+  void PrintDataTo(StringStream* stream) OVERRIDE;
+  bool IsControl() const OVERRIDE { return true; }
 
   int block_id() const { return block_->block_id(); }
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return false;
   }
 
@@ -444,7 +436,7 @@ class LDummyUse FINAL : public LTemplateInstruction<1, 1, 0> {
 
 class LDeoptimize FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool IsControl() const OVERRIDE { return true; }
+  bool IsControl() const OVERRIDE { return true; }
   DECLARE_CONCRETE_INSTRUCTION(Deoptimize, "deoptimize")
   DECLARE_HYDROGEN_ACCESSOR(Deoptimize)
 };
@@ -455,12 +447,10 @@ class LLabel FINAL : public LGap {
   explicit LLabel(HBasicBlock* block)
       : LGap(block), replacement_(NULL) { }
 
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Label, "label")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int block_id() const { return block()->block_id(); }
   bool is_loop_header() const { return block()->IsLoopHeader(); }
@@ -478,9 +468,7 @@ class LLabel FINAL : public LGap {
 
 class LParameter FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(Parameter, "parameter")
 };
 
@@ -499,19 +487,23 @@ class LCallStub FINAL : public LTemplateInstruction<1, 1, 0> {
 
 
 class LTailCallThroughMegamorphicCache FINAL
-    : public LTemplateInstruction<0, 3, 0> {
+    : public LTemplateInstruction<0, 5, 0> {
  public:
-  explicit LTailCallThroughMegamorphicCache(LOperand* context,
-                                            LOperand* receiver,
-                                            LOperand* name) {
+  LTailCallThroughMegamorphicCache(LOperand* context, LOperand* receiver,
+                                   LOperand* name, LOperand* slot,
+                                   LOperand* vector) {
     inputs_[0] = context;
     inputs_[1] = receiver;
     inputs_[2] = name;
+    inputs_[3] = slot;
+    inputs_[4] = vector;
   }
 
   LOperand* context() { return inputs_[0]; }
   LOperand* receiver() { return inputs_[1]; }
   LOperand* name() { return inputs_[2]; }
+  LOperand* slot() { return inputs_[3]; }
+  LOperand* vector() { return inputs_[4]; }
 
   DECLARE_CONCRETE_INSTRUCTION(TailCallThroughMegamorphicCache,
                                "tail-call-through-megamorphic-cache")
@@ -521,9 +513,7 @@ class LTailCallThroughMegamorphicCache FINAL
 
 class LUnknownOSRValue FINAL : public LTemplateInstruction<1, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(UnknownOSRValue, "unknown-osr-value")
 };
 
@@ -533,7 +523,7 @@ class LControlInstruction: public LTemplateInstruction<0, I, T> {
  public:
   LControlInstruction() : false_label_(NULL), true_label_(NULL) { }
 
-  virtual bool IsControl() const FINAL OVERRIDE { return true; }
+  bool IsControl() const FINAL { return true; }
 
   int SuccessorCount() { return hydrogen()->SuccessorCount(); }
   HBasicBlock* SuccessorAt(int i) { return hydrogen()->SuccessorAt(i); }
@@ -626,7 +616,7 @@ class LAccessArgumentsAt FINAL : public LTemplateInstruction<1, 3, 0> {
 
   DECLARE_CONCRETE_INSTRUCTION(AccessArgumentsAt, "access-arguments-at")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -875,7 +865,7 @@ class LCompareNumericAndBranch FINAL : public LControlInstruction<2, 0> {
     return hydrogen()->representation().IsDouble();
   }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1052,7 +1042,7 @@ class LIsObjectAndBranch FINAL : public LControlInstruction<1, 1> {
 
   DECLARE_CONCRETE_INSTRUCTION(IsObjectAndBranch, "is-object-and-branch")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1069,7 +1059,7 @@ class LIsStringAndBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(IsStringAndBranch, "is-string-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsStringAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1084,7 +1074,7 @@ class LIsSmiAndBranch FINAL : public LControlInstruction<1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(IsSmiAndBranch, "is-smi-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsSmiAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1102,7 +1092,7 @@ class LIsUndetectableAndBranch FINAL : public LControlInstruction<1, 1> {
                                "is-undetectable-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(IsUndetectableAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1122,7 +1112,7 @@ class LStringCompareAndBranch FINAL : public LControlInstruction<3, 0> {
                                "string-compare-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(StringCompareAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Token::Value op() const { return hydrogen()->token(); }
 };
@@ -1142,7 +1132,7 @@ class LHasInstanceTypeAndBranch FINAL : public LControlInstruction<1, 1> {
                                "has-instance-type-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(HasInstanceTypeAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1171,7 +1161,7 @@ class LHasCachedArrayIndexAndBranch FINAL
   DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndexAndBranch,
                                "has-cached-array-index-and-branch")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1204,7 +1194,7 @@ class LClassOfTestAndBranch FINAL : public LControlInstruction<1, 2> {
                                "class-of-test-and-branch")
   DECLARE_HYDROGEN_ACCESSOR(ClassOfTestAndBranch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1399,7 +1389,7 @@ class LBranch FINAL : public LControlInstruction<1, 1> {
   DECLARE_CONCRETE_INSTRUCTION(Branch, "branch")
   DECLARE_HYDROGEN_ACCESSOR(Branch)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1552,11 +1542,9 @@ class LArithmeticD FINAL : public LTemplateInstruction<1, 2, 0> {
 
   Token::Value op() const { return op_; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticD;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticD; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
  private:
   Token::Value op_;
@@ -1579,11 +1567,9 @@ class LArithmeticT FINAL : public LTemplateInstruction<1, 3, 0> {
   LOperand* left() { return inputs_[1]; }
   LOperand* right() { return inputs_[2]; }
 
-  virtual Opcode opcode() const OVERRIDE {
-    return LInstruction::kArithmeticT;
-  }
-  virtual void CompileToNative(LCodeGen* generator) OVERRIDE;
-  virtual const char* Mnemonic() const OVERRIDE;
+  Opcode opcode() const OVERRIDE { return LInstruction::kArithmeticT; }
+  void CompileToNative(LCodeGen* generator) OVERRIDE;
+  const char* Mnemonic() const OVERRIDE;
 
   Token::Value op() const { return op_; }
 
@@ -1696,7 +1682,7 @@ class LLoadKeyed FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(LoadKeyed, "load-keyed")
   DECLARE_HYDROGEN_ACCESSOR(LoadKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
   bool key_is_smi() {
     return hydrogen()->key()->representation().IsTagged();
@@ -1794,7 +1780,7 @@ class LLoadContextSlot FINAL : public LTemplateInstruction<1, 1, 0> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1815,7 +1801,7 @@ class LStoreContextSlot FINAL : public LTemplateInstruction<0, 2, 1> {
 
   int slot_index() { return hydrogen()->slot_index(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -1854,7 +1840,7 @@ class LStoreCodeEntry FINAL: public LTemplateInstruction<0, 2, 0> {
   LOperand* function() { return inputs_[0]; }
   LOperand* code_object() { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(StoreCodeEntry, "store-code-entry")
   DECLARE_HYDROGEN_ACCESSOR(StoreCodeEntry)
@@ -1871,7 +1857,7 @@ class LInnerAllocatedObject FINAL: public LTemplateInstruction<1, 2, 0> {
   LOperand* base_object() const { return inputs_[0]; }
   LOperand* offset() const { return inputs_[1]; }
 
-  virtual void PrintDataTo(StringStream* stream);
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   DECLARE_CONCRETE_INSTRUCTION(InnerAllocatedObject, "inner-allocated-object")
 };
@@ -1915,7 +1901,7 @@ class LCallJSFunction FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallJSFunction, "call-js-function")
   DECLARE_HYDROGEN_ACCESSOR(CallJSFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -1937,18 +1923,18 @@ class LCallWithDescriptor FINAL : public LTemplateResultInstruction<1> {
  private:
   DECLARE_CONCRETE_INSTRUCTION(CallWithDescriptor, "call-with-descriptor")
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 
   ZoneList<LOperand*> inputs_;
 
   // Iterator support.
-  virtual int InputCount() FINAL OVERRIDE { return inputs_.length(); }
-  virtual LOperand* InputAt(int i) FINAL OVERRIDE { return inputs_[i]; }
+  int InputCount() FINAL { return inputs_.length(); }
+  LOperand* InputAt(int i) FINAL { return inputs_[i]; }
 
-  virtual int TempCount() FINAL OVERRIDE { return 0; }
-  virtual LOperand* TempAt(int i) FINAL OVERRIDE { return NULL; }
+  int TempCount() FINAL { return 0; }
+  LOperand* TempAt(int i) FINAL { return NULL; }
 };
 
 
@@ -1965,7 +1951,7 @@ class LInvokeFunction FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
   DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -2001,7 +1987,7 @@ class LCallNew FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNew, "call-new")
   DECLARE_HYDROGEN_ACCESSOR(CallNew)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -2020,7 +2006,7 @@ class LCallNewArray FINAL : public LTemplateInstruction<1, 2, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallNewArray, "call-new-array")
   DECLARE_HYDROGEN_ACCESSOR(CallNewArray)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   int arity() const { return hydrogen()->argument_count() - 1; }
 };
@@ -2037,7 +2023,7 @@ class LCallRuntime FINAL : public LTemplateInstruction<1, 1, 0> {
   DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
   DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
 
-  virtual bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
+  bool ClobbersDoubleRegisters(Isolate* isolate) const OVERRIDE {
     return save_doubles() == kDontSaveFPRegs;
   }
 
@@ -2225,7 +2211,7 @@ class LStoreNamedField FINAL : public LTemplateInstruction<0, 2, 2> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedField, "store-named-field")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedField)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
@@ -2244,7 +2230,7 @@ class LStoreNamedGeneric FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreNamedGeneric, "store-named-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreNamedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   Handle<Object> name() const { return hydrogen()->name(); }
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
@@ -2275,7 +2261,7 @@ class LStoreKeyed FINAL : public LTemplateInstruction<0, 3, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyed, "store-keyed")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
   uint32_t base_offset() const { return hydrogen()->base_offset(); }
   bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
 };
@@ -2301,7 +2287,7 @@ class LStoreKeyedGeneric FINAL : public LTemplateInstruction<0, 4, 0> {
   DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
   DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   StrictMode strict_mode() { return hydrogen()->strict_mode(); }
 };
@@ -2328,7 +2314,7 @@ class LTransitionElementsKind FINAL : public LTemplateInstruction<0, 2, 2> {
                                "transition-elements-kind")
   DECLARE_HYDROGEN_ACCESSOR(TransitionElementsKind)
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 
   Handle<Map> original_map() { return hydrogen()->original_map().handle(); }
   Handle<Map> transitioned_map() {
@@ -2628,15 +2614,13 @@ class LTypeofIsAndBranch FINAL : public LControlInstruction<1, 0> {
 
   Handle<String> type_literal() { return hydrogen()->type_literal(); }
 
-  virtual void PrintDataTo(StringStream* stream) OVERRIDE;
+  void PrintDataTo(StringStream* stream) OVERRIDE;
 };
 
 
 class LOsrEntry FINAL : public LTemplateInstruction<0, 0, 0> {
  public:
-  virtual bool HasInterestingComment(LCodeGen* gen) const OVERRIDE {
-    return false;
-  }
+  bool HasInterestingComment(LCodeGen* gen) const OVERRIDE { return false; }
   DECLARE_CONCRETE_INSTRUCTION(OsrEntry, "osr-entry")
 };
 
@@ -2843,7 +2827,7 @@ class LChunkBuilder FINAL : public LChunkBuilderBase {
 
   // An input operand in register, stack slot or a constant operand.
   // Will not be moved to a register even if one is freely available.
-  virtual MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
+  MUST_USE_RESULT LOperand* UseAny(HValue* value) OVERRIDE;
 
   // Temporary operand that must be in a register.
   MUST_USE_RESULT LUnallocated* TempRegister();
index 3f522fc..008b2af 100644
@@ -654,16 +654,16 @@ void MacroAssembler::CheckMap(Register obj,
 }
 
 
-void MacroAssembler::DispatchMap(Register obj,
-                                 Register unused,
-                                 Handle<Map> map,
-                                 Handle<Code> success,
-                                 SmiCheckType smi_check_type) {
+void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
+                                     Register scratch2, Handle<WeakCell> cell,
+                                     Handle<Code> success,
+                                     SmiCheckType smi_check_type) {
   Label fail;
   if (smi_check_type == DO_SMI_CHECK) {
     JumpIfSmi(obj, &fail);
   }
-  cmp(FieldOperand(obj, HeapObject::kMapOffset), Immediate(map));
+  mov(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
+  CmpWeakValue(scratch1, cell, scratch2);
   j(equal, success);
 
   bind(&fail);
@@ -1321,10 +1321,10 @@ void MacroAssembler::LoadFromNumberDictionary(Label* miss,
   }
 
   bind(&done);
-  // Check that the value is a normal propety.
+  // Check that the value is a field property.
   const int kDetailsOffset =
       SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
-  DCHECK_EQ(NORMAL, 0);
+  DCHECK_EQ(FIELD, 0);
   test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset),
        Immediate(PropertyDetails::TypeField::kMask << kSmiTagSize));
   j(not_zero, miss);
@@ -2544,6 +2544,21 @@ void MacroAssembler::PushHeapObject(Handle<HeapObject> object) {
 }
 
 
+void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
+                                  Register scratch) {
+  mov(scratch, cell);
+  cmp(value, FieldOperand(scratch, WeakCell::kValueOffset));
+}
+
+
+void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
+                                   Label* miss) {
+  mov(value, cell);
+  mov(value, FieldOperand(value, WeakCell::kValueOffset));
+  JumpIfSmi(value, miss);
+}
+
+
 void MacroAssembler::Ret() {
   ret(0);
 }
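CmpWeakValue and LoadWeakValue are the building blocks DispatchWeakMap now uses: instead of embedding a map directly in the code (which would keep it strongly alive), the code embeds a WeakCell and compares against its current value. A hedged caller sketch, using only operations visible in this patch (register choices are illustrative):

    Label miss;
    __ mov(scratch1, FieldOperand(obj, HeapObject::kMapOffset));
    __ CmpWeakValue(scratch1, cell, scratch2);  // cmp map, cell->value
    __ j(not_equal, &miss);  // different map, or the cell was cleared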
@@ -3072,18 +3087,6 @@ void MacroAssembler::CheckPageFlagForMap(
 }
 
 
-void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
-                                        Register scratch,
-                                        Label* if_deprecated) {
-  if (map->CanBeDeprecated()) {
-    mov(scratch, map);
-    mov(scratch, FieldOperand(scratch, Map::kBitField3Offset));
-    and_(scratch, Immediate(Map::Deprecated::kMask));
-    j(not_zero, if_deprecated);
-  }
-}
-
-
 void MacroAssembler::JumpIfBlack(Register object,
                                  Register scratch0,
                                  Register scratch1,
index ad308a4..b797265 100644
@@ -93,10 +93,6 @@ class MacroAssembler: public Assembler {
       Label* condition_met,
       Label::Distance condition_met_distance = Label::kFar);
 
-  void CheckMapDeprecated(Handle<Map> map,
-                          Register scratch,
-                          Label* if_deprecated);
-
   // Check if object is in new space.  Jumps if the object is not in new space.
   // The register scratch can be object itself, but scratch will be clobbered.
   void JumpIfNotInNewSpace(Register object,
@@ -277,6 +273,9 @@ class MacroAssembler: public Assembler {
     }
   }
 
+  void CmpWeakValue(Register value, Handle<WeakCell> cell, Register scratch);
+  void LoadWeakValue(Register value, Handle<WeakCell> cell, Label* miss);
+
   // ---------------------------------------------------------------------------
   // JavaScript invokes
 
@@ -379,14 +378,12 @@ class MacroAssembler: public Assembler {
                 Label* fail,
                 SmiCheckType smi_check_type);
 
-  // Check if the map of an object is equal to a specified map and branch to a
-  // specified target if equal. Skip the smi check if not required (object is
-  // known to be a heap object)
-  void DispatchMap(Register obj,
-                   Register unused,
-                   Handle<Map> map,
-                   Handle<Code> success,
-                   SmiCheckType smi_check_type);
+  // Check if the map of an object is equal to the map held by a specified
+  // weak cell, and branch to a specified target if so. Skip the smi check
+  // if not required (object is known to be a heap object).
+  void DispatchWeakMap(Register obj, Register scratch1, Register scratch2,
+                       Handle<WeakCell> cell, Handle<Code> success,
+                       SmiCheckType smi_check_type);
 
   // Check if the object in register heap_object is a string. Afterwards the
   // register map contains the object map and the register instance_type
index ab0ae9c..1eb69b8 100644
@@ -50,10 +50,10 @@ class zone_allocator {
   }
   void destroy(pointer p) { p->~T(); }
 
-  bool operator==(zone_allocator const& other) {
+  bool operator==(zone_allocator const& other) const {
     return zone_ == other.zone_;
   }
-  bool operator!=(zone_allocator const& other) {
+  bool operator!=(zone_allocator const& other) const {
     return zone_ != other.zone_;
   }
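Const-qualifying the comparison operators is needed for standard-library conformance: containers are allowed to compare allocators through const references, which fails to compile with non-const members. Illustration (hypothetical free function, not part of the patch):

    bool SameZone(const zone_allocator<int>& a,
                  const zone_allocator<int>& b) {
      return a == b;  // only compiles if operator== is a const member
    }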
 
index 4998cbf..b0ff7b6 100644
@@ -6,7 +6,9 @@
 #define V8_ZONE_CONTAINERS_H_
 
 #include <deque>
+#include <list>
 #include <queue>
+#include <stack>
 #include <vector>
 
 #include "src/zone-allocator.h"
@@ -17,48 +19,75 @@ namespace internal {
 // A wrapper subclass for std::vector to make it easy to construct one
 // that uses a zone allocator.
 template <typename T>
-class ZoneVector : public std::vector<T, zone_allocator<T> > {
+class ZoneVector : public std::vector<T, zone_allocator<T>> {
  public:
   // Constructs an empty vector.
   explicit ZoneVector(Zone* zone)
-      : std::vector<T, zone_allocator<T> >(zone_allocator<T>(zone)) {}
+      : std::vector<T, zone_allocator<T>>(zone_allocator<T>(zone)) {}
 
   // Constructs a new vector and fills it with {size} elements, each
   // constructed via the default constructor.
   ZoneVector(int size, Zone* zone)
-      : std::vector<T, zone_allocator<T> >(size, T(), zone_allocator<T>(zone)) {
-  }
+      : std::vector<T, zone_allocator<T>>(size, T(), zone_allocator<T>(zone)) {}
 
   // Constructs a new vector and fills it with {size} elements, each
   // having the value {def}.
   ZoneVector(int size, T def, Zone* zone)
-      : std::vector<T, zone_allocator<T> >(size, def, zone_allocator<T>(zone)) {
-  }
+      : std::vector<T, zone_allocator<T>>(size, def, zone_allocator<T>(zone)) {}
 };
 
+
 // A wrapper subclass for std::deque to make it easy to construct one
 // that uses a zone allocator.
 template <typename T>
-class ZoneDeque : public std::deque<T, zone_allocator<T> > {
+class ZoneDeque : public std::deque<T, zone_allocator<T>> {
  public:
+  // Constructs an empty deque.
   explicit ZoneDeque(Zone* zone)
-      : std::deque<T, zone_allocator<T> >(zone_allocator<T>(zone)) {}
+      : std::deque<T, zone_allocator<T>>(zone_allocator<T>(zone)) {}
+};
+
+
+// A wrapper subclass for std::list to make it easy to construct one
+// that uses a zone allocator.
+// TODO(mstarzinger): This should be renamed to ZoneList once we get rid of
+// our own home-grown ZoneList, which is actually a ZoneVector.
+template <typename T>
+class ZoneLinkedList : public std::list<T, zone_allocator<T>> {
+ public:
+  // Constructs an empty list.
+  explicit ZoneLinkedList(Zone* zone)
+      : std::list<T, zone_allocator<T>>(zone_allocator<T>(zone)) {}
 };
 
+
 // A wrapper subclass for std::queue to make it easy to construct one
 // that uses a zone allocator.
 template <typename T>
-class ZoneQueue : public std::queue<T, std::deque<T, zone_allocator<T> > > {
+class ZoneQueue : public std::queue<T, ZoneDeque<T>> {
  public:
   // Constructs an empty queue.
   explicit ZoneQueue(Zone* zone)
-      : std::queue<T, std::deque<T, zone_allocator<T> > >(
-            std::deque<T, zone_allocator<T> >(zone_allocator<T>(zone))) {}
+      : std::queue<T, ZoneDeque<T>>(ZoneDeque<T>(zone)) {}
 };
 
+
+// A wrapper subclass for std::stack to make it easy to construct one that uses
+// a zone allocator.
+template <typename T>
+class ZoneStack : public std::stack<T, ZoneDeque<T>> {
+ public:
+  // Constructs an empty stack.
+  explicit ZoneStack(Zone* zone)
+      : std::stack<T, ZoneDeque<T>>(ZoneDeque<T>(zone)) {}
+};
+
+
 // Typedefs to shorten commonly used vectors.
 typedef ZoneVector<bool> BoolVector;
 typedef ZoneVector<int> IntVector;
-} }  // namespace v8::internal
+
+}  // namespace internal
+}  // namespace v8
 
 #endif  // V8_ZONE_CONTAINERS_H_
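A usage sketch for the new wrappers (hypothetical caller; only the types declared above are assumed). Everything is allocated in the zone, so there is no per-element deallocation:

    void Demo(Zone* zone) {
      ZoneStack<int> stack(zone);      // backed by a ZoneDeque
      stack.push(1);
      stack.push(2);
      ZoneLinkedList<int> list(zone);  // zone-allocated std::list
      list.push_back(stack.top());     // list now holds {2}
      stack.pop();
    }  // the zone reclaims all nodes at once when it dies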
index cf037b5..63efe16 100644
@@ -31,7 +31,6 @@ bool Zone::excess_allocation() {
 
 void Zone::adjust_segment_bytes_allocated(int delta) {
   segment_bytes_allocated_ += delta;
-  isolate_->counters()->zone_segment_bytes()->Set(segment_bytes_allocated_);
 }
 
 
index b05f0a7..4d1c467 100644
         'compiler/test-js-context-specialization.cc',
         'compiler/test-js-constant-cache.cc',
         'compiler/test-js-typed-lowering.cc',
+        'compiler/test-jump-threading.cc',
         'compiler/test-linkage.cc',
         'compiler/test-loop-assignment-analysis.cc',
+        'compiler/test-loop-analysis.cc',
         'compiler/test-machine-operator-reducer.cc',
         'compiler/test-node-algorithm.cc',
         'compiler/test-node-cache.cc',
         'compiler/test-node.cc',
         'compiler/test-operator.cc',
-        'compiler/test-phi-reducer.cc',
         'compiler/test-pipeline.cc',
         'compiler/test-representation-change.cc',
         'compiler/test-run-deopt.cc',
         'test-transitions.cc',
         'test-types.cc',
         'test-unbound-queue.cc',
+        'test-unboxed-doubles.cc',
         'test-unique.cc',
         'test-unscopables-hidden-prototype.cc',
         'test-utils.cc',
           # cctest can't be built against a shared library, so we need to
           # depend on the underlying static target in that case.
           'conditions': [
-            ['v8_use_snapshot=="true"', {
+            ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
               'dependencies': ['../../tools/gyp/v8.gyp:v8_snapshot'],
-            },
-            {
-              'dependencies': [
-                '../../tools/gyp/v8.gyp:v8_nosnapshot',
-              ],
+            }],
+            ['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
+              'dependencies': ['../../tools/gyp/v8.gyp:v8_external_snapshot'],
+            }],
+            ['v8_use_snapshot!="true"', {
+              'dependencies': ['../../tools/gyp/v8.gyp:v8_nosnapshot'],
             }],
           ],
         }, {
             '../../tools/js2c.py',
             '<@(_outputs)',
             'TEST',  # type
-            'off',  # compression
             '<@(file_list)',
           ],
         }
index 0b76995..cc5414d 100644
   ##############################################################################
   # TurboFan compiler failures.
 
-  # TODO(sigurds): The schedule is borked with multiple inlinees,
-  # and cannot handle free-floating loops yet
-  'test-run-inlining/InlineTwiceDependentDiamond': [SKIP],
-  'test-run-inlining/InlineTwiceDependentDiamondDifferent': [SKIP],
-  'test-run-inlining/InlineLoop': [SKIP],
-
   # Some tests are just too slow to run for now.
   'test-api/Threading*': [PASS, NO_VARIANTS],
   'test-heap/IncrementalMarkingStepMakesBigProgressWithLargeObjects': [PASS, NO_VARIANTS],
   'test-heap-profiler/ManyLocalsInSharedContext': [PASS, NO_VARIANTS],
   'test-debug/ThreadedDebugging': [PASS, NO_VARIANTS],
   'test-debug/DebugBreakLoop': [PASS, NO_VARIANTS],
-
-  # Support for breakpoints requires using LoadICs and StoreICs.
-  'test-debug/BreakPointICStore': [PASS, NO_VARIANTS],
-  'test-debug/BreakPointICLoad': [PASS, NO_VARIANTS],
-  'test-debug/BreakPointICCall': [PASS, NO_VARIANTS],
-  'test-debug/BreakPointICCallWithGC': [PASS, NO_VARIANTS],
-  'test-debug/BreakPointConstructCallWithGC': [PASS, NO_VARIANTS],
-  'test-debug/BreakPointReturn': [PASS, NO_VARIANTS],
-  'test-debug/BreakPointThroughJavaScript': [PASS, NO_VARIANTS],
-  'test-debug/ScriptBreakPointByNameThroughJavaScript': [PASS, NO_VARIANTS],
-  'test-debug/ScriptBreakPointByIdThroughJavaScript': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepLinear': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepKeyedLoadLoop': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepKeyedStoreLoop': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepNamedLoadLoop': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepNamedStoreLoop': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepLinearMixedICs': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepDeclarations': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepLocals': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepIf': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepSwitch': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepWhile': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepDoWhile': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepFor': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepForContinue': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepForBreak': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepForIn': [PASS, NO_VARIANTS],
-  'test-debug/DebugStepWith': [PASS, NO_VARIANTS],
-  'test-debug/DebugConditional': [PASS, NO_VARIANTS],
-  'test-debug/StepInOutSimple': [PASS, NO_VARIANTS],
-  'test-debug/StepInOutTree': [PASS, NO_VARIANTS],
-  'test-debug/StepInOutBranch': [PASS, NO_VARIANTS],
-  'test-debug/DebugBreak': [PASS, NO_VARIANTS],
-  'test-debug/DebugBreakStackInspection': [PASS, NO_VARIANTS],
-  'test-debug/BreakMessageWhenMessageHandlerIsReset': [PASS, NO_VARIANTS],
-  'test-debug/NoDebugBreakInAfterCompileMessageHandler': [PASS, NO_VARIANTS],
-  'test-debug/DisableBreak': [PASS, NO_VARIANTS],
-  'test-debug/RegExpDebugBreak': [PASS, NO_VARIANTS],
-  'test-debug/DebugBreakFunctionApply': [PASS, NO_VARIANTS],
-  'test-debug/DeoptimizeDuringDebugBreak': [PASS, NO_VARIANTS],
+  # BUG(3742).
+  'test-mark-compact/MarkCompactCollector': [PASS, ['arch==arm', NO_VARIANTS]],
 
   # Support for %GetFrameDetails is missing and requires checkpoints.
   'test-api/Regress385349': [PASS, NO_VARIANTS],
   'test-debug/CallingContextIsNotDebugContext': [PASS, NO_VARIANTS],
   'test-debug/DebugEventContext': [PASS, NO_VARIANTS],
   'test-debug/DebugBreakInline': [PASS, NO_VARIANTS],
+  'test-debug/BreakMessageWhenMessageHandlerIsReset': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreak': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakFunctionApply': [PASS, NO_VARIANTS],
+  'test-debug/DebugBreakStackInspection': [PASS, NO_VARIANTS],
+  'test-debug/DeoptimizeDuringDebugBreak': [PASS, NO_VARIANTS],
+  'test-debug/DisableBreak': [PASS, NO_VARIANTS],
+  'test-debug/NoDebugBreakInAfterCompileMessageHandler': [PASS, NO_VARIANTS],
+  'test-debug/RegExpDebugBreak': [PASS, NO_VARIANTS],
+
+  # TODO(titzer): Triggers bug in late control reduction.
+  'test-run-inlining/InlineLoopGuardedEmpty': [SKIP],
 
   ############################################################################
   # Slow tests.
   # TODO(mips-team): Currently fails on mips board.
   'test-simplified-lowering/RunNumberMultiply_TruncatingToUint32': [SKIP],
   'test-parsing/TooManyArguments': [SKIP],
+  'test-api/Threading3': [SKIP],
+  'test-api/RequestInterruptTestWithNativeAccessor': [SKIP],
+  'test-api/RequestInterruptTestWithAccessor': [SKIP],
 }],  # 'arch == mips'
 
 ##############################################################################
   'test-log/LogAccessorCallbacks': [SKIP],
   'test-log/LogCallbacks': [SKIP],
   'test-log/ProfLazyMode': [SKIP],
-
-  # platform-tls.h does not contain an ANDROID-related header.
-  'test-platform-tls/FastTLS': [SKIP],
-
-  # This test times out.
-  'test-threads/ThreadJoinSelf': [SKIP],
 }],  # 'arch == android_arm or arch == android_ia32'
 
 ##############################################################################
index 60b1d25..cad171e 100644
@@ -207,6 +207,42 @@ class CallHelper {
         Simulator::CallArgument::End()};
     return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f), args));
   }
+#elif USE_SIMULATOR && V8_TARGET_ARCH_MIPS64
+  uintptr_t CallSimulator(byte* f, int64_t p1 = 0, int64_t p2 = 0,
+                          int64_t p3 = 0, int64_t p4 = 0) {
+    Simulator* simulator = Simulator::current(isolate_);
+    return static_cast<uintptr_t>(simulator->Call(f, 4, p1, p2, p3, p4));
+  }
+
+  template <typename R, typename F>
+  R DoCall(F* f) {
+    return ReturnValueTraits<R>::Cast(CallSimulator(FUNCTION_ADDR(f)));
+  }
+  template <typename R, typename F, typename P1>
+  R DoCall(F* f, P1 p1) {
+    return ReturnValueTraits<R>::Cast(
+        CallSimulator(FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1)));
+  }
+  template <typename R, typename F, typename P1, typename P2>
+  R DoCall(F* f, P1 p1, P2 p2) {
+    return ReturnValueTraits<R>::Cast(
+        CallSimulator(FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
+                      ParameterTraits<P2>::Cast(p2)));
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3) {
+    return ReturnValueTraits<R>::Cast(CallSimulator(
+        FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
+        ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3)));
+  }
+  template <typename R, typename F, typename P1, typename P2, typename P3,
+            typename P4>
+  R DoCall(F* f, P1 p1, P2 p2, P3 p3, P4 p4) {
+    return ReturnValueTraits<R>::Cast(CallSimulator(
+        FUNCTION_ADDR(f), ParameterTraits<P1>::Cast(p1),
+        ParameterTraits<P2>::Cast(p2), ParameterTraits<P3>::Cast(p3),
+        ParameterTraits<P4>::Cast(p4)));
+  }
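Since these four overloads differ only in arity, a C++11 variadic template could express them in one go. A sketch under the assumption that variadic templates are acceptable in this codebase (the traits and CallSimulator are as defined above):

    template <typename R, typename F, typename... Ps>
    R DoCall(F* f, Ps... ps) {
      return ReturnValueTraits<R>::Cast(
          CallSimulator(FUNCTION_ADDR(f), ParameterTraits<Ps>::Cast(ps)...));
    }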
 #elif USE_SIMULATOR && (V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS)
   uintptr_t CallSimulator(byte* f, int32_t p1 = 0, int32_t p2 = 0,
                           int32_t p3 = 0, int32_t p4 = 0) {
index 51970cc..5311001 100644
@@ -4,7 +4,6 @@
 
 #include "src/v8.h"
 
-#include "src/compiler/generic-node-inl.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/compiler/codegen-tester.h"
 #include "test/cctest/compiler/value-helper.h"
index 72d658f..283d533 100644
@@ -68,10 +68,8 @@ class MachineAssemblerTester : public HandleAndZoneScope,
       Schedule* schedule = this->Export();
       CallDescriptor* call_descriptor = this->call_descriptor();
       Graph* graph = this->graph();
-      CompilationInfo info(graph->zone()->isolate(), graph->zone());
-      Linkage linkage(graph->zone(), call_descriptor);
-      Pipeline pipeline(&info);
-      code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph, schedule);
+      code_ =
+          Pipeline::GenerateCodeForTesting(call_descriptor, graph, schedule);
     }
     return this->code_.ToHandleChecked()->entry();
   }
index 600f6a3..7e16eea 100644
@@ -216,9 +216,7 @@ class FunctionTester : public InitializedHandleScope {
     CHECK(Compiler::Analyze(&info));
     CHECK(Compiler::EnsureDeoptimizationSupport(&info));
 
-    Pipeline pipeline(&info);
-    Linkage linkage(info.zone(), &info);
-    Handle<Code> code = pipeline.GenerateCodeForMachineGraph(&linkage, graph);
+    Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, graph);
     CHECK(!code.is_null());
     function->ReplaceCode(*code);
     return function;
index 9c4379c..b0f470b 100644
@@ -35,11 +35,9 @@ byte* MachineCallHelper::Generate() {
   if (!Pipeline::SupportedBackend()) return NULL;
   if (code_.is_null()) {
     Zone* zone = graph_->zone();
-    CompilationInfo info(zone->isolate(), zone);
-    Linkage linkage(zone,
-                    Linkage::GetSimplifiedCDescriptor(zone, machine_sig_));
-    Pipeline pipeline(&info);
-    code_ = pipeline.GenerateCodeForMachineGraph(&linkage, graph_);
+    CallDescriptor* desc =
+        Linkage::GetSimplifiedCDescriptor(zone, machine_sig_);
+    code_ = Pipeline::GenerateCodeForTesting(desc, graph_);
   }
   return code_.ToHandleChecked()->entry();
 }
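
Several hunks in this file and its neighbors make the same move: the per-test CompilationInfo/Linkage/Pipeline wiring collapses into one static Pipeline::GenerateCodeForTesting call. A rough sketch of that facade pattern, with simplified, hypothetical types:

```cpp
// Simplified, hypothetical stand-ins for the compiler types involved.
struct Graph {};
struct Schedule {};
struct CallDescriptor {};
struct Code {};

class Pipeline {
 public:
  // The static entry point owns the setup boilerplate, so every test
  // call site shrinks to a single line.
  static Code* GenerateCodeForTesting(CallDescriptor* desc, Graph* graph,
                                      Schedule* schedule = nullptr) {
    Pipeline pipeline(desc, graph, schedule);
    return pipeline.Run();
  }

 private:
  Pipeline(CallDescriptor* desc, Graph* graph, Schedule* schedule)
      : desc_(desc), graph_(graph), schedule_(schedule) {}

  // Instruction selection, register allocation and assembly would go here.
  Code* Run() { return new Code(); }

  CallDescriptor* desc_;
  Graph* graph_;
  Schedule* schedule_;
};
```
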
index 1bc5be7..772de4d 100644 (file)
@@ -61,6 +61,7 @@ class GraphAndBuilders {
   explicit GraphAndBuilders(Zone* zone)
       : main_graph_(new (zone) Graph(zone)),
         main_common_(zone),
+        main_machine_(zone),
         main_simplified_(zone) {}
 
  protected:
index 8d6844f..baa03fb 100644 (file)
@@ -5,7 +5,6 @@
 #include "test/cctest/compiler/simplified-graph-builder.h"
 
 #include "src/compiler/operator-properties.h"
-#include "src/compiler/operator-properties-inl.h"
 
 namespace v8 {
 namespace internal {
@@ -46,7 +45,7 @@ void SimplifiedGraphBuilder::End() {
 Node* SimplifiedGraphBuilder::MakeNode(const Operator* op,
                                        int value_input_count,
                                        Node** value_inputs, bool incomplete) {
-  DCHECK(op->InputCount() == value_input_count);
+  DCHECK(op->ValueInputCount() == value_input_count);
 
   DCHECK(!OperatorProperties::HasContextInput(op));
   DCHECK(!OperatorProperties::HasFrameStateInput(op));
index ad062e6..537094a 100644 (file)
@@ -127,14 +127,12 @@ class SimplifiedGraphBuilder : public GraphBuilder {
   Node* StoreField(const FieldAccess& access, Node* object, Node* value) {
     return NewNode(simplified()->StoreField(access), object, value);
   }
-  Node* LoadElement(const ElementAccess& access, Node* object, Node* index,
-                    Node* length) {
-    return NewNode(simplified()->LoadElement(access), object, index, length);
+  Node* LoadElement(const ElementAccess& access, Node* object, Node* index) {
+    return NewNode(simplified()->LoadElement(access), object, index);
   }
   Node* StoreElement(const ElementAccess& access, Node* object, Node* index,
-                     Node* length, Node* value) {
-    return NewNode(simplified()->StoreElement(access), object, index, length,
-                   value);
+                     Node* value) {
+    return NewNode(simplified()->StoreElement(access), object, index, value);
   }
 
  protected:
index dd96499..703fc17 100644 (file)
@@ -5,7 +5,6 @@
 #include "src/v8.h"
 
 #include "src/basic-block-profiler.h"
-#include "src/compiler/generic-node-inl.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/compiler/codegen-tester.h"
 
index 52590c0..cd3472d 100644 (file)
@@ -4,7 +4,6 @@
 
 #include "src/v8.h"
 
-#include "src/compiler/generic-node-inl.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/compiler/codegen-tester.h"
 #include "test/cctest/compiler/value-helper.h"
index 02ba4a7..5795754 100644 (file)
@@ -6,12 +6,12 @@
 
 #include "src/compiler/change-lowering.h"
 #include "src/compiler/control-builders.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/js-graph.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/pipeline.h"
 #include "src/compiler/select-lowering.h"
 #include "src/compiler/simplified-lowering.h"
+#include "src/compiler/typer.h"
 #include "src/compiler/verifier.h"
 #include "src/execution.h"
 #include "src/globals.h"
@@ -127,9 +127,11 @@ class ChangesLoweringTester : public GraphBuilderTester<ReturnType> {
     // Run the graph reducer with changes lowering on a single node.
     CompilationInfo info(this->isolate(), this->zone());
     Linkage linkage(this->zone(), &info);
+    Typer typer(this->graph(), info.context());
+    typer.Run();
     ChangeLowering change_lowering(&jsgraph, &linkage);
     SelectLowering select_lowering(this->graph(), this->common());
-    GraphReducer reducer(this->graph());
+    GraphReducer reducer(this->graph(), this->zone());
     reducer.AddReducer(&change_lowering);
     reducer.AddReducer(&select_lowering);
     reducer.ReduceNode(change);
index 974b423..56afe7b 100644 (file)
@@ -56,46 +56,14 @@ class DeoptCodegenTester {
     graph = new (scope_->main_zone()) Graph(scope_->main_zone());
   }
 
-  virtual ~DeoptCodegenTester() { delete code; }
+  virtual ~DeoptCodegenTester() {}
 
   void GenerateCodeFromSchedule(Schedule* schedule) {
     OFStream os(stdout);
     if (FLAG_trace_turbo) {
       os << *schedule;
     }
-
-    // Initialize the codegen and generate code.
-    Linkage* linkage = new (scope_->main_zone()) Linkage(info.zone(), &info);
-    InstructionBlocks* instruction_blocks =
-        TestInstrSeq::InstructionBlocksFor(scope_->main_zone(), schedule);
-    code = new TestInstrSeq(scope_->main_zone(), instruction_blocks);
-    SourcePositionTable source_positions(graph);
-    InstructionSelector selector(scope_->main_zone(), graph, linkage, code,
-                                 schedule, &source_positions);
-    selector.SelectInstructions();
-
-    if (FLAG_trace_turbo) {
-      PrintableInstructionSequence printable = {
-          RegisterConfiguration::ArchDefault(), code};
-      os << "----- Instruction sequence before register allocation -----\n"
-         << printable;
-    }
-
-    Frame frame;
-    RegisterAllocator allocator(RegisterConfiguration::ArchDefault(),
-                                scope_->main_zone(), &frame, code);
-    CHECK(allocator.Allocate());
-
-    if (FLAG_trace_turbo) {
-      PrintableInstructionSequence printable = {
-          RegisterConfiguration::ArchDefault(), code};
-      os << "----- Instruction sequence after register allocation -----\n"
-         << printable;
-    }
-
-    compiler::CodeGenerator generator(&frame, linkage, code, &info);
-    result_code = generator.GenerateCode();
-
+    result_code = Pipeline::GenerateCodeForTesting(&info, graph, schedule);
 #ifdef OBJECT_PRINT
     if (FLAG_print_opt_code || FLAG_trace_turbo) {
       result_code->Print();
index 67fdb68..03aa50b 100644 (file)
@@ -130,11 +130,8 @@ class ControlReducerTester : HandleAndZoneScope {
   }
 
   Node* SetSelfReferences(Node* node) {
-    Node::Inputs inputs = node->inputs();
-    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
-         ++iter) {
-      Node* input = *iter;
-      if (input == self) node->ReplaceInput(iter.index(), node);
+    for (Edge edge : node->input_edges()) {
+      if (edge.to() == self) node->ReplaceInput(edge.index(), node);
     }
     return node;
   }
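
The SetSelfReferences rewrite above swaps a hand-written iterator loop for range-based iteration over the node's input edges. A toy sketch of the range idea, using a plain std::vector where V8 has a lighter-weight adaptor (all types here are hypothetical):

```cpp
#include <vector>

struct Node;

struct Edge {
  Node* to_;
  int index_;
  Node* to() const { return to_; }
  int index() const { return index_; }
};

struct Node {
  std::vector<Edge> inputs_;
  // Exposing the edges as a begin()/end() range enables range-for.
  const std::vector<Edge>& input_edges() const { return inputs_; }
  void ReplaceInput(int index, Node* node) { inputs_[index].to_ = node; }
};

// Mirrors the rewritten loop: redirect every self-placeholder input.
inline void SetSelfReferences(Node* node, Node* self) {
  for (const Edge& edge : node->input_edges()) {
    if (edge.to() == self) node->ReplaceInput(edge.index(), node);
  }
}
```
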
@@ -772,7 +769,7 @@ TEST(CMergeReduce_edit_phi1) {
                                 R.leaf[1], R.leaf[2], merge);
     R.ReduceMerge(merge, merge);
     CHECK_EQ(IrOpcode::kPhi, phi->opcode());
-    CHECK_EQ(2, phi->op()->InputCount());
+    CHECK_EQ(2, phi->op()->ValueInputCount());
     CHECK_EQ(3, phi->InputCount());
     CHECK_EQ(R.leaf[i < 1 ? 1 : 0], phi->InputAt(0));
     CHECK_EQ(R.leaf[i < 2 ? 2 : 1], phi->InputAt(1));
@@ -791,7 +788,7 @@ TEST(CMergeReduce_edit_effect_phi1) {
                                 R.leaf[2], merge);
     R.ReduceMerge(merge, merge);
     CHECK_EQ(IrOpcode::kEffectPhi, phi->opcode());
-    CHECK_EQ(0, phi->op()->InputCount());
+    CHECK_EQ(0, phi->op()->ValueInputCount());
     CHECK_EQ(2, phi->op()->EffectInputCount());
     CHECK_EQ(3, phi->InputCount());
     CHECK_EQ(R.leaf[i < 1 ? 1 : 0], phi->InputAt(0));
@@ -883,7 +880,7 @@ TEST(CMergeReduce_exhaustive_4) {
       selector.CheckNode(ephi, IrOpcode::kEffectPhi, effects, merge);
       CHECK_EQ(phi, phi_use->InputAt(0));
       CHECK_EQ(ephi, ephi_use->InputAt(0));
-      CHECK_EQ(count, phi->op()->InputCount());
+      CHECK_EQ(count, phi->op()->ValueInputCount());
       CHECK_EQ(count + 1, phi->InputCount());
       CHECK_EQ(count, ephi->op()->EffectInputCount());
       CHECK_EQ(count + 1, ephi->InputCount());
@@ -909,7 +906,7 @@ TEST(CMergeReduce_edit_many_phis1) {
     for (int j = 0; j < kPhiCount; j++) {
       Node* phi = phis[j];
       CHECK_EQ(IrOpcode::kPhi, phi->opcode());
-      CHECK_EQ(2, phi->op()->InputCount());
+      CHECK_EQ(2, phi->op()->ValueInputCount());
       CHECK_EQ(3, phi->InputCount());
       CHECK_EQ(R.leaf[i < 1 ? 1 : 0], phi->InputAt(0));
       CHECK_EQ(R.leaf[i < 2 ? 2 : 1], phi->InputAt(1));
index 7f217d7..70b57b9 100644 (file)
@@ -5,7 +5,6 @@
 #include "src/v8.h"
 
 #include "graph-tester.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph-reducer.h"
 
 using namespace v8::internal;
@@ -202,7 +201,7 @@ TEST(ReduceGraphFromEnd1) {
   Node* end = graph.NewNode(&OPA1, n1);
   graph.SetEnd(end);
 
-  GraphReducer reducer(&graph);
+  GraphReducer reducer(&graph, graph.zone());
   ReducerRecorder recorder(graph.zone());
   reducer.AddReducer(&recorder);
   reducer.ReduceGraph();
@@ -220,7 +219,7 @@ TEST(ReduceGraphFromEnd2) {
   Node* end = graph.NewNode(&OPA2, n2, n3);
   graph.SetEnd(end);
 
-  GraphReducer reducer(&graph);
+  GraphReducer reducer(&graph, graph.zone());
   ReducerRecorder recorder(graph.zone());
   reducer.AddReducer(&recorder);
   reducer.ReduceGraph();
@@ -238,7 +237,7 @@ TEST(ReduceInPlace1) {
   Node* end = graph.NewNode(&OPA1, n1);
   graph.SetEnd(end);
 
-  GraphReducer reducer(&graph);
+  GraphReducer reducer(&graph, graph.zone());
   InPlaceABReducer r;
   reducer.AddReducer(&r);
 
@@ -263,7 +262,7 @@ TEST(ReduceInPlace2) {
   Node* end = graph.NewNode(&OPA2, n2, n3);
   graph.SetEnd(end);
 
-  GraphReducer reducer(&graph);
+  GraphReducer reducer(&graph, graph.zone());
   InPlaceABReducer r;
   reducer.AddReducer(&r);
 
@@ -293,7 +292,7 @@ TEST(ReduceNew1) {
   Node* end = graph.NewNode(&OPA2, n2, n3);
   graph.SetEnd(end);
 
-  GraphReducer reducer(&graph);
+  GraphReducer reducer(&graph, graph.zone());
   NewABReducer r(&graph);
   reducer.AddReducer(&r);
 
@@ -330,7 +329,7 @@ TEST(Wrapping1) {
   graph.SetEnd(end);
   CHECK_EQ(1, graph.NodeCount());
 
-  GraphReducer reducer(&graph);
+  GraphReducer reducer(&graph, graph.zone());
   A0Wrapper r(&graph);
   reducer.AddReducer(&r);
 
@@ -352,7 +351,7 @@ TEST(Wrapping2) {
   graph.SetEnd(end);
   CHECK_EQ(1, graph.NodeCount());
 
-  GraphReducer reducer(&graph);
+  GraphReducer reducer(&graph, graph.zone());
   B0Wrapper r(&graph);
   reducer.AddReducer(&r);
 
@@ -379,7 +378,7 @@ TEST(Forwarding1) {
   Node* end = graph.NewNode(&OPA1, n1);
   graph.SetEnd(end);
 
-  GraphReducer reducer(&graph);
+  GraphReducer reducer(&graph, graph.zone());
   A1Forwarder r;
   reducer.AddReducer(&r);
 
@@ -403,7 +402,7 @@ TEST(Forwarding2) {
   Node* end = graph.NewNode(&OPA2, n2, n3);
   graph.SetEnd(end);
 
-  GraphReducer reducer(&graph);
+  GraphReducer reducer(&graph, graph.zone());
   A1Forwarder r;
   reducer.AddReducer(&r);
 
@@ -434,7 +433,7 @@ TEST(Forwarding3) {
     }
     graph.SetEnd(end);
 
-    GraphReducer reducer(&graph);
+    GraphReducer reducer(&graph, graph.zone());
     A1Forwarder r;
     reducer.AddReducer(&r);
 
@@ -458,7 +457,7 @@ TEST(ReduceForward1) {
   Node* end = graph.NewNode(&OPA2, n2, n3);
   graph.SetEnd(end);
 
-  GraphReducer reducer(&graph);
+  GraphReducer reducer(&graph, graph.zone());
   InPlaceABReducer r;
   B1Forwarder f;
   reducer.AddReducer(&r);
@@ -501,7 +500,7 @@ TEST(Sorter1) {
 
     graph.SetEnd(end);
 
-    GraphReducer reducer(&graph);
+    GraphReducer reducer(&graph, graph.zone());
     reducer.AddReducer(&r);
 
     int before = graph.NodeCount();
@@ -560,7 +559,7 @@ TEST(SortForwardReduce) {
 
         GenDAG(&graph, p3, p2, p1);
 
-        GraphReducer reducer(&graph);
+        GraphReducer reducer(&graph, graph.zone());
         AB2Sorter r1;
         A1Forwarder r2;
         InPlaceABReducer r3;
@@ -599,7 +598,7 @@ TEST(Order) {
     Node* end = graph.NewNode(&OPA1, n1);
     graph.SetEnd(end);
 
-    GraphReducer reducer(&graph);
+    GraphReducer reducer(&graph, graph.zone());
     InPlaceABReducer abr;
     InPlaceBCReducer bcr;
     if (i == 0) {
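
Every GraphReducer in this file now takes graph.zone() as a second argument: the reducer's scratch state moves into the graph's arena and is released with it, instead of being heap-managed per reducer. A toy bump-allocator sketch of what such a zone provides, far simpler than V8's Zone and assuming each request fits in one segment:

```cpp
#include <cstddef>
#include <vector>

// Toy arena: allocation is a pointer bump; everything is freed at once
// when the zone dies. Assumes each request fits in a single segment.
class Zone {
 public:
  ~Zone() {
    for (char* segment : segments_) delete[] segment;
  }

  void* Allocate(size_t size) {
    size = (size + 7) & ~static_cast<size_t>(7);  // 8-byte alignment
    if (used_ + size > kSegmentSize) NewSegment();
    void* result = segments_.back() + used_;
    used_ += size;
    return result;
  }

 private:
  static const size_t kSegmentSize = 64 * 1024;

  void NewSegment() {
    segments_.push_back(new char[kSegmentSize]);
    used_ = 0;
  }

  std::vector<char*> segments_;
  size_t used_ = kSegmentSize;  // forces a segment on first use
};
```
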
index 9d394bb..ce3e6b7 100644 (file)
@@ -6,8 +6,6 @@
 #include "test/cctest/cctest.h"
 
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
-#include "src/compiler/generic-node.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/js-operator.h"
index 425a46c..294812f 100644 (file)
@@ -33,10 +33,9 @@ class InstructionTester : public HandleAndZoneScope {
         info(static_cast<HydrogenCodeStub*>(NULL), main_isolate()),
         linkage(zone(), &info),
         common(zone()),
+        machine(zone()),
         code(NULL) {}
 
-  ~InstructionTester() { delete code; }
-
   Isolate* isolate;
   Graph graph;
   Schedule schedule;
@@ -51,13 +50,12 @@ class InstructionTester : public HandleAndZoneScope {
   void allocCode() {
     if (schedule.rpo_order()->size() == 0) {
       // Compute the RPO order.
-      ZonePool zone_pool(isolate);
-      Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+      Scheduler::ComputeSpecialRPO(main_zone(), &schedule);
       DCHECK(schedule.rpo_order()->size() > 0);
     }
     InstructionBlocks* instruction_blocks =
         TestInstrSeq::InstructionBlocksFor(main_zone(), &schedule);
-    code = new TestInstrSeq(main_zone(), instruction_blocks);
+    code = new (main_zone()) TestInstrSeq(main_zone(), instruction_blocks);
   }
 
   Node* Int32Constant(int32_t val) {
index 2a591c1..8588f66 100644 (file)
@@ -22,7 +22,7 @@ class JSCacheTesterHelper {
         main_common_(zone),
         main_javascript_(zone),
         main_typer_(&main_graph_, MaybeHandle<Context>()),
-        main_machine_() {}
+        main_machine_(zone) {}
   Graph main_graph_;
   CommonOperatorBuilder main_common_;
   JSOperatorBuilder main_javascript_;
index 264fee0..fb7bd94 100644 (file)
@@ -21,7 +21,7 @@ class ContextSpecializationTester : public HandleAndZoneScope,
       : DirectGraphBuilder(new (main_zone()) Graph(main_zone())),
         common_(main_zone()),
         javascript_(main_zone()),
-        machine_(),
+        machine_(main_zone()),
         simplified_(main_zone()),
         jsgraph_(graph(), common(), &javascript_, &machine_),
         info_(main_isolate(), main_zone()) {}
@@ -203,7 +203,7 @@ TEST(SpecializeToContext) {
   JSContextSpecializer spec(t.info(), t.jsgraph(), const_context);
 
   {
-    // Check that SpecializeToContext() replaces values and forwards effects
+    // Check that specialization replaces values and forwards effects
     // correctly, and folds values from constant and non-constant contexts
     Node* effect_in = start;
     Node* load = t.NewNode(t.javascript()->LoadContext(0, slot, true),
@@ -229,8 +229,10 @@ TEST(SpecializeToContext) {
     CheckEffectInput(effect_in, load);
     CheckEffectInput(load, effect_use);
 
-    // Perform the substitution on the entire graph.
-    spec.SpecializeToContext();
+    // Perform the reduction on the entire graph.
+    GraphReducer graph_reducer(t.graph(), t.main_zone());
+    graph_reducer.AddReducer(&spec);
+    graph_reducer.ReduceGraph();
 
     // Effects should have been forwarded (not replaced with a value).
     CheckEffectInput(effect_in, effect_use);
index 28cc90a..70b1312 100644 (file)
@@ -2,14 +2,14 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-
 #include "src/compiler/graph-inl.h"
+#include "src/compiler/js-graph.h"
 #include "src/compiler/js-typed-lowering.h"
+#include "src/compiler/machine-operator.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/opcodes.h"
 #include "src/compiler/typer.h"
+#include "test/cctest/cctest.h"
 
 using namespace v8::internal;
 using namespace v8::internal::compiler;
@@ -21,6 +21,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
         binop(NULL),
         unop(NULL),
         javascript(main_zone()),
+        machine(main_zone()),
         simplified(main_zone()),
         common(main_zone()),
         graph(main_zone()),
@@ -75,7 +76,7 @@ class JSTypedLoweringTester : public HandleAndZoneScope {
 
   Node* reduce(Node* node) {
     JSGraph jsgraph(&graph, &common, &javascript, &machine);
-    JSTypedLowering reducer(&jsgraph);
+    JSTypedLowering reducer(&jsgraph, main_zone());
     Reduction reduction = reducer.Reduce(node);
     if (reduction.Changed()) return reduction.replacement();
     return node;
@@ -166,17 +167,19 @@ static Type* kStringTypes[] = {Type::InternalizedString(), Type::OtherString(),
 
 
 static Type* kInt32Types[] = {
-    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
-    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
-    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32()};
+    Type::UnsignedSmall(),       Type::NegativeSigned32(),
+    Type::NonNegativeSigned32(), Type::SignedSmall(),
+    Type::Signed32(),            Type::Unsigned32(),
+    Type::Integral32()};
 
 
 static Type* kNumberTypes[] = {
-    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
-    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
-    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32(),
-    Type::MinusZero(),       Type::NaN(),              Type::OtherNumber(),
-    Type::OrderedNumber(),   Type::Number()};
+    Type::UnsignedSmall(),       Type::NegativeSigned32(),
+    Type::NonNegativeSigned32(), Type::SignedSmall(),
+    Type::Signed32(),            Type::Unsigned32(),
+    Type::Integral32(),          Type::MinusZero(),
+    Type::NaN(),                 Type::OrderedNumber(),
+    Type::PlainNumber(),         Type::Number()};
 
 
 static Type* kJSTypes[] = {Type::Undefined(), Type::Null(),   Type::Boolean(),
@@ -303,12 +306,13 @@ class JSBitwiseShiftTypedLoweringTester : public JSTypedLoweringTester {
 TEST(Int32BitwiseShifts) {
   JSBitwiseShiftTypedLoweringTester R;
 
-  Type* types[] = {
-      Type::SignedSmall(), Type::UnsignedSmall(), Type::OtherSigned32(),
-      Type::Unsigned32(),  Type::Signed32(),      Type::MinusZero(),
-      Type::NaN(),         Type::OtherNumber(),   Type::Undefined(),
-      Type::Null(),        Type::Boolean(),       Type::Number(),
-      Type::String()};
+  Type* types[] = {Type::SignedSmall(),      Type::UnsignedSmall(),
+                   Type::NegativeSigned32(), Type::NonNegativeSigned32(),
+                   Type::Unsigned32(),       Type::Signed32(),
+                   Type::MinusZero(),        Type::NaN(),
+                   Type::Undefined(),        Type::Null(),
+                   Type::Boolean(),          Type::Number(),
+                   Type::PlainNumber(),      Type::String()};
 
   for (size_t i = 0; i < arraysize(types); ++i) {
     Node* p0 = R.Parameter(types[i], 0);
@@ -326,9 +330,13 @@ TEST(Int32BitwiseShifts) {
 
         CheckToI32(p0, r0, R.signedness[k]);
 
-        R.CheckPureBinop(IrOpcode::kWord32And, r1);
-        CheckToI32(p1, r1->InputAt(0), R.signedness[k + 1]);
-        R.CheckInt32Constant(0x1F, r1->InputAt(1));
+        if (r1->opcode() == IrOpcode::kWord32And) {
+          R.CheckPureBinop(IrOpcode::kWord32And, r1);
+          CheckToI32(p1, r1->InputAt(0), R.signedness[k + 1]);
+          R.CheckInt32Constant(0x1F, r1->InputAt(1));
+        } else {
+          CheckToI32(p1, r1, R.signedness[k]);
+        }
       }
     }
   }
@@ -364,10 +372,11 @@ TEST(Int32BitwiseBinops) {
   JSBitwiseTypedLoweringTester R;
 
   Type* types[] = {
-      Type::SignedSmall(), Type::UnsignedSmall(), Type::Unsigned32(),
-      Type::Signed32(),    Type::MinusZero(),     Type::NaN(),
-      Type::OtherNumber(), Type::Undefined(),     Type::Null(),
-      Type::Boolean(),     Type::Number(),        Type::String()};
+      Type::SignedSmall(),   Type::UnsignedSmall(), Type::Unsigned32(),
+      Type::Signed32(),      Type::MinusZero(),     Type::NaN(),
+      Type::OrderedNumber(), Type::PlainNumber(),   Type::Undefined(),
+      Type::Null(),          Type::Boolean(),       Type::Number(),
+      Type::String()};
 
   for (size_t i = 0; i < arraysize(types); ++i) {
     Node* p0 = R.Parameter(types[i], 0);
@@ -542,12 +551,7 @@ TEST(JSToBoolean_replacement) {
 
   for (size_t i = 0; i < arraysize(types); i++) {
     Node* n = R.Parameter(types[i]);
-    Node* c = R.graph.NewNode(R.javascript.ToBoolean(), n, R.context(),
-                              R.start(), R.start());
-    Node* effect_use = R.UseForEffect(c);
-    Node* add = R.graph.NewNode(R.simplified.ReferenceEqual(Type::Any()), n, c);
-
-    R.CheckEffectInput(c, effect_use);
+    Node* c = R.graph.NewNode(R.javascript.ToBoolean(), n, R.context());
     Node* r = R.reduce(c);
 
     if (types[i]->Is(Type::Boolean())) {
@@ -557,10 +561,6 @@ TEST(JSToBoolean_replacement) {
     } else {
       CHECK_EQ(IrOpcode::kHeapConstant, r->opcode());
     }
-
-    CHECK_EQ(n, add->InputAt(0));
-    CHECK_EQ(r, add->InputAt(1));
-    R.CheckEffectInput(R.start(), effect_use);
   }
 }
 
@@ -752,15 +752,12 @@ TEST(UnaryNot) {
 
   for (size_t i = 0; i < arraysize(kJSTypes); i++) {
     Node* orig = R.Unop(opnot, R.Parameter(kJSTypes[i]));
-    Node* use = R.graph.NewNode(R.common.Return(), orig);
     Node* r = R.reduce(orig);
-    // TODO(titzer): test will break if/when js-typed-lowering constant folds.
-    CHECK_EQ(IrOpcode::kBooleanNot, use->InputAt(0)->opcode());
 
     if (r == orig && orig->opcode() == IrOpcode::kJSToBoolean) {
       // The original node was turned into a ToBoolean.
       CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
-    } else {
+    } else if (r->opcode() != IrOpcode::kHeapConstant) {
       CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
     }
   }
@@ -815,7 +812,7 @@ TEST(RemoveToNumberEffects) {
     if (effect_use != NULL) {
       R.CheckEffectInput(R.start(), effect_use);
       // Check that value uses of ToNumber() do not go to start().
-      for (int i = 0; i < effect_use->op()->InputCount(); i++) {
+      for (int i = 0; i < effect_use->op()->ValueInputCount(); i++) {
         CHECK_NE(R.start(), effect_use->InputAt(i));
       }
     }
@@ -993,7 +990,7 @@ TEST(OrderNumberBinopEffects1) {
   };
 
   for (size_t j = 0; j < arraysize(ops); j += 2) {
-    BinopEffectsTester B(ops[j], Type::String(), Type::String());
+    BinopEffectsTester B(ops[j], Type::Symbol(), Type::Symbol());
     CHECK_EQ(ops[j + 1]->opcode(), B.result->op()->opcode());
 
     Node* i0 = B.CheckConvertedInput(IrOpcode::kJSToNumber, 0, true);
@@ -1066,8 +1063,8 @@ TEST(OrderCompareEffects) {
     CHECK_EQ(B.p1, i0->InputAt(0));
     CHECK_EQ(B.p0, i1->InputAt(0));
 
-    // But effects should be ordered start -> i1 -> i0 -> effect_use
-    B.CheckEffectOrdering(i1, i0);
+    // But effects should be ordered start -> i1 -> effect_use
+    B.CheckEffectOrdering(i1);
   }
 
   for (size_t j = 0; j < arraysize(ops); j += 2) {
@@ -1181,33 +1178,6 @@ TEST(Int32BinopEffects) {
 }
 
 
-TEST(UnaryNotEffects) {
-  JSTypedLoweringTester R;
-  const Operator* opnot = R.javascript.UnaryNot();
-
-  for (size_t i = 0; i < arraysize(kJSTypes); i++) {
-    Node* p0 = R.Parameter(kJSTypes[i], 0);
-    Node* orig = R.Unop(opnot, p0);
-    Node* effect_use = R.UseForEffect(orig);
-    Node* value_use = R.graph.NewNode(R.common.Return(), orig);
-    Node* r = R.reduce(orig);
-    // TODO(titzer): test will break if/when js-typed-lowering constant folds.
-    CHECK_EQ(IrOpcode::kBooleanNot, value_use->InputAt(0)->opcode());
-
-    if (r == orig && orig->opcode() == IrOpcode::kJSToBoolean) {
-      // The original node was turned into a ToBoolean, which has an effect.
-      CHECK_EQ(IrOpcode::kJSToBoolean, r->opcode());
-      R.CheckEffectInput(R.start(), orig);
-      R.CheckEffectInput(orig, effect_use);
-    } else {
-      // effect should have been removed from this node.
-      CHECK_EQ(IrOpcode::kBooleanNot, r->opcode());
-      R.CheckEffectInput(R.start(), effect_use);
-    }
-  }
-}
-
-
 TEST(Int32AddNarrowing) {
   {
     JSBitwiseTypedLoweringTester R;
@@ -1226,11 +1196,7 @@ TEST(Int32AddNarrowing) {
             Node* r = R.reduce(or_node);
 
             CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
-            CHECK_EQ(IrOpcode::kInt32Add, add_node->opcode());
-            bool is_signed = l ? R.signedness[o] : R.signedness[o + 1];
-
-            Type* add_type = NodeProperties::GetBounds(add_node).upper;
-            CHECK(add_type->Is(I32Type(is_signed)));
+            CHECK_EQ(IrOpcode::kNumberAdd, add_node->opcode());
           }
         }
       }
@@ -1253,40 +1219,33 @@ TEST(Int32AddNarrowing) {
             Node* r = R.reduce(or_node);
 
             CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
-            CHECK_EQ(IrOpcode::kInt32Add, add_node->opcode());
-            bool is_signed = l ? R.signedness[o] : R.signedness[o + 1];
-
-            Type* add_type = NodeProperties::GetBounds(add_node).upper;
-            CHECK(add_type->Is(I32Type(is_signed)));
+            CHECK_EQ(IrOpcode::kNumberAdd, add_node->opcode());
           }
         }
       }
     }
   }
-}
-
-
-TEST(Int32AddNarrowingNotOwned) {
-  JSBitwiseTypedLoweringTester R;
+  {
+    JSBitwiseTypedLoweringTester R;
 
-  for (int o = 0; o < R.kNumberOps; o += 2) {
-    Node* n0 = R.Parameter(I32Type(R.signedness[o]));
-    Node* n1 = R.Parameter(I32Type(R.signedness[o + 1]));
-    Node* one = R.graph.NewNode(R.common.NumberConstant(1));
-
-    Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
-    Node* or_node = R.Binop(R.ops[o], add_node, one);
-    Node* other_use = R.Binop(R.simplified.NumberAdd(), add_node, one);
-    Node* r = R.reduce(or_node);
-    CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
-    // Should not be reduced to Int32Add because of the other number add.
-    CHECK_EQ(IrOpcode::kNumberAdd, add_node->opcode());
-    // Conversion to int32 should be done.
-    CheckToI32(add_node, r->InputAt(0), R.signedness[o]);
-    CheckToI32(one, r->InputAt(1), R.signedness[o + 1]);
-    // The other use should also not be touched.
-    CHECK_EQ(add_node, other_use->InputAt(0));
-    CHECK_EQ(one, other_use->InputAt(1));
+    for (int o = 0; o < R.kNumberOps; o += 2) {
+      Node* n0 = R.Parameter(I32Type(R.signedness[o]));
+      Node* n1 = R.Parameter(I32Type(R.signedness[o + 1]));
+      Node* one = R.graph.NewNode(R.common.NumberConstant(1));
+
+      Node* add_node = R.Binop(R.simplified.NumberAdd(), n0, n1);
+      Node* or_node = R.Binop(R.ops[o], add_node, one);
+      Node* other_use = R.Binop(R.simplified.NumberAdd(), add_node, one);
+      Node* r = R.reduce(or_node);
+      CHECK_EQ(R.ops[o + 1]->opcode(), r->op()->opcode());
+      CHECK_EQ(IrOpcode::kNumberAdd, add_node->opcode());
+      // Conversion to int32 should be done.
+      CheckToI32(add_node, r->InputAt(0), R.signedness[o]);
+      CheckToI32(one, r->InputAt(1), R.signedness[o + 1]);
+      // The other use should also not be touched.
+      CHECK_EQ(add_node, other_use->InputAt(0));
+      CHECK_EQ(one, other_use->InputAt(1));
+    }
   }
 }
 
diff --git a/deps/v8/test/cctest/compiler/test-jump-threading.cc b/deps/v8/test/cctest/compiler/test-jump-threading.cc
new file mode 100644 (file)
index 0000000..74bf43d
--- /dev/null
@@ -0,0 +1,764 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+#include "test/cctest/cctest.h"
+
+#include "src/compiler/instruction.h"
+#include "src/compiler/instruction-codes.h"
+#include "src/compiler/jump-threading.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+typedef BasicBlock::RpoNumber RpoNumber;
+
+class TestCode : public HandleAndZoneScope {
+ public:
+  TestCode()
+      : HandleAndZoneScope(),
+        blocks_(main_zone()),
+        sequence_(main_zone(), &blocks_),
+        rpo_number_(RpoNumber::FromInt(0)),
+        current_(NULL) {}
+
+  ZoneVector<InstructionBlock*> blocks_;
+  InstructionSequence sequence_;
+  RpoNumber rpo_number_;
+  InstructionBlock* current_;
+
+  int Jump(int target) {
+    Start();
+    InstructionOperand* ops[] = {UseRpo(target)};
+    sequence_.AddInstruction(Instruction::New(main_zone(), kArchJmp, 0, NULL, 1,
+                                              ops, 0, NULL)->MarkAsControl());
+    int pos = static_cast<int>(sequence_.instructions().size() - 1);
+    End();
+    return pos;
+  }
+  void Fallthru() {
+    Start();
+    End();
+  }
+  int Branch(int ttarget, int ftarget) {
+    Start();
+    InstructionOperand* ops[] = {UseRpo(ttarget), UseRpo(ftarget)};
+    InstructionCode code = 119 | FlagsModeField::encode(kFlags_branch) |
+                           FlagsConditionField::encode(kEqual);
+    sequence_.AddInstruction(Instruction::New(main_zone(), code, 0, NULL, 2,
+                                              ops, 0, NULL)->MarkAsControl());
+    int pos = static_cast<int>(sequence_.instructions().size() - 1);
+    End();
+    return pos;
+  }
+  void Nop() {
+    Start();
+    sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
+  }
+  void RedundantMoves() {
+    Start();
+    sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
+    int index = static_cast<int>(sequence_.instructions().size()) - 1;
+    sequence_.AddGapMove(index, RegisterOperand::Create(13, main_zone()),
+                         RegisterOperand::Create(13, main_zone()));
+  }
+  void NonRedundantMoves() {
+    Start();
+    sequence_.AddInstruction(Instruction::New(main_zone(), kArchNop));
+    int index = static_cast<int>(sequence_.instructions().size()) - 1;
+    sequence_.AddGapMove(index, ImmediateOperand::Create(11, main_zone()),
+                         RegisterOperand::Create(11, main_zone()));
+  }
+  void Other() {
+    Start();
+    sequence_.AddInstruction(Instruction::New(main_zone(), 155));
+  }
+  void End() {
+    Start();
+    sequence_.EndBlock(current_->rpo_number());
+    current_ = NULL;
+    rpo_number_ = RpoNumber::FromInt(rpo_number_.ToInt() + 1);
+  }
+  InstructionOperand* UseRpo(int num) {
+    int index = sequence_.AddImmediate(Constant(RpoNumber::FromInt(num)));
+    return ImmediateOperand::Create(index, main_zone());
+  }
+  void Start(bool deferred = false) {
+    if (current_ == NULL) {
+      current_ = new (main_zone()) InstructionBlock(
+          main_zone(), BasicBlock::Id::FromInt(rpo_number_.ToInt()),
+          rpo_number_, RpoNumber::Invalid(), RpoNumber::Invalid(), deferred);
+      blocks_.push_back(current_);
+      sequence_.StartBlock(rpo_number_);
+    }
+  }
+  void Defer() {
+    CHECK(current_ == NULL);
+    Start(true);
+  }
+};
+
+
+void VerifyForwarding(TestCode& code, int count, int* expected) {
+  Zone local_zone(code.main_isolate());
+  ZoneVector<RpoNumber> result(&local_zone);
+  JumpThreading::ComputeForwarding(&local_zone, result, &code.sequence_);
+
+  CHECK(count == static_cast<int>(result.size()));
+  for (int i = 0; i < count; i++) {
+    CHECK(expected[i] == result[i].ToInt());
+  }
+}
+
+
+TEST(FwEmpty1) {
+  TestCode code;
+
+  // B0
+  code.Jump(1);
+  // B1
+  code.Jump(2);
+  // B2
+  code.End();
+
+  static int expected[] = {2, 2, 2};
+  VerifyForwarding(code, 3, expected);
+}
+
+
+TEST(FwEmptyN) {
+  for (int i = 0; i < 9; i++) {
+    TestCode code;
+
+    // B0
+    code.Jump(1);
+    // B1
+    for (int j = 0; j < i; j++) code.Nop();
+    code.Jump(2);
+    // B2
+    code.End();
+
+    static int expected[] = {2, 2, 2};
+    VerifyForwarding(code, 3, expected);
+  }
+}
+
+
+TEST(FwNone1) {
+  TestCode code;
+
+  // B0
+  code.End();
+
+  static int expected[] = {0};
+  VerifyForwarding(code, 1, expected);
+}
+
+
+TEST(FwMoves1) {
+  TestCode code;
+
+  // B0
+  code.RedundantMoves();
+  code.End();
+
+  static int expected[] = {0};
+  VerifyForwarding(code, 1, expected);
+}
+
+
+TEST(FwMoves2) {
+  TestCode code;
+
+  // B0
+  code.RedundantMoves();
+  code.Fallthru();
+  // B1
+  code.End();
+
+  static int expected[] = {1, 1};
+  VerifyForwarding(code, 2, expected);
+}
+
+
+TEST(FwMoves2b) {
+  TestCode code;
+
+  // B0
+  code.NonRedundantMoves();
+  code.Fallthru();
+  // B1
+  code.End();
+
+  static int expected[] = {0, 1};
+  VerifyForwarding(code, 2, expected);
+}
+
+
+TEST(FwOther2) {
+  TestCode code;
+
+  // B0
+  code.Other();
+  code.Fallthru();
+  // B1
+  code.End();
+
+  static int expected[] = {0, 1};
+  VerifyForwarding(code, 2, expected);
+}
+
+
+TEST(FwNone2a) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.End();
+
+  static int expected[] = {1, 1};
+  VerifyForwarding(code, 2, expected);
+}
+
+
+TEST(FwNone2b) {
+  TestCode code;
+
+  // B0
+  code.Jump(1);
+  // B1
+  code.End();
+
+  static int expected[] = {1, 1};
+  VerifyForwarding(code, 2, expected);
+}
+
+
+TEST(FwLoop1) {
+  TestCode code;
+
+  // B0
+  code.Jump(0);
+
+  static int expected[] = {0};
+  VerifyForwarding(code, 1, expected);
+}
+
+
+TEST(FwLoop2) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.Jump(0);
+
+  static int expected[] = {0, 0};
+  VerifyForwarding(code, 2, expected);
+}
+
+
+TEST(FwLoop3) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.Fallthru();
+  // B2
+  code.Jump(0);
+
+  static int expected[] = {0, 0, 0};
+  VerifyForwarding(code, 3, expected);
+}
+
+
+TEST(FwLoop1b) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.Jump(1);
+
+  static int expected[] = {1, 1};
+  VerifyForwarding(code, 2, expected);
+}
+
+
+TEST(FwLoop2b) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.Fallthru();
+  // B2
+  code.Jump(1);
+
+  static int expected[] = {1, 1, 1};
+  VerifyForwarding(code, 3, expected);
+}
+
+
+TEST(FwLoop3b) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.Fallthru();
+  // B2
+  code.Fallthru();
+  // B3
+  code.Jump(1);
+
+  static int expected[] = {1, 1, 1, 1};
+  VerifyForwarding(code, 4, expected);
+}
+
+
+TEST(FwLoop2_1a) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.Fallthru();
+  // B2
+  code.Fallthru();
+  // B3
+  code.Jump(1);
+  // B4
+  code.Jump(2);
+
+  static int expected[] = {1, 1, 1, 1, 1};
+  VerifyForwarding(code, 5, expected);
+}
+
+
+TEST(FwLoop2_1b) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.Fallthru();
+  // B2
+  code.Jump(4);
+  // B3
+  code.Jump(1);
+  // B4
+  code.Jump(2);
+
+  static int expected[] = {2, 2, 2, 2, 2};
+  VerifyForwarding(code, 5, expected);
+}
+
+
+TEST(FwLoop2_1c) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.Fallthru();
+  // B2
+  code.Jump(4);
+  // B3
+  code.Jump(2);
+  // B4
+  code.Jump(1);
+
+  static int expected[] = {1, 1, 1, 1, 1};
+  VerifyForwarding(code, 5, expected);
+}
+
+
+TEST(FwLoop2_1d) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.Fallthru();
+  // B2
+  code.Jump(1);
+  // B3
+  code.Jump(1);
+  // B4
+  code.Jump(1);
+
+  static int expected[] = {1, 1, 1, 1, 1};
+  VerifyForwarding(code, 5, expected);
+}
+
+
+TEST(FwLoop3_1a) {
+  TestCode code;
+
+  // B0
+  code.Fallthru();
+  // B1
+  code.Fallthru();
+  // B2
+  code.Fallthru();
+  // B3
+  code.Jump(2);
+  // B4
+  code.Jump(1);
+  // B5
+  code.Jump(0);
+
+  static int expected[] = {2, 2, 2, 2, 2, 2};
+  VerifyForwarding(code, 6, expected);
+}
+
+
+TEST(FwDiamonds) {
+  for (int i = 0; i < 2; i++) {
+    for (int j = 0; j < 2; j++) {
+      TestCode code;
+      // B0
+      code.Branch(1, 2);
+      // B1
+      if (i) code.Other();
+      code.Jump(3);
+      // B2
+      if (j) code.Other();
+      code.Jump(3);
+      // B3
+      code.End();
+
+      int expected[] = {0, i ? 1 : 3, j ? 2 : 3, 3};
+      VerifyForwarding(code, 4, expected);
+    }
+  }
+}
+
+
+TEST(FwDiamonds2) {
+  for (int i = 0; i < 2; i++) {
+    for (int j = 0; j < 2; j++) {
+      for (int k = 0; k < 2; k++) {
+        TestCode code;
+        // B0
+        code.Branch(1, 2);
+        // B1
+        if (i) code.Other();
+        code.Jump(3);
+        // B2
+        if (j) code.Other();
+        code.Jump(3);
+        // B3
+        if (k) code.NonRedundantMoves();
+        code.Jump(4);
+        // B4
+        code.End();
+
+        int merge = k ? 3 : 4;
+        int expected[] = {0, i ? 1 : merge, j ? 2 : merge, merge, 4};
+        VerifyForwarding(code, 5, expected);
+      }
+    }
+  }
+}
+
+
+TEST(FwDoubleDiamonds) {
+  for (int i = 0; i < 2; i++) {
+    for (int j = 0; j < 2; j++) {
+      for (int x = 0; x < 2; x++) {
+        for (int y = 0; y < 2; y++) {
+          TestCode code;
+          // B0
+          code.Branch(1, 2);
+          // B1
+          if (i) code.Other();
+          code.Jump(3);
+          // B2
+          if (j) code.Other();
+          code.Jump(3);
+          // B3
+          code.Branch(4, 5);
+          // B4
+          if (x) code.Other();
+          code.Jump(6);
+          // B5
+          if (y) code.Other();
+          code.Jump(6);
+          // B6
+          code.End();
+
+          int expected[] = {0,         i ? 1 : 3, j ? 2 : 3, 3,
+                            x ? 4 : 6, y ? 5 : 6, 6};
+          VerifyForwarding(code, 7, expected);
+        }
+      }
+    }
+  }
+}
+
+template <int kSize>
+void RunPermutationsRecursive(int outer[kSize], int start,
+                              void (*run)(int*, int)) {
+  int permutation[kSize];
+
+  for (int i = 0; i < kSize; i++) permutation[i] = outer[i];
+
+  int count = kSize - start;
+  if (count == 0) return run(permutation, kSize);
+  for (int i = start; i < kSize; i++) {
+    permutation[start] = outer[i];
+    permutation[i] = outer[start];
+    RunPermutationsRecursive<kSize>(permutation, start + 1, run);
+    permutation[i] = outer[i];
+    permutation[start] = outer[start];
+  }
+}
+
+
+template <int kSize>
+void RunAllPermutations(void (*run)(int*, int)) {
+  int permutation[kSize];
+  for (int i = 0; i < kSize; i++) permutation[i] = i;
+  RunPermutationsRecursive<kSize>(permutation, 0, run);
+}
+
+
+void PrintPermutation(int* permutation, int size) {
+  printf("{ ");
+  for (int i = 0; i < size; i++) {
+    if (i > 0) printf(", ");
+    printf("%d", permutation[i]);
+  }
+  printf(" }\n");
+}
+
+
+int find(int x, int* permutation, int size) {
+  for (int i = 0; i < size; i++) {
+    if (permutation[i] == x) return i;
+  }
+  return size;
+}
+
+
+void RunPermutedChain(int* permutation, int size) {
+  TestCode code;
+  int cur = -1;
+  for (int i = 0; i < size; i++) {
+    code.Jump(find(cur + 1, permutation, size) + 1);
+    cur = permutation[i];
+  }
+  code.Jump(find(cur + 1, permutation, size) + 1);
+  code.End();
+
+  int expected[] = {size + 1, size + 1, size + 1, size + 1,
+                    size + 1, size + 1, size + 1};
+  VerifyForwarding(code, size + 2, expected);
+}
+
+
+TEST(FwPermuted_chain) {
+  RunAllPermutations<3>(RunPermutedChain);
+  RunAllPermutations<4>(RunPermutedChain);
+  RunAllPermutations<5>(RunPermutedChain);
+}
+
+
+void RunPermutedDiamond(int* permutation, int size) {
+  TestCode code;
+  int br = 1 + find(0, permutation, size);
+  code.Jump(br);
+  for (int i = 0; i < size; i++) {
+    switch (permutation[i]) {
+      case 0:
+        code.Branch(1 + find(1, permutation, size),
+                    1 + find(2, permutation, size));
+        break;
+      case 1:
+        code.Jump(1 + find(3, permutation, size));
+        break;
+      case 2:
+        code.Jump(1 + find(3, permutation, size));
+        break;
+      case 3:
+        code.Jump(5);
+        break;
+    }
+  }
+  code.End();
+
+  int expected[] = {br, 5, 5, 5, 5, 5};
+  expected[br] = br;
+  VerifyForwarding(code, 6, expected);
+}
+
+
+TEST(FwPermuted_diamond) { RunAllPermutations<4>(RunPermutedDiamond); }
+
+
+void ApplyForwarding(TestCode& code, int size, int* forward) {
+  ZoneVector<RpoNumber> vector(code.main_zone());
+  for (int i = 0; i < size; i++) {
+    vector.push_back(RpoNumber::FromInt(forward[i]));
+  }
+  JumpThreading::ApplyForwarding(vector, &code.sequence_);
+}
+
+
+void CheckJump(TestCode& code, int pos, int target) {
+  Instruction* instr = code.sequence_.InstructionAt(pos);
+  CHECK_EQ(kArchJmp, instr->arch_opcode());
+  CHECK_EQ(1, static_cast<int>(instr->InputCount()));
+  CHECK_EQ(0, static_cast<int>(instr->OutputCount()));
+  CHECK_EQ(0, static_cast<int>(instr->TempCount()));
+  CHECK_EQ(target, code.sequence_.InputRpo(instr, 0).ToInt());
+}
+
+
+void CheckNop(TestCode& code, int pos) {
+  Instruction* instr = code.sequence_.InstructionAt(pos);
+  CHECK_EQ(kArchNop, instr->arch_opcode());
+  CHECK_EQ(0, static_cast<int>(instr->InputCount()));
+  CHECK_EQ(0, static_cast<int>(instr->OutputCount()));
+  CHECK_EQ(0, static_cast<int>(instr->TempCount()));
+}
+
+
+void CheckBranch(TestCode& code, int pos, int t1, int t2) {
+  Instruction* instr = code.sequence_.InstructionAt(pos);
+  CHECK_EQ(2, static_cast<int>(instr->InputCount()));
+  CHECK_EQ(0, static_cast<int>(instr->OutputCount()));
+  CHECK_EQ(0, static_cast<int>(instr->TempCount()));
+  CHECK_EQ(t1, code.sequence_.InputRpo(instr, 0).ToInt());
+  CHECK_EQ(t2, code.sequence_.InputRpo(instr, 1).ToInt());
+}
+
+
+void CheckAssemblyOrder(TestCode& code, int size, int* expected) {
+  int i = 0;
+  for (auto const block : code.sequence_.instruction_blocks()) {
+    CHECK_EQ(expected[i++], block->ao_number().ToInt());
+  }
+}
+
+
+TEST(Rewire1) {
+  TestCode code;
+
+  // B0
+  int j1 = code.Jump(1);
+  // B1
+  int j2 = code.Jump(2);
+  // B2
+  code.End();
+
+  static int forward[] = {2, 2, 2};
+  ApplyForwarding(code, 3, forward);
+  CheckJump(code, j1, 2);
+  CheckNop(code, j2);
+
+  static int assembly[] = {0, 1, 1};
+  CheckAssemblyOrder(code, 3, assembly);
+}
+
+
+TEST(Rewire1_deferred) {
+  TestCode code;
+
+  // B0
+  int j1 = code.Jump(1);
+  // B1
+  int j2 = code.Jump(2);
+  // B2
+  code.Defer();
+  int j3 = code.Jump(3);
+  // B3
+  code.End();
+
+  static int forward[] = {3, 3, 3, 3};
+  ApplyForwarding(code, 4, forward);
+  CheckJump(code, j1, 3);
+  CheckNop(code, j2);
+  CheckNop(code, j3);
+
+  static int assembly[] = {0, 1, 2, 1};
+  CheckAssemblyOrder(code, 4, assembly);
+}
+
+
+TEST(Rewire2_deferred) {
+  TestCode code;
+
+  // B0
+  code.Other();
+  int j1 = code.Jump(1);
+  // B1
+  code.Defer();
+  code.Fallthru();
+  // B2
+  code.Defer();
+  int j2 = code.Jump(3);
+  // B3
+  code.End();
+
+  static int forward[] = {0, 1, 2, 3};
+  ApplyForwarding(code, 4, forward);
+  CheckJump(code, j1, 1);
+  CheckJump(code, j2, 3);
+
+  static int assembly[] = {0, 2, 3, 1};
+  CheckAssemblyOrder(code, 4, assembly);
+}
+
+
+TEST(Rewire_diamond) {
+  for (int i = 0; i < 2; i++) {
+    for (int j = 0; j < 2; j++) {
+      TestCode code;
+      // B0
+      int j1 = code.Jump(1);
+      // B1
+      int b1 = code.Branch(2, 3);
+      // B2
+      int j2 = code.Jump(4);
+      // B3
+      int j3 = code.Jump(4);
+      // B4
+      code.End();
+
+      int forward[] = {0, 1, i ? 4 : 2, j ? 4 : 3, 4};
+      ApplyForwarding(code, 5, forward);
+      CheckJump(code, j1, 1);
+      CheckBranch(code, b1, i ? 4 : 2, j ? 4 : 3);
+      if (i) {
+        CheckNop(code, j2);
+      } else {
+        CheckJump(code, j2, 4);
+      }
+      if (j) {
+        CheckNop(code, j3);
+      } else {
+        CheckJump(code, j3, 4);
+      }
+
+      int assembly[] = {0, 1, 2, 3, 4};
+      if (i) {
+        for (int k = 3; k < 5; k++) assembly[k]--;
+      }
+      if (j) {
+        for (int k = 4; k < 5; k++) assembly[k]--;
+      }
+      CheckAssemblyOrder(code, 5, assembly);
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
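
The forwarding expectations in the tests above follow a consistent rule: an empty block ending in an unconditional jump forwards to wherever its target forwards, and a cycle made only of such blocks collapses onto the first block of the cycle that the walk reaches. The following is a standalone sketch of that computation, my reading of the tests rather than V8's actual JumpThreading code:

```cpp
#include <vector>

// forward[i] is the block that block i's predecessors may jump to
// directly. jump[i] is the unconditional jump target of an otherwise
// empty block i, or -1 if the block has real content.
std::vector<int> ComputeForwarding(const std::vector<int>& jump) {
  const int n = static_cast<int>(jump.size());
  std::vector<int> forward(n, -1);
  std::vector<int> state(n, 0);  // 0 = unvisited, 1 = on path, 2 = done
  for (int i = 0; i < n; i++) {
    std::vector<int> path;
    int cur = i;
    while (state[cur] == 0 && jump[cur] >= 0) {
      state[cur] = 1;  // keep walking the chain of trivial jumps
      path.push_back(cur);
      cur = jump[cur];
    }
    int target = state[cur] == 1   ? cur           // cycle: entry block wins
                 : state[cur] == 2 ? forward[cur]  // already resolved
                                   : cur;          // a block with content
    for (int block : path) {
      forward[block] = target;
      state[block] = 2;
    }
    if (state[cur] != 2) {
      forward[cur] = target;
      state[cur] = 2;
    }
  }
  return forward;
}
```

For FwLoop2_1b above, jump = {1, 2, 4, 1, 2} yields {2, 2, 2, 2, 2}, matching the test's expected array.
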
index 0d5ee8f..117caf2 100644 (file)
@@ -8,7 +8,6 @@
 #include "src/zone.h"
 
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/linkage.h"
 #include "src/compiler/machine-operator.h"
@@ -65,8 +64,8 @@ TEST(TestLinkageJSFunctionIncoming) {
     CallDescriptor* descriptor = linkage.GetIncomingDescriptor();
     CHECK_NE(NULL, descriptor);
 
-    CHECK_EQ(1 + i, descriptor->JSParameterCount());
-    CHECK_EQ(1, descriptor->ReturnCount());
+    CHECK_EQ(1 + i, static_cast<int>(descriptor->JSParameterCount()));
+    CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
     CHECK_EQ(Operator::kNoProperties, descriptor->properties());
     CHECK_EQ(true, descriptor->IsJSFunctionCall());
   }
@@ -93,8 +92,8 @@ TEST(TestLinkageJSCall) {
     CallDescriptor* descriptor =
         linkage.GetJSCallDescriptor(i, CallDescriptor::kNoFlags);
     CHECK_NE(NULL, descriptor);
-    CHECK_EQ(i, descriptor->JSParameterCount());
-    CHECK_EQ(1, descriptor->ReturnCount());
+    CHECK_EQ(i, static_cast<int>(descriptor->JSParameterCount()));
+    CHECK_EQ(1, static_cast<int>(descriptor->ReturnCount()));
     CHECK_EQ(Operator::kNoProperties, descriptor->properties());
     CHECK_EQ(true, descriptor->IsJSFunctionCall());
   }
diff --git a/deps/v8/test/cctest/compiler/test-loop-analysis.cc b/deps/v8/test/cctest/compiler/test-loop-analysis.cc
new file mode 100644 (file)
index 0000000..9c11268
--- /dev/null
@@ -0,0 +1,862 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/v8.h"
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/js-graph.h"
+#include "src/compiler/js-operator.h"
+#include "src/compiler/loop-analysis.h"
+#include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/schedule.h"
+#include "src/compiler/scheduler.h"
+#include "src/compiler/simplified-operator.h"
+#include "src/compiler/verifier.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::internal;
+using namespace v8::internal::compiler;
+
+static Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0,
+                        0, 1, 0, 0);
+static Operator kIntLt(IrOpcode::kInt32LessThan, Operator::kPure,
+                       "Int32LessThan", 2, 0, 0, 1, 0, 0);
+static Operator kStore(IrOpcode::kStore, Operator::kNoProperties, "Store", 0, 2,
+                       1, 0, 1, 0);
+
+static const int kNumLeafs = 4;
+
+// A helper for all tests dealing with LoopFinder.
+class LoopFinderTester : HandleAndZoneScope {
+ public:
+  LoopFinderTester()
+      : isolate(main_isolate()),
+        common(main_zone()),
+        graph(main_zone()),
+        jsgraph(&graph, &common, NULL, NULL),
+        start(graph.NewNode(common.Start(1))),
+        end(graph.NewNode(common.End(), start)),
+        p0(graph.NewNode(common.Parameter(0), start)),
+        zero(jsgraph.Int32Constant(0)),
+        one(jsgraph.OneConstant()),
+        half(jsgraph.Constant(0.5)),
+        self(graph.NewNode(common.Int32Constant(0xaabbccdd))),
+        dead(graph.NewNode(common.Dead())),
+        loop_tree(NULL) {
+    graph.SetEnd(end);
+    graph.SetStart(start);
+    leaf[0] = zero;
+    leaf[1] = one;
+    leaf[2] = half;
+    leaf[3] = p0;
+  }
+
+  Isolate* isolate;
+  CommonOperatorBuilder common;
+  Graph graph;
+  JSGraph jsgraph;
+  Node* start;
+  Node* end;
+  Node* p0;
+  Node* zero;
+  Node* one;
+  Node* half;
+  Node* self;
+  Node* dead;
+  Node* leaf[kNumLeafs];
+  LoopTree* loop_tree;
+
+  Node* Phi(Node* a) {
+    return SetSelfReferences(graph.NewNode(op(1, false), a, start));
+  }
+
+  Node* Phi(Node* a, Node* b) {
+    return SetSelfReferences(graph.NewNode(op(2, false), a, b, start));
+  }
+
+  Node* Phi(Node* a, Node* b, Node* c) {
+    return SetSelfReferences(graph.NewNode(op(3, false), a, b, c, start));
+  }
+
+  Node* Phi(Node* a, Node* b, Node* c, Node* d) {
+    return SetSelfReferences(graph.NewNode(op(4, false), a, b, c, d, start));
+  }
+
+  Node* EffectPhi(Node* a) {
+    return SetSelfReferences(graph.NewNode(op(1, true), a, start));
+  }
+
+  Node* EffectPhi(Node* a, Node* b) {
+    return SetSelfReferences(graph.NewNode(op(2, true), a, b, start));
+  }
+
+  Node* EffectPhi(Node* a, Node* b, Node* c) {
+    return SetSelfReferences(graph.NewNode(op(3, true), a, b, c, start));
+  }
+
+  Node* EffectPhi(Node* a, Node* b, Node* c, Node* d) {
+    return SetSelfReferences(graph.NewNode(op(4, true), a, b, c, d, start));
+  }
+
+  Node* SetSelfReferences(Node* node) {
+    for (Edge edge : node->input_edges()) {
+      if (edge.to() == self) node->ReplaceInput(edge.index(), node);
+    }
+    return node;
+  }
+
+  const Operator* op(int count, bool effect) {
+    return effect ? common.EffectPhi(count) : common.Phi(kMachAnyTagged, count);
+  }
+
+  Node* Return(Node* val, Node* effect, Node* control) {
+    Node* ret = graph.NewNode(common.Return(), val, effect, control);
+    end->ReplaceInput(0, ret);
+    return ret;
+  }
+
+  LoopTree* GetLoopTree() {
+    if (loop_tree == NULL) {
+      if (FLAG_trace_turbo_graph) {
+        OFStream os(stdout);
+        os << AsRPO(graph);
+      }
+      Zone zone(isolate);
+      loop_tree = LoopFinder::BuildLoopTree(&graph, &zone);
+    }
+    return loop_tree;
+  }
+
+  void CheckLoop(Node** header, int header_count, Node** body, int body_count) {
+    LoopTree* tree = GetLoopTree();
+    LoopTree::Loop* loop = tree->ContainingLoop(header[0]);
+    CHECK_NE(NULL, loop);
+
+    CHECK(header_count == static_cast<int>(loop->HeaderSize()));
+    for (int i = 0; i < header_count; i++) {
+      // Each header node should be in the loop.
+      CHECK_EQ(loop, tree->ContainingLoop(header[i]));
+      CheckRangeContains(tree->HeaderNodes(loop), header[i]);
+    }
+
+    CHECK_EQ(body_count, static_cast<int>(loop->BodySize()));
+    for (int i = 0; i < body_count; i++) {
+      // Each body node should be contained in the loop.
+      CHECK(tree->Contains(loop, body[i]));
+      CheckRangeContains(tree->BodyNodes(loop), body[i]);
+    }
+  }
+
+  void CheckRangeContains(NodeRange range, Node* node) {
+    // O(n) ftw.
+    CHECK_NE(range.end(), std::find(range.begin(), range.end(), node));
+  }
+
+  void CheckNestedLoops(Node** chain, int chain_count) {
+    LoopTree* tree = GetLoopTree();
+    for (int i = 0; i < chain_count; i++) {
+      Node* header = chain[i];
+      // Each header should be in a loop.
+      LoopTree::Loop* loop = tree->ContainingLoop(header);
+      CHECK_NE(NULL, loop);
+      // Check parentage.
+      LoopTree::Loop* parent =
+          i == 0 ? NULL : tree->ContainingLoop(chain[i - 1]);
+      CHECK_EQ(parent, loop->parent());
+      for (int j = i - 1; j >= 0; j--) {
+        // This loop should be nested inside all the outer loops.
+        Node* outer_header = chain[j];
+        LoopTree::Loop* outer = tree->ContainingLoop(outer_header);
+        CHECK(tree->Contains(outer, header));
+        CHECK(!tree->Contains(loop, outer_header));
+      }
+    }
+  }
+};
+
+
+struct While {
+  LoopFinderTester& t;
+  Node* branch;
+  Node* if_true;
+  Node* exit;
+  Node* loop;
+
+  While(LoopFinderTester& R, Node* cond) : t(R) {
+    loop = t.graph.NewNode(t.common.Loop(2), t.start, t.start);
+    branch = t.graph.NewNode(t.common.Branch(), cond, loop);
+    if_true = t.graph.NewNode(t.common.IfTrue(), branch);
+    exit = t.graph.NewNode(t.common.IfFalse(), branch);
+    loop->ReplaceInput(1, if_true);
+  }
+
+  void chain(Node* control) { loop->ReplaceInput(0, control); }
+  void nest(While& that) {
+    that.loop->ReplaceInput(1, exit);
+    this->loop->ReplaceInput(0, that.if_true);
+  }
+};
+
+
+struct Counter {
+  Node* base;
+  Node* inc;
+  Node* phi;
+  Node* add;
+
+  Counter(While& w, int32_t b, int32_t k)
+      : base(w.t.jsgraph.Int32Constant(b)), inc(w.t.jsgraph.Int32Constant(k)) {
+    Build(w);
+  }
+
+  Counter(While& w, Node* b, Node* k) : base(b), inc(k) { Build(w); }
+
+  void Build(While& w) {
+    phi = w.t.graph.NewNode(w.t.op(2, false), base, base, w.loop);
+    add = w.t.graph.NewNode(&kIntAdd, phi, inc);
+    phi->ReplaceInput(1, add);
+  }
+};
+
+
+struct StoreLoop {
+  Node* base;
+  Node* val;
+  Node* phi;
+  Node* store;
+
+  explicit StoreLoop(While& w)
+      : base(w.t.jsgraph.Int32Constant(12)),
+        val(w.t.jsgraph.Int32Constant(13)) {
+    Build(w);
+  }
+
+  StoreLoop(While& w, Node* b, Node* v) : base(b), val(v) { Build(w); }
+
+  void Build(While& w) {
+    phi = w.t.graph.NewNode(w.t.op(2, true), base, base, w.loop);
+    store = w.t.graph.NewNode(&kStore, phi, val, w.loop);
+    phi->ReplaceInput(1, store);
+  }
+};
+
+
+TEST(LaLoop1) {
+  // One loop.
+  LoopFinderTester t;
+  While w(t, t.p0);
+  t.Return(t.p0, t.start, w.exit);
+
+  Node* chain[] = {w.loop};
+  t.CheckNestedLoops(chain, 1);
+
+  Node* header[] = {w.loop};
+  Node* body[] = {w.branch, w.if_true};
+  t.CheckLoop(header, 1, body, 2);
+}
+
+
+TEST(LaLoop1c) {
+  // One loop with a counter.
+  LoopFinderTester t;
+  While w(t, t.p0);
+  Counter c(w, 0, 1);
+  t.Return(c.phi, t.start, w.exit);
+
+  Node* chain[] = {w.loop};
+  t.CheckNestedLoops(chain, 1);
+
+  Node* header[] = {w.loop, c.phi};
+  Node* body[] = {w.branch, w.if_true, c.add};
+  t.CheckLoop(header, 2, body, 3);
+}
+
+
+TEST(LaLoop1e) {
+  // One loop with an effect phi.
+  LoopFinderTester t;
+  While w(t, t.p0);
+  StoreLoop c(w);
+  t.Return(t.p0, c.phi, w.exit);
+
+  Node* chain[] = {w.loop};
+  t.CheckNestedLoops(chain, 1);
+
+  Node* header[] = {w.loop, c.phi};
+  Node* body[] = {w.branch, w.if_true, c.store};
+  t.CheckLoop(header, 2, body, 3);
+}
+
+
+TEST(LaLoop1d) {
+  // One loop with two counters.
+  LoopFinderTester t;
+  While w(t, t.p0);
+  Counter c1(w, 0, 1);
+  Counter c2(w, 1, 1);
+  t.Return(t.graph.NewNode(&kIntAdd, c1.phi, c2.phi), t.start, w.exit);
+
+  Node* chain[] = {w.loop};
+  t.CheckNestedLoops(chain, 1);
+
+  Node* header[] = {w.loop, c1.phi, c2.phi};
+  Node* body[] = {w.branch, w.if_true, c1.add, c2.add};
+  t.CheckLoop(header, 3, body, 4);
+}
+
+
+TEST(LaLoop2) {
+  // One loop following another.
+  LoopFinderTester t;
+  While w1(t, t.p0);
+  While w2(t, t.p0);
+  w2.chain(w1.exit);
+  t.Return(t.p0, t.start, w2.exit);
+
+  {
+    Node* chain[] = {w1.loop};
+    t.CheckNestedLoops(chain, 1);
+
+    Node* header[] = {w1.loop};
+    Node* body[] = {w1.branch, w1.if_true};
+    t.CheckLoop(header, 1, body, 2);
+  }
+
+  {
+    Node* chain[] = {w2.loop};
+    t.CheckNestedLoops(chain, 1);
+
+    Node* header[] = {w2.loop};
+    Node* body[] = {w2.branch, w2.if_true};
+    t.CheckLoop(header, 1, body, 2);
+  }
+}
+
+
+TEST(LaLoop2c) {
+  // One loop following another, each with counters.
+  LoopFinderTester t;
+  While w1(t, t.p0);
+  While w2(t, t.p0);
+  Counter c1(w1, 0, 1);
+  Counter c2(w2, 0, 1);
+  w2.chain(w1.exit);
+  t.Return(t.graph.NewNode(&kIntAdd, c1.phi, c2.phi), t.start, w2.exit);
+
+  {
+    Node* chain[] = {w1.loop};
+    t.CheckNestedLoops(chain, 1);
+
+    Node* header[] = {w1.loop, c1.phi};
+    Node* body[] = {w1.branch, w1.if_true, c1.add};
+    t.CheckLoop(header, 2, body, 3);
+  }
+
+  {
+    Node* chain[] = {w2.loop};
+    t.CheckNestedLoops(chain, 1);
+
+    Node* header[] = {w2.loop, c2.phi};
+    Node* body[] = {w2.branch, w2.if_true, c2.add};
+    t.CheckLoop(header, 2, body, 3);
+  }
+}
+
+
+TEST(LaLoop2cc) {
+  // One loop following another; second loop uses phi from first.
+  for (int i = 0; i < 8; i++) {
+    LoopFinderTester t;
+    While w1(t, t.p0);
+    While w2(t, t.p0);
+    Counter c1(w1, 0, 1);
+
+    // Various usage scenarios for the second loop.
+    Counter c2(w2, i & 1 ? t.p0 : c1.phi, i & 2 ? t.p0 : c1.phi);
+    if (i & 3) w2.branch->ReplaceInput(0, c1.phi);
+
+    w2.chain(w1.exit);
+    t.Return(t.graph.NewNode(&kIntAdd, c1.phi, c2.phi), t.start, w2.exit);
+
+    {
+      Node* chain[] = {w1.loop};
+      t.CheckNestedLoops(chain, 1);
+
+      Node* header[] = {w1.loop, c1.phi};
+      Node* body[] = {w1.branch, w1.if_true, c1.add};
+      t.CheckLoop(header, 2, body, 3);
+    }
+
+    {
+      Node* chain[] = {w2.loop};
+      t.CheckNestedLoops(chain, 1);
+
+      Node* header[] = {w2.loop, c2.phi};
+      Node* body[] = {w2.branch, w2.if_true, c2.add};
+      t.CheckLoop(header, 2, body, 3);
+    }
+  }
+}
+
+
+TEST(LaNestedLoop1) {
+  // One loop nested in another.
+  LoopFinderTester t;
+  While w1(t, t.p0);
+  While w2(t, t.p0);
+  w2.nest(w1);
+  t.Return(t.p0, t.start, w1.exit);
+
+  Node* chain[] = {w1.loop, w2.loop};
+  t.CheckNestedLoops(chain, 2);
+
+  Node* h1[] = {w1.loop};
+  Node* b1[] = {w1.branch, w1.if_true, w2.loop, w2.branch, w2.if_true, w2.exit};
+  t.CheckLoop(h1, 1, b1, 6);
+
+  Node* h2[] = {w2.loop};
+  Node* b2[] = {w2.branch, w2.if_true};
+  t.CheckLoop(h2, 1, b2, 2);
+}
+
+
+TEST(LaNestedLoop1c) {
+  // One loop nested in another, each with a counter.
+  LoopFinderTester t;
+  While w1(t, t.p0);
+  While w2(t, t.p0);
+  Counter c1(w1, 0, 1);
+  Counter c2(w2, 0, 1);
+  w2.branch->ReplaceInput(0, c2.phi);
+  w2.nest(w1);
+  t.Return(c1.phi, t.start, w1.exit);
+
+  Node* chain[] = {w1.loop, w2.loop};
+  t.CheckNestedLoops(chain, 2);
+
+  Node* h1[] = {w1.loop, c1.phi};
+  Node* b1[] = {w1.branch, w1.if_true, w2.loop, w2.branch, w2.if_true,
+                w2.exit,   c2.phi,     c1.add,  c2.add};
+  t.CheckLoop(h1, 2, b1, 9);
+
+  Node* h2[] = {w2.loop, c2.phi};
+  Node* b2[] = {w2.branch, w2.if_true, c2.add};
+  t.CheckLoop(h2, 2, b2, 3);
+}
+
+
+TEST(LaNestedLoop2) {
+  // Two loops nested in an outer loop.
+  LoopFinderTester t;
+  While w1(t, t.p0);
+  While w2(t, t.p0);
+  While w3(t, t.p0);
+  w2.nest(w1);
+  w3.nest(w1);
+  w3.chain(w2.exit);
+  t.Return(t.p0, t.start, w1.exit);
+
+  Node* chain1[] = {w1.loop, w2.loop};
+  t.CheckNestedLoops(chain1, 2);
+
+  Node* chain2[] = {w1.loop, w3.loop};
+  t.CheckNestedLoops(chain2, 2);
+
+  Node* h1[] = {w1.loop};
+  Node* b1[] = {w1.branch, w1.if_true, w2.loop,   w2.branch,  w2.if_true,
+                w2.exit,   w3.loop,    w3.branch, w3.if_true, w3.exit};
+  t.CheckLoop(h1, 1, b1, 10);
+
+  Node* h2[] = {w2.loop};
+  Node* b2[] = {w2.branch, w2.if_true};
+  t.CheckLoop(h2, 1, b2, 2);
+
+  Node* h3[] = {w3.loop};
+  Node* b3[] = {w3.branch, w3.if_true};
+  t.CheckLoop(h3, 1, b3, 2);
+}
+
+
+TEST(LaNestedLoop3) {
+  // Three nested loops.
+  LoopFinderTester t;
+  While w1(t, t.p0);
+  While w2(t, t.p0);
+  While w3(t, t.p0);
+  w2.loop->ReplaceInput(0, w1.if_true);
+  w3.loop->ReplaceInput(0, w2.if_true);
+  w2.loop->ReplaceInput(1, w3.exit);
+  w1.loop->ReplaceInput(1, w2.exit);
+  t.Return(t.p0, t.start, w1.exit);
+
+  Node* chain[] = {w1.loop, w2.loop, w3.loop};
+  t.CheckNestedLoops(chain, 3);
+
+  Node* h1[] = {w1.loop};
+  Node* b1[] = {w1.branch, w1.if_true, w2.loop,   w2.branch,  w2.if_true,
+                w2.exit,   w3.loop,    w3.branch, w3.if_true, w3.exit};
+  t.CheckLoop(h1, 1, b1, 10);
+
+  Node* h2[] = {w2.loop};
+  Node* b2[] = {w2.branch, w2.if_true, w3.loop, w3.branch, w3.if_true, w3.exit};
+  t.CheckLoop(h2, 1, b2, 6);
+
+  Node* h3[] = {w3.loop};
+  Node* b3[] = {w3.branch, w3.if_true};
+  t.CheckLoop(h3, 1, b3, 2);
+}
+
+
+TEST(LaNestedLoop3c) {
+  // Three nested loops with counters.
+  LoopFinderTester t;
+  While w1(t, t.p0);
+  Counter c1(w1, 0, 1);
+  While w2(t, t.p0);
+  Counter c2(w2, 0, 1);
+  While w3(t, t.p0);
+  Counter c3(w3, 0, 1);
+  w2.loop->ReplaceInput(0, w1.if_true);
+  w3.loop->ReplaceInput(0, w2.if_true);
+  w2.loop->ReplaceInput(1, w3.exit);
+  w1.loop->ReplaceInput(1, w2.exit);
+  w1.branch->ReplaceInput(0, c1.phi);
+  w2.branch->ReplaceInput(0, c2.phi);
+  w3.branch->ReplaceInput(0, c3.phi);
+  t.Return(c1.phi, t.start, w1.exit);
+
+  Node* chain[] = {w1.loop, w2.loop, w3.loop};
+  t.CheckNestedLoops(chain, 3);
+
+  Node* h1[] = {w1.loop, c1.phi};
+  Node* b1[] = {w1.branch, w1.if_true, c1.add,    c2.add,     c3.add,
+                c2.phi,    c3.phi,     w2.loop,   w2.branch,  w2.if_true,
+                w2.exit,   w3.loop,    w3.branch, w3.if_true, w3.exit};
+  t.CheckLoop(h1, 2, b1, 15);
+
+  Node* h2[] = {w2.loop, c2.phi};
+  Node* b2[] = {w2.branch, w2.if_true, c2.add,     c3.add, c3.phi,
+                w3.loop,   w3.branch,  w3.if_true, w3.exit};
+  t.CheckLoop(h2, 2, b2, 9);
+
+  Node* h3[] = {w3.loop, c3.phi};
+  Node* b3[] = {w3.branch, w3.if_true, c3.add};
+  t.CheckLoop(h3, 2, b3, 3);
+}
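+
+// Note that every inner-loop node, counters included, reappears in the body
+// of each enclosing loop: b1 above contains c2.add, c3.add and both inner
+// phis alongside the complete control of w2 and w3.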
+
+
+TEST(LaMultipleExit1) {
+  const int kMaxExits = 10;
+  Node* merge[1 + kMaxExits];
+  Node* body[2 * kMaxExits];
+
+  // A single loop with {i} exits.
+  for (int i = 1; i < kMaxExits; i++) {
+    LoopFinderTester t;
+    Node* cond = t.p0;
+
+    int merge_count = 0;
+    int body_count = 0;
+    Node* loop = t.graph.NewNode(t.common.Loop(2), t.start, t.start);
+    Node* last = loop;
+
+    for (int e = 0; e < i; e++) {
+      Node* branch = t.graph.NewNode(t.common.Branch(), cond, last);
+      Node* if_true = t.graph.NewNode(t.common.IfTrue(), branch);
+      Node* exit = t.graph.NewNode(t.common.IfFalse(), branch);
+      last = if_true;
+
+      body[body_count++] = branch;
+      body[body_count++] = if_true;
+      merge[merge_count++] = exit;
+    }
+
+    loop->ReplaceInput(1, last);  // Form the loop backedge.
+    Node* end = t.graph.NewNode(t.common.Merge(i), i, merge);  // Form the exit.
+    t.graph.SetEnd(end);
+
+    Node* h[] = {loop};
+    t.CheckLoop(h, 1, body, body_count);
+  }
+}
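+
+// The {i} exits are funneled into a single Merge(i) that stands in for the
+// graph's end; each if_false is an exit edge, which is why none of them
+// appears in the expected loop body above.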
+
+
+TEST(LaMultipleBackedge1) {
+  const int kMaxBackedges = 10;
+  Node* loop_inputs[1 + kMaxBackedges];
+  Node* body[3 * kMaxBackedges];
+
+  // A single loop with {i} backedges.
+  for (int i = 1; i < kMaxBackedges; i++) {
+    LoopFinderTester t;
+
+    for (int j = 0; j <= i; j++) loop_inputs[j] = t.start;
+    Node* loop = t.graph.NewNode(t.common.Loop(1 + i), 1 + i, loop_inputs);
+
+    Node* cond = t.p0;
+    int body_count = 0;
+    Node* exit = loop;
+
+    for (int b = 0; b < i; b++) {
+      Node* branch = t.graph.NewNode(t.common.Branch(), cond, exit);
+      Node* if_true = t.graph.NewNode(t.common.IfTrue(), branch);
+      Node* if_false = t.graph.NewNode(t.common.IfFalse(), branch);
+      exit = if_false;
+
+      body[body_count++] = branch;
+      body[body_count++] = if_true;
+      if (b != (i - 1)) body[body_count++] = if_false;
+
+      loop->ReplaceInput(1 + b, if_true);
+    }
+
+    t.graph.SetEnd(exit);
+
+    Node* h[] = {loop};
+    t.CheckLoop(h, 1, body, body_count);
+  }
+}
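+
+// With {i} backedges the loop node grows to Loop(1 + i): every if_true is
+// wired back as a distinct backedge, the if_false chain threads the body,
+// and the final if_false serves as the sole exit (hence it is omitted from
+// the expected body above).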
+
+
+TEST(LaEdgeMatrix1) {
+  // Test various kinds of extra edges added to a simple loop.
+  for (int i = 0; i < 3; i++) {
+    for (int j = 0; j < 3; j++) {
+      for (int k = 0; k < 3; k++) {
+        LoopFinderTester t;
+
+        Node* p1 = t.jsgraph.Int32Constant(11);
+        Node* p2 = t.jsgraph.Int32Constant(22);
+        Node* p3 = t.jsgraph.Int32Constant(33);
+
+        Node* loop = t.graph.NewNode(t.common.Loop(2), t.start, t.start);
+        Node* phi =
+            t.graph.NewNode(t.common.Phi(kMachInt32, 2), t.one, p1, loop);
+        Node* cond = t.graph.NewNode(&kIntAdd, phi, p2);
+        Node* branch = t.graph.NewNode(t.common.Branch(), cond, loop);
+        Node* if_true = t.graph.NewNode(t.common.IfTrue(), branch);
+        Node* exit = t.graph.NewNode(t.common.IfFalse(), branch);
+        loop->ReplaceInput(1, if_true);
+        Node* ret = t.graph.NewNode(t.common.Return(), p3, t.start, exit);
+        t.graph.SetEnd(ret);
+
+        Node* choices[] = {p1, phi, cond};
+        p1->ReplaceUses(choices[i]);
+        p2->ReplaceUses(choices[j]);
+        p3->ReplaceUses(choices[k]);
+
+        Node* header[] = {loop, phi};
+        Node* body[] = {cond, branch, if_true};
+        t.CheckLoop(header, 2, body, 3);
+      }
+    }
+  }
+}
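+
+// As a concrete instance: (i, j, k) = (1, 2, 0) rewires p1's uses to the
+// phi, p2's uses to the condition, and p3's uses to the constant p1, so the
+// same header/body split must hold regardless of which loop-internal value
+// feeds the condition or escapes through the return.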
+
+
+void RunEdgeMatrix2(int i) {
+  DCHECK(i >= 0 && i < 5);
+  for (int j = 0; j < 5; j++) {
+    for (int k = 0; k < 5; k++) {
+      LoopFinderTester t;
+
+      Node* p1 = t.jsgraph.Int32Constant(11);
+      Node* p2 = t.jsgraph.Int32Constant(22);
+      Node* p3 = t.jsgraph.Int32Constant(33);
+
+      // Outer loop.
+      Node* loop1 = t.graph.NewNode(t.common.Loop(2), t.start, t.start);
+      Node* phi1 =
+          t.graph.NewNode(t.common.Phi(kMachInt32, 2), t.one, p1, loop1);
+      Node* cond1 = t.graph.NewNode(&kIntAdd, phi1, t.one);
+      Node* branch1 = t.graph.NewNode(t.common.Branch(), cond1, loop1);
+      Node* if_true1 = t.graph.NewNode(t.common.IfTrue(), branch1);
+      Node* exit1 = t.graph.NewNode(t.common.IfFalse(), branch1);
+
+      // Inner loop.
+      Node* loop2 = t.graph.NewNode(t.common.Loop(2), if_true1, t.start);
+      Node* phi2 =
+          t.graph.NewNode(t.common.Phi(kMachInt32, 2), t.one, p2, loop2);
+      Node* cond2 = t.graph.NewNode(&kIntAdd, phi2, p3);
+      Node* branch2 = t.graph.NewNode(t.common.Branch(), cond2, loop2);
+      Node* if_true2 = t.graph.NewNode(t.common.IfTrue(), branch2);
+      Node* exit2 = t.graph.NewNode(t.common.IfFalse(), branch2);
+      loop2->ReplaceInput(1, if_true2);
+      loop1->ReplaceInput(1, exit2);
+
+      Node* ret = t.graph.NewNode(t.common.Return(), phi1, t.start, exit1);
+      t.graph.SetEnd(ret);
+
+      Node* choices[] = {p1, phi1, cond1, phi2, cond2};
+      p1->ReplaceUses(choices[i]);
+      p2->ReplaceUses(choices[j]);
+      p3->ReplaceUses(choices[k]);
+
+      Node* header1[] = {loop1, phi1};
+      Node* body1[] = {cond1, branch1, if_true1, exit2,   loop2,
+                       phi2,  cond2,   branch2,  if_true2};
+      t.CheckLoop(header1, 2, body1, 9);
+
+      Node* header2[] = {loop2, phi2};
+      Node* body2[] = {cond2, branch2, if_true2};
+      t.CheckLoop(header2, 2, body2, 3);
+
+      Node* chain[] = {loop1, loop2};
+      t.CheckNestedLoops(chain, 2);
+    }
+  }
+}
+
+
+TEST(LaEdgeMatrix2_0) { RunEdgeMatrix2(0); }
+
+
+TEST(LaEdgeMatrix2_1) { RunEdgeMatrix2(1); }
+
+
+TEST(LaEdgeMatrix2_2) { RunEdgeMatrix2(2); }
+
+
+TEST(LaEdgeMatrix2_3) { RunEdgeMatrix2(3); }
+
+
+TEST(LaEdgeMatrix2_4) { RunEdgeMatrix2(4); }
+
+
+// Generates a triply-nested loop with extra edges between the phis and
+// conditions according to the edge choice parameters.
+void RunEdgeMatrix3(int c1a, int c1b, int c1c,    // line break
+                    int c2a, int c2b, int c2c,    // line break
+                    int c3a, int c3b, int c3c) {  // line break
+  LoopFinderTester t;
+
+  Node* p1a = t.jsgraph.Int32Constant(11);
+  Node* p1b = t.jsgraph.Int32Constant(22);
+  Node* p1c = t.jsgraph.Int32Constant(33);
+  Node* p2a = t.jsgraph.Int32Constant(44);
+  Node* p2b = t.jsgraph.Int32Constant(55);
+  Node* p2c = t.jsgraph.Int32Constant(66);
+  Node* p3a = t.jsgraph.Int32Constant(77);
+  Node* p3b = t.jsgraph.Int32Constant(88);
+  Node* p3c = t.jsgraph.Int32Constant(99);
+
+  // L1 depth = 0
+  Node* loop1 = t.graph.NewNode(t.common.Loop(2), t.start, t.start);
+  Node* phi1 = t.graph.NewNode(t.common.Phi(kMachInt32, 2), p1a, p1c, loop1);
+  Node* cond1 = t.graph.NewNode(&kIntAdd, phi1, p1b);
+  Node* branch1 = t.graph.NewNode(t.common.Branch(), cond1, loop1);
+  Node* if_true1 = t.graph.NewNode(t.common.IfTrue(), branch1);
+  Node* exit1 = t.graph.NewNode(t.common.IfFalse(), branch1);
+
+  // L2 depth = 1
+  Node* loop2 = t.graph.NewNode(t.common.Loop(2), if_true1, t.start);
+  Node* phi2 = t.graph.NewNode(t.common.Phi(kMachInt32, 2), p2a, p2c, loop2);
+  Node* cond2 = t.graph.NewNode(&kIntAdd, phi2, p2b);
+  Node* branch2 = t.graph.NewNode(t.common.Branch(), cond2, loop2);
+  Node* if_true2 = t.graph.NewNode(t.common.IfTrue(), branch2);
+  Node* exit2 = t.graph.NewNode(t.common.IfFalse(), branch2);
+
+  // L3 depth = 2
+  Node* loop3 = t.graph.NewNode(t.common.Loop(2), if_true2, t.start);
+  Node* phi3 = t.graph.NewNode(t.common.Phi(kMachInt32, 2), p3a, p3c, loop3);
+  Node* cond3 = t.graph.NewNode(&kIntAdd, phi3, p3b);
+  Node* branch3 = t.graph.NewNode(t.common.Branch(), cond3, loop3);
+  Node* if_true3 = t.graph.NewNode(t.common.IfTrue(), branch3);
+  Node* exit3 = t.graph.NewNode(t.common.IfFalse(), branch3);
+
+  loop3->ReplaceInput(1, if_true3);
+  loop2->ReplaceInput(1, exit3);
+  loop1->ReplaceInput(1, exit2);
+
+  Node* ret = t.graph.NewNode(t.common.Return(), phi1, t.start, exit1);
+  t.graph.SetEnd(ret);
+
+  // Mutate the graph according to the edge choices.
+
+  Node* o1[] = {t.one};
+  Node* o2[] = {t.one, phi1, cond1};
+  Node* o3[] = {t.one, phi1, cond1, phi2, cond2};
+
+  p1a->ReplaceUses(o1[c1a]);
+  p1b->ReplaceUses(o1[c1b]);
+
+  p2a->ReplaceUses(o2[c2a]);
+  p2b->ReplaceUses(o2[c2b]);
+
+  p3a->ReplaceUses(o3[c3a]);
+  p3b->ReplaceUses(o3[c3b]);
+
+  Node* l2[] = {phi1, cond1, phi2, cond2};
+  Node* l3[] = {phi1, cond1, phi2, cond2, phi3, cond3};
+
+  p1c->ReplaceUses(l2[c1c]);
+  p2c->ReplaceUses(l3[c2c]);
+  p3c->ReplaceUses(l3[c3c]);
+
+  // Run the checks and verify the loop structure.
+
+  Node* chain[] = {loop1, loop2, loop3};
+  t.CheckNestedLoops(chain, 3);
+
+  Node* header1[] = {loop1, phi1};
+  Node* body1[] = {cond1, branch1, if_true1, exit2,    loop2,
+                   phi2,  cond2,   branch2,  if_true2, exit3,
+                   loop3, phi3,    cond3,    branch3,  if_true3};
+  t.CheckLoop(header1, 2, body1, 15);
+
+  Node* header2[] = {loop2, phi2};
+  Node* body2[] = {cond2, branch2, if_true2, exit3,   loop3,
+                   phi3,  cond3,   branch3,  if_true3};
+  t.CheckLoop(header2, 2, body2, 9);
+
+  Node* header3[] = {loop3, phi3};
+  Node* body3[] = {cond3, branch3, if_true3};
+  t.CheckLoop(header3, 2, body3, 3);
+}
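+
+// Note how each inner loop's exit (exit2, exit3) is counted in the bodies
+// of the loops enclosing it but never in its own: the exit executes after
+// its loop is done, yet still inside every iteration of the outer loops.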
+
+
+// Runs all combinations with a fixed {i}.
+void RunEdgeMatrix3_i(int i) {
+  for (int a = 0; a < 1; a++) {
+    for (int b = 0; b < 1; b++) {
+      for (int c = 0; c < 4; c++) {
+        for (int d = 0; d < 3; d++) {
+          for (int e = 0; e < 3; e++) {
+            for (int f = 0; f < 6; f++) {
+              for (int g = 0; g < 5; g++) {
+                for (int h = 0; h < 5; h++) {
+                  RunEdgeMatrix3(a, b, c, d, e, f, g, h, i);
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
+
+// Test all possible legal triply-nested loops with conditions and phis.
+TEST(LaEdgeMatrix3_0) { RunEdgeMatrix3_i(0); }
+
+
+TEST(LaEdgeMatrix3_1) { RunEdgeMatrix3_i(1); }
+
+
+TEST(LaEdgeMatrix3_2) { RunEdgeMatrix3_i(2); }
+
+
+TEST(LaEdgeMatrix3_3) { RunEdgeMatrix3_i(3); }
+
+
+TEST(LaEdgeMatrix3_4) { RunEdgeMatrix3_i(4); }
+
+
+TEST(LaEdgeMatrix3_5) { RunEdgeMatrix3_i(5); }
index 115967a..648e1b9 100644 (file)
@@ -10,7 +10,6 @@
 #include "src/compiler/js-graph.h"
 #include "src/compiler/machine-operator-reducer.h"
 #include "src/compiler/operator-properties.h"
-#include "src/compiler/operator-properties-inl.h"
 #include "src/compiler/typer.h"
 #include "test/cctest/compiler/value-helper.h"
 
@@ -52,10 +51,13 @@ double ValueOfOperator<double>(const Operator* op) {
 
 class ReducerTester : public HandleAndZoneScope {
  public:
-  explicit ReducerTester(int num_parameters = 0)
+  explicit ReducerTester(
+      int num_parameters = 0,
+      MachineOperatorBuilder::Flags flags = MachineOperatorBuilder::kNoFlags)
       : isolate(main_isolate()),
         binop(NULL),
         unop(NULL),
+        machine(main_zone(), kMachPtr, flags),
         common(main_zone()),
         graph(main_zone()),
         javascript(main_zone()),
@@ -357,7 +359,36 @@ TEST(ReduceWord32Sar) {
 }
 
 
-TEST(ReduceWord32Equal) {
+static void CheckJsShift(ReducerTester* R) {
+  DCHECK(R->machine.Word32ShiftIsSafe());
+
+  Node* x = R->Parameter(0);
+  Node* y = R->Parameter(1);
+  Node* thirty_one = R->Constant<int32_t>(0x1f);
+  Node* y_and_thirty_one =
+      R->graph.NewNode(R->machine.Word32And(), y, thirty_one);
+
+  // If the underlying machine shift instructions 'and' their right operand
+  // with 0x1f, then: x << (y & 0x1f) => x << y.
+  R->CheckFoldBinop(x, y, x, y_and_thirty_one);
+}
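+
+// A concrete instance of the fold, assuming 32-bit shift instructions that
+// mask their count to 5 bits (the kWord32ShiftIsSafe contract): for y = 33,
+// x << (33 & 0x1f) is x << 1, and the hardware computes x << 33 as
+// x << (33 mod 32) == x << 1, so dropping the explicit 'and' is sound.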
+
+
+TEST(ReduceJsShifts) {
+  ReducerTester R(0, MachineOperatorBuilder::kWord32ShiftIsSafe);
+
+  R.binop = R.machine.Word32Shl();
+  CheckJsShift(&R);
+
+  R.binop = R.machine.Word32Shr();
+  CheckJsShift(&R);
+
+  R.binop = R.machine.Word32Sar();
+  CheckJsShift(&R);
+}
+
+
+TEST(Word32Equal) {
   ReducerTester R;
   R.binop = R.machine.Word32Equal();
 
index e13baa8..842d182 100644 (file)
@@ -8,11 +8,10 @@
 
 #include "graph-tester.h"
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-inl.h"
 #include "src/compiler/graph-visualizer.h"
+#include "src/compiler/node.h"
 #include "src/compiler/operator.h"
 
 using namespace v8::internal;
@@ -41,36 +40,6 @@ class PostNodeVisitor : public NullNodeVisitor {
 };
 
 
-TEST(TestUseNodeVisitEmpty) {
-  GraphWithStartNodeTester graph;
-
-  PreNodeVisitor node_visitor;
-  graph.VisitNodeUsesFromStart(&node_visitor);
-
-  CHECK_EQ(1, static_cast<int>(node_visitor.nodes_.size()));
-}
-
-
-TEST(TestUseNodePreOrderVisitSimple) {
-  GraphWithStartNodeTester graph;
-  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
-  Node* n3 = graph.NewNode(&dummy_operator, n2);
-  Node* n4 = graph.NewNode(&dummy_operator, n2, n3);
-  Node* n5 = graph.NewNode(&dummy_operator, n4, n2);
-  graph.SetEnd(n5);
-
-  PreNodeVisitor node_visitor;
-  graph.VisitNodeUsesFromStart(&node_visitor);
-
-  CHECK_EQ(5, static_cast<int>(node_visitor.nodes_.size()));
-  CHECK(graph.start()->id() == node_visitor.nodes_[0]->id());
-  CHECK(n2->id() == node_visitor.nodes_[1]->id());
-  CHECK(n3->id() == node_visitor.nodes_[2]->id());
-  CHECK(n4->id() == node_visitor.nodes_[3]->id());
-  CHECK(n5->id() == node_visitor.nodes_[4]->id());
-}
-
-
 TEST(TestInputNodePreOrderVisitSimple) {
   GraphWithStartNodeTester graph;
   Node* n2 = graph.NewNode(&dummy_operator, graph.start());
@@ -90,87 +59,6 @@ TEST(TestInputNodePreOrderVisitSimple) {
 }
 
 
-TEST(TestUseNodePostOrderVisitSimple) {
-  GraphWithStartNodeTester graph;
-  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
-  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
-  Node* n4 = graph.NewNode(&dummy_operator, n2);
-  Node* n5 = graph.NewNode(&dummy_operator, n2);
-  Node* n6 = graph.NewNode(&dummy_operator, n2);
-  Node* n7 = graph.NewNode(&dummy_operator, n3);
-  Node* end_dependencies[4] = {n4, n5, n6, n7};
-  Node* n8 = graph.NewNode(&dummy_operator, 4, end_dependencies);
-  graph.SetEnd(n8);
-
-  PostNodeVisitor node_visitor;
-  graph.VisitNodeUsesFromStart(&node_visitor);
-
-  CHECK_EQ(8, static_cast<int>(node_visitor.nodes_.size()));
-  CHECK(graph.end()->id() == node_visitor.nodes_[0]->id());
-  CHECK(n4->id() == node_visitor.nodes_[1]->id());
-  CHECK(n5->id() == node_visitor.nodes_[2]->id());
-  CHECK(n6->id() == node_visitor.nodes_[3]->id());
-  CHECK(n2->id() == node_visitor.nodes_[4]->id());
-  CHECK(n7->id() == node_visitor.nodes_[5]->id());
-  CHECK(n3->id() == node_visitor.nodes_[6]->id());
-  CHECK(graph.start()->id() == node_visitor.nodes_[7]->id());
-}
-
-
-TEST(TestUseNodePostOrderVisitLong) {
-  GraphWithStartNodeTester graph;
-  Node* n2 = graph.NewNode(&dummy_operator, graph.start());
-  Node* n3 = graph.NewNode(&dummy_operator, graph.start());
-  Node* n4 = graph.NewNode(&dummy_operator, n2);
-  Node* n5 = graph.NewNode(&dummy_operator, n2);
-  Node* n6 = graph.NewNode(&dummy_operator, n3);
-  Node* n7 = graph.NewNode(&dummy_operator, n3);
-  Node* n8 = graph.NewNode(&dummy_operator, n5);
-  Node* n9 = graph.NewNode(&dummy_operator, n5);
-  Node* n10 = graph.NewNode(&dummy_operator, n9);
-  Node* n11 = graph.NewNode(&dummy_operator, n9);
-  Node* end_dependencies[6] = {n4, n8, n10, n11, n6, n7};
-  Node* n12 = graph.NewNode(&dummy_operator, 6, end_dependencies);
-  graph.SetEnd(n12);
-
-  PostNodeVisitor node_visitor;
-  graph.VisitNodeUsesFromStart(&node_visitor);
-
-  CHECK_EQ(12, static_cast<int>(node_visitor.nodes_.size()));
-  CHECK(graph.end()->id() == node_visitor.nodes_[0]->id());
-  CHECK(n4->id() == node_visitor.nodes_[1]->id());
-  CHECK(n8->id() == node_visitor.nodes_[2]->id());
-  CHECK(n10->id() == node_visitor.nodes_[3]->id());
-  CHECK(n11->id() == node_visitor.nodes_[4]->id());
-  CHECK(n9->id() == node_visitor.nodes_[5]->id());
-  CHECK(n5->id() == node_visitor.nodes_[6]->id());
-  CHECK(n2->id() == node_visitor.nodes_[7]->id());
-  CHECK(n6->id() == node_visitor.nodes_[8]->id());
-  CHECK(n7->id() == node_visitor.nodes_[9]->id());
-  CHECK(n3->id() == node_visitor.nodes_[10]->id());
-  CHECK(graph.start()->id() == node_visitor.nodes_[11]->id());
-}
-
-
-TEST(TestUseNodePreOrderVisitCycle) {
-  GraphWithStartNodeTester graph;
-  Node* n0 = graph.start_node();
-  Node* n1 = graph.NewNode(&dummy_operator, n0);
-  Node* n2 = graph.NewNode(&dummy_operator, n1);
-  n0->AppendInput(graph.main_zone(), n2);
-  graph.SetStart(n0);
-  graph.SetEnd(n2);
-
-  PreNodeVisitor node_visitor;
-  graph.VisitNodeUsesFromStart(&node_visitor);
-
-  CHECK_EQ(3, static_cast<int>(node_visitor.nodes_.size()));
-  CHECK(n0->id() == node_visitor.nodes_[0]->id());
-  CHECK(n1->id() == node_visitor.nodes_[1]->id());
-  CHECK(n2->id() == node_visitor.nodes_[2]->id());
-}
-
-
 TEST(TestPrintNodeGraphToNodeGraphviz) {
   GraphWithStartNodeTester graph;
   Node* n2 = graph.NewNode(&dummy_operator, graph.start());
index 57bf7fa..a48adb9 100644 (file)
@@ -115,51 +115,6 @@ TEST(Int64Constant_hits) {
 }
 
 
-TEST(PtrConstant_back_to_back) {
-  GraphTester graph;
-  PtrNodeCache cache;
-  int32_t buffer[50];
-
-  for (int32_t* p = buffer;
-       (p - buffer) < static_cast<ptrdiff_t>(arraysize(buffer)); p++) {
-    Node** pos = cache.Find(graph.zone(), p);
-    CHECK_NE(NULL, pos);
-    for (int j = 0; j < 3; j++) {
-      Node** npos = cache.Find(graph.zone(), p);
-      CHECK_EQ(pos, npos);
-    }
-  }
-}
-
-
-TEST(PtrConstant_hits) {
-  GraphTester graph;
-  PtrNodeCache cache;
-  const int32_t kSize = 50;
-  int32_t buffer[kSize];
-  Node* nodes[kSize];
-  CommonOperatorBuilder common(graph.zone());
-
-  for (size_t i = 0; i < arraysize(buffer); i++) {
-    int k = static_cast<int>(i);
-    int32_t* p = &buffer[i];
-    nodes[i] = graph.NewNode(common.Int32Constant(k));
-    *cache.Find(graph.zone(), p) = nodes[i];
-  }
-
-  int hits = 0;
-  for (size_t i = 0; i < arraysize(buffer); i++) {
-    int32_t* p = &buffer[i];
-    Node** pos = cache.Find(graph.zone(), p);
-    if (*pos != NULL) {
-      CHECK_EQ(nodes[i], *pos);
-      hits++;
-    }
-  }
-  CHECK_LT(4, hits);
-}
-
-
 static bool Contains(NodeVector* nodes, Node* n) {
   for (size_t i = 0; i < nodes->size(); i++) {
     if (nodes->at(i) == n) return true;
index 5ac2e4a..eafabd3 100644 (file)
@@ -7,7 +7,6 @@
 #include "src/v8.h"
 
 #include "graph-tester.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
 
@@ -93,10 +92,8 @@ TEST(NodeInputIteratorOne) {
 TEST(NodeUseIteratorEmpty) {
   GraphTester graph;
   Node* n1 = graph.NewNode(&dummy_operator);
-  Node::Uses::iterator i(n1->uses().begin());
   int use_count = 0;
-  for (; i != n1->uses().end(); ++i) {
-    Node::Edge edge(i.edge());
+  for (Edge const edge : n1->use_edges()) {
     USE(edge);
     use_count++;
   }
@@ -366,31 +363,31 @@ TEST(AppendInputsAndIterator) {
   Node* n1 = graph.NewNode(&dummy_operator, n0);
   Node* n2 = graph.NewNode(&dummy_operator, n0, n1);
 
-  Node::Inputs inputs(n2->inputs());
-  Node::Inputs::iterator current = inputs.begin();
+  Node::InputEdges inputs(n2->input_edges());
+  Node::InputEdges::iterator current = inputs.begin();
   CHECK(current != inputs.end());
-  CHECK(*current == n0);
+  CHECK((*current).to() == n0);
   ++current;
   CHECK(current != inputs.end());
-  CHECK(*current == n1);
+  CHECK((*current).to() == n1);
   ++current;
   CHECK(current == inputs.end());
 
   Node* n3 = graph.NewNode(&dummy_operator);
   n2->AppendInput(graph.zone(), n3);
-  inputs = n2->inputs();
+  inputs = n2->input_edges();
   current = inputs.begin();
   CHECK(current != inputs.end());
-  CHECK(*current == n0);
-  CHECK_EQ(0, current.index());
+  CHECK((*current).to() == n0);
+  CHECK_EQ(0, (*current).index());
   ++current;
   CHECK(current != inputs.end());
-  CHECK(*current == n1);
-  CHECK_EQ(1, current.index());
+  CHECK((*current).to() == n1);
+  CHECK_EQ(1, (*current).index());
   ++current;
   CHECK(current != inputs.end());
-  CHECK(*current == n3);
-  CHECK_EQ(2, current.index());
+  CHECK((*current).to() == n3);
+  CHECK_EQ(2, (*current).index());
   ++current;
   CHECK(current == inputs.end());
 }
diff --git a/deps/v8/test/cctest/compiler/test-phi-reducer.cc b/deps/v8/test/cctest/compiler/test-phi-reducer.cc
deleted file mode 100644 (file)
index 7d2fab6..0000000
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/v8.h"
-#include "test/cctest/cctest.h"
-
-#include "src/compiler/common-operator.h"
-#include "src/compiler/graph-inl.h"
-#include "src/compiler/phi-reducer.h"
-
-using namespace v8::internal;
-using namespace v8::internal::compiler;
-
-class PhiReducerTester : HandleAndZoneScope {
- public:
-  explicit PhiReducerTester(int num_parameters = 0)
-      : isolate(main_isolate()),
-        common(main_zone()),
-        graph(main_zone()),
-        self(graph.NewNode(common.Start(num_parameters))),
-        dead(graph.NewNode(common.Dead())) {
-    graph.SetStart(self);
-  }
-
-  Isolate* isolate;
-  CommonOperatorBuilder common;
-  Graph graph;
-  Node* self;
-  Node* dead;
-
-  void CheckReduce(Node* expect, Node* phi) {
-    PhiReducer reducer;
-    Reduction reduction = reducer.Reduce(phi);
-    if (expect == phi) {
-      CHECK(!reduction.Changed());
-    } else {
-      CHECK(reduction.Changed());
-      CHECK_EQ(expect, reduction.replacement());
-    }
-  }
-
-  Node* Int32Constant(int32_t val) {
-    return graph.NewNode(common.Int32Constant(val));
-  }
-
-  Node* Float64Constant(double val) {
-    return graph.NewNode(common.Float64Constant(val));
-  }
-
-  Node* Parameter(int32_t index = 0) {
-    return graph.NewNode(common.Parameter(index), graph.start());
-  }
-
-  Node* Phi(Node* a) {
-    return SetSelfReferences(graph.NewNode(common.Phi(kMachAnyTagged, 1), a));
-  }
-
-  Node* Phi(Node* a, Node* b) {
-    return SetSelfReferences(
-        graph.NewNode(common.Phi(kMachAnyTagged, 2), a, b));
-  }
-
-  Node* Phi(Node* a, Node* b, Node* c) {
-    return SetSelfReferences(
-        graph.NewNode(common.Phi(kMachAnyTagged, 3), a, b, c));
-  }
-
-  Node* Phi(Node* a, Node* b, Node* c, Node* d) {
-    return SetSelfReferences(
-        graph.NewNode(common.Phi(kMachAnyTagged, 4), a, b, c, d));
-  }
-
-  Node* PhiWithControl(Node* a, Node* control) {
-    return SetSelfReferences(
-        graph.NewNode(common.Phi(kMachAnyTagged, 1), a, control));
-  }
-
-  Node* PhiWithControl(Node* a, Node* b, Node* control) {
-    return SetSelfReferences(
-        graph.NewNode(common.Phi(kMachAnyTagged, 2), a, b, control));
-  }
-
-  Node* SetSelfReferences(Node* node) {
-    Node::Inputs inputs = node->inputs();
-    for (Node::Inputs::iterator iter(inputs.begin()); iter != inputs.end();
-         ++iter) {
-      Node* input = *iter;
-      if (input == self) node->ReplaceInput(iter.index(), node);
-    }
-    return node;
-  }
-};
-
-
-TEST(PhiReduce1) {
-  PhiReducerTester R;
-  Node* zero = R.Int32Constant(0);
-  Node* one = R.Int32Constant(1);
-  Node* oneish = R.Float64Constant(1.1);
-  Node* param = R.Parameter();
-
-  Node* singles[] = {zero, one, oneish, param};
-  for (size_t i = 0; i < arraysize(singles); i++) {
-    R.CheckReduce(singles[i], R.Phi(singles[i]));
-  }
-}
-
-
-TEST(PhiReduce2) {
-  PhiReducerTester R;
-  Node* zero = R.Int32Constant(0);
-  Node* one = R.Int32Constant(1);
-  Node* oneish = R.Float64Constant(1.1);
-  Node* param = R.Parameter();
-
-  Node* singles[] = {zero, one, oneish, param};
-  for (size_t i = 0; i < arraysize(singles); i++) {
-    Node* a = singles[i];
-    R.CheckReduce(a, R.Phi(a, a));
-  }
-
-  for (size_t i = 0; i < arraysize(singles); i++) {
-    Node* a = singles[i];
-    R.CheckReduce(a, R.Phi(R.self, a));
-    R.CheckReduce(a, R.Phi(a, R.self));
-  }
-
-  for (size_t i = 1; i < arraysize(singles); i++) {
-    Node* a = singles[i], *b = singles[0];
-    Node* phi1 = R.Phi(b, a);
-    R.CheckReduce(phi1, phi1);
-
-    Node* phi2 = R.Phi(a, b);
-    R.CheckReduce(phi2, phi2);
-  }
-}
-
-
-TEST(PhiReduce3) {
-  PhiReducerTester R;
-  Node* zero = R.Int32Constant(0);
-  Node* one = R.Int32Constant(1);
-  Node* oneish = R.Float64Constant(1.1);
-  Node* param = R.Parameter();
-
-  Node* singles[] = {zero, one, oneish, param};
-  for (size_t i = 0; i < arraysize(singles); i++) {
-    Node* a = singles[i];
-    R.CheckReduce(a, R.Phi(a, a, a));
-  }
-
-  for (size_t i = 0; i < arraysize(singles); i++) {
-    Node* a = singles[i];
-    R.CheckReduce(a, R.Phi(R.self, a, a));
-    R.CheckReduce(a, R.Phi(a, R.self, a));
-    R.CheckReduce(a, R.Phi(a, a, R.self));
-  }
-
-  for (size_t i = 1; i < arraysize(singles); i++) {
-    Node* a = singles[i], *b = singles[0];
-    Node* phi1 = R.Phi(b, a, a);
-    R.CheckReduce(phi1, phi1);
-
-    Node* phi2 = R.Phi(a, b, a);
-    R.CheckReduce(phi2, phi2);
-
-    Node* phi3 = R.Phi(a, a, b);
-    R.CheckReduce(phi3, phi3);
-  }
-}
-
-
-TEST(PhiReduce4) {
-  PhiReducerTester R;
-  Node* zero = R.Int32Constant(0);
-  Node* one = R.Int32Constant(1);
-  Node* oneish = R.Float64Constant(1.1);
-  Node* param = R.Parameter();
-
-  Node* singles[] = {zero, one, oneish, param};
-  for (size_t i = 0; i < arraysize(singles); i++) {
-    Node* a = singles[i];
-    R.CheckReduce(a, R.Phi(a, a, a, a));
-  }
-
-  for (size_t i = 0; i < arraysize(singles); i++) {
-    Node* a = singles[i];
-    R.CheckReduce(a, R.Phi(R.self, a, a, a));
-    R.CheckReduce(a, R.Phi(a, R.self, a, a));
-    R.CheckReduce(a, R.Phi(a, a, R.self, a));
-    R.CheckReduce(a, R.Phi(a, a, a, R.self));
-
-    R.CheckReduce(a, R.Phi(R.self, R.self, a, a));
-    R.CheckReduce(a, R.Phi(a, R.self, R.self, a));
-    R.CheckReduce(a, R.Phi(a, a, R.self, R.self));
-    R.CheckReduce(a, R.Phi(R.self, a, a, R.self));
-  }
-
-  for (size_t i = 1; i < arraysize(singles); i++) {
-    Node* a = singles[i], *b = singles[0];
-    Node* phi1 = R.Phi(b, a, a, a);
-    R.CheckReduce(phi1, phi1);
-
-    Node* phi2 = R.Phi(a, b, a, a);
-    R.CheckReduce(phi2, phi2);
-
-    Node* phi3 = R.Phi(a, a, b, a);
-    R.CheckReduce(phi3, phi3);
-
-    Node* phi4 = R.Phi(a, a, a, b);
-    R.CheckReduce(phi4, phi4);
-  }
-}
-
-
-TEST(PhiReduceShouldIgnoreControlNodes) {
-  PhiReducerTester R;
-  Node* zero = R.Int32Constant(0);
-  Node* one = R.Int32Constant(1);
-  Node* oneish = R.Float64Constant(1.1);
-  Node* param = R.Parameter();
-
-  Node* singles[] = {zero, one, oneish, param};
-  for (size_t i = 0; i < arraysize(singles); ++i) {
-    R.CheckReduce(singles[i], R.PhiWithControl(singles[i], R.dead));
-    R.CheckReduce(singles[i], R.PhiWithControl(R.self, singles[i], R.dead));
-    R.CheckReduce(singles[i], R.PhiWithControl(singles[i], R.self, R.dead));
-  }
-}
index 8656b98..19b96ba 100644 (file)
@@ -25,7 +25,8 @@ static void AssertInlineCount(const v8::FunctionCallbackInfo<v8::Value>& args) {
     frames_seen++;
     it.Advance();
   }
-  CHECK_EQ(args[0]->ToInt32()->Value(), topmost->GetInlineCount());
+  CHECK_EQ(args[0]->ToInt32(args.GetIsolate())->Value(),
+           topmost->GetInlineCount());
 }
 
 
@@ -37,16 +38,20 @@ static void InstallAssertInlineCountHelper(v8::Isolate* isolate) {
 }
 
 
+static uint32_t kInlineFlags = CompilationInfo::kInliningEnabled |
+                               CompilationInfo::kContextSpecializing |
+                               CompilationInfo::kTypingEnabled;
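+// kInlineFlags is the compilation mode every test below requests (inlining,
+// context specialization, typing), hoisted out of the individual
+// FunctionTester calls so each test reads as a one-liner.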
+
+
 TEST(SimpleInlining) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function(){"
-      "function foo(s) { AssertInlineCount(2); return s; };"
-      "function bar(s, t) { return foo(s); };"
-      "return bar;})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      "  function foo(s) { AssertInlineCount(2); return s; };"
+      "  function bar(s, t) { return foo(s); };"
+      "  return bar;"
+      "})();",
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(1), T.Val(1), T.Val(2));
@@ -57,13 +62,11 @@ TEST(SimpleInliningDeopt) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function(){"
-      "function foo(s) { %DeoptimizeFunction(bar); return "
-      "s; };"
-      "function bar(s, t) { return foo(s); };"
-      "return bar;})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      "  function foo(s) { %DeoptimizeFunction(bar); return s; };"
+      "  function bar(s, t) { return foo(s); };"
+      "  return bar;"
+      "})();",
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(1), T.Val(1), T.Val(2));
@@ -74,13 +77,11 @@ TEST(SimpleInliningContext) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "function foo(s) { AssertInlineCount(2); var x = 12; return s + x; };"
-      "function bar(s, t) { return foo(s); };"
-      "return bar;"
+      "  function foo(s) { AssertInlineCount(2); var x = 12; return s + x; };"
+      "  function bar(s, t) { return foo(s); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(13), T.Val(1), T.Val(2));
@@ -91,16 +92,14 @@ TEST(SimpleInliningContextDeopt) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "function foo(s) { "
-      "  AssertInlineCount(2); %DeoptimizeFunction(bar); var x = 12;"
-      "  return s + x;"
-      "};"
-      "function bar(s, t) { return foo(s); };"
-      "return bar;"
+      "  function foo(s) {"
+      "    AssertInlineCount(2); %DeoptimizeFunction(bar); var x = 12;"
+      "    return s + x;"
+      "  };"
+      "  function bar(s, t) { return foo(s); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(13), T.Val(1), T.Val(2));
@@ -111,14 +110,12 @@ TEST(CaptureContext) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "var f = (function () {"
-      "var x = 42;"
-      "function bar(s) { return x + s; };"
-      "return (function (s) { return bar(s); });"
+      "  var x = 42;"
+      "  function bar(s) { return x + s; };"
+      "  return (function (s) { return bar(s); });"
       "})();"
-      "(function (s) { return f(s)})",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      "(function (s) { return f(s) })",
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(42 + 12), T.Val(12), T.undefined());
@@ -132,12 +129,10 @@ TEST(DontInlineEval) {
   FunctionTester T(
       "var x = 42;"
       "(function () {"
-      "function bar(s, t) { return eval(\"AssertInlineCount(1); x\") };"
-      "return bar;"
+      "  function bar(s, t) { return eval(\"AssertInlineCount(1); x\") };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(42), T.Val("x"), T.undefined());
@@ -148,13 +143,11 @@ TEST(InlineOmitArguments) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = 42;"
-      "function bar(s, t, u, v) { AssertInlineCount(2); return x + s; };"
-      "return (function (s,t) { return bar(s); });"
+      "  var x = 42;"
+      "  function bar(s, t, u, v) { AssertInlineCount(2); return x + s; };"
+      "  return (function (s,t) { return bar(s); });"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(42 + 12), T.Val(12), T.undefined());
@@ -165,16 +158,14 @@ TEST(InlineOmitArgumentsDeopt) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "function foo(s,t,u,v) { AssertInlineCount(2); %DeoptimizeFunction(bar); "
-      "return baz(); };"
-      "function bar() { return foo(11); };"
-      "function baz() { return foo.arguments.length == 1 && "
-      "                        foo.arguments[0] == 11 ; }"
-      "return bar;"
+      "  function foo(s,t,u,v) { AssertInlineCount(2);"
+      "                          %DeoptimizeFunction(bar); return baz(); };"
+      "  function bar() { return foo(11); };"
+      "  function baz() { return foo.arguments.length == 1 &&"
+      "                          foo.arguments[0] == 11; }"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.true_value(), T.Val(12), T.Val(14));
@@ -185,14 +176,12 @@ TEST(InlineSurplusArguments) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = 42;"
-      "function foo(s) { AssertInlineCount(2); return x + s; };"
-      "function bar(s,t) { return foo(s,t,13); };"
-      "return bar;"
+      "  var x = 42;"
+      "  function foo(s) { AssertInlineCount(2); return x + s; };"
+      "  function bar(s,t) { return foo(s,t,13); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(42 + 12), T.Val(12), T.undefined());
@@ -203,18 +192,16 @@ TEST(InlineSurplusArgumentsDeopt) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "function foo(s) { AssertInlineCount(2); %DeoptimizeFunction(bar); "
-      "return baz(); };"
-      "function bar() { return foo(13, 14, 15); };"
-      "function baz() { return foo.arguments.length == 3 && "
-      "                        foo.arguments[0] == 13 && "
-      "                        foo.arguments[1] == 14 && "
-      "                        foo.arguments[2] == 15; }"
-      "return bar;"
+      "  function foo(s) { AssertInlineCount(2); %DeoptimizeFunction(bar);"
+      "                    return baz(); };"
+      "  function bar() { return foo(13, 14, 15); };"
+      "  function baz() { return foo.arguments.length == 3 &&"
+      "                          foo.arguments[0] == 13 &&"
+      "                          foo.arguments[1] == 14 &&"
+      "                          foo.arguments[2] == 15; }"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.true_value(), T.Val(12), T.Val(14));
@@ -225,13 +212,11 @@ TEST(InlineTwice) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = 42;"
-      "function bar(s) { AssertInlineCount(2); return x + s; };"
-      "return (function (s,t) { return bar(s) + bar(t); });"
+      "  var x = 42;"
+      "  function bar(s) { AssertInlineCount(2); return x + s; };"
+      "  return (function (s,t) { return bar(s) + bar(t); });"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(2 * 42 + 12 + 4), T.Val(12), T.Val(4));
@@ -242,14 +227,12 @@ TEST(InlineTwiceDependent) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = 42;"
-      "function foo(s) { AssertInlineCount(2); return x + s; };"
-      "function bar(s,t) { return foo(foo(s)); };"
-      "return bar;"
+      "  var x = 42;"
+      "  function foo(s) { AssertInlineCount(2); return x + s; };"
+      "  function bar(s,t) { return foo(foo(s)); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(42 + 42 + 12), T.Val(12), T.Val(4));
@@ -260,15 +243,13 @@ TEST(InlineTwiceDependentDiamond) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = 41;"
-      "function foo(s) { AssertInlineCount(2); if (s % 2 == 0) {"
-      "                  return x - s } else { return x + s; } };"
-      "function bar(s,t) { return foo(foo(s)); };"
-      "return bar;"
+      "  var x = 41;"
+      "  function foo(s) { AssertInlineCount(2); if (s % 2 == 0) {"
+      "                    return x - s } else { return x + s; } };"
+      "  function bar(s,t) { return foo(foo(s)); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(-11), T.Val(11), T.Val(4));
@@ -279,34 +260,60 @@ TEST(InlineTwiceDependentDiamondDifferent) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = 41;"
-      "function foo(s,t) { AssertInlineCount(2); if (s % 2 == 0) {"
-      "                    return x - s * t } else { return x + s * t; } };"
-      "function bar(s,t) { return foo(foo(s, 3), 5); };"
-      "return bar;"
+      "  var x = 41;"
+      "  function foo(s,t) { AssertInlineCount(2); if (s % 2 == 0) {"
+      "                      return x - s * t } else { return x + s * t; } };"
+      "  function bar(s,t) { return foo(foo(s, 3), 5); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(-329), T.Val(11), T.Val(4));
 }
 
 
-TEST(InlineLoop) {
+TEST(InlineLoopGuardedEmpty) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = 41;"
-      "function foo(s) { AssertInlineCount(2); while (s > 0) {"
-      "                  s = s - 1; }; return s; };"
-      "function bar(s,t) { return foo(foo(s)); };"
-      "return bar;"
+      "  function foo(s) { AssertInlineCount(2); if (s) while (s); return s; };"
+      "  function bar(s,t) { return foo(s); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(0.0), T.Val(0.0), T.Val(4));
+}
+
+
+TEST(InlineLoopGuardedOnce) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "  function foo(s,t) { AssertInlineCount(2); if (t > 0) while (s > 0) {"
+      "                      s = s - 1; }; return s; };"
+      "  function bar(s,t) { return foo(s,t); };"
+      "  return bar;"
+      "})();",
+      kInlineFlags);
+
+  InstallAssertInlineCountHelper(CcTest::isolate());
+  T.CheckCall(T.Val(0.0), T.Val(11), T.Val(4));
+}
+
+
+TEST(InlineLoopGuardedTwice) {
+  FLAG_turbo_deoptimization = true;
+  FunctionTester T(
+      "(function () {"
+      "  function foo(s,t) { AssertInlineCount(2); if (t > 0) while (s > 0) {"
+      "                      s = s - 1; }; return s; };"
+      "  function bar(s,t) { return foo(foo(s,t),t); };"
+      "  return bar;"
+      "})();",
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(0.0), T.Val(11), T.Val(4));
@@ -317,15 +324,13 @@ TEST(InlineStrictIntoNonStrict) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = Object.create({}, { y: { value:42, writable:false } });"
-      "function foo(s) { 'use strict';"
-      "                   x.y = 9; };"
-      "function bar(s,t) { return foo(s); };"
-      "return bar;"
+      "  var x = Object.create({}, { y: { value:42, writable:false } });"
+      "  function foo(s) { 'use strict';"
+      "                     x.y = 9; };"
+      "  function bar(s,t) { return foo(s); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckThrows(T.undefined(), T.undefined());
@@ -336,14 +341,12 @@ TEST(InlineNonStrictIntoStrict) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = Object.create({}, { y: { value:42, writable:false } });"
-      "function foo(s) { x.y = 9; return x.y; };"
-      "function bar(s,t) { \'use strict\'; return foo(s); };"
-      "return bar;"
+      "  var x = Object.create({}, { y: { value:42, writable:false } });"
+      "  function foo(s) { x.y = 9; return x.y; };"
+      "  function bar(s,t) { \'use strict\'; return foo(s); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.Val(42), T.undefined(), T.undefined());
@@ -354,13 +357,11 @@ TEST(InlineIntrinsicIsSmi) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = 42;"
-      "function bar(s,t) { return %_IsSmi(x); };"
-      "return bar;"
+      "  var x = 42;"
+      "  function bar(s,t) { return %_IsSmi(x); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.true_value(), T.Val(12), T.Val(4));
@@ -371,13 +372,11 @@ TEST(InlineIntrinsicIsNonNegativeSmi) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = 42;"
-      "function bar(s,t) { return %_IsNonNegativeSmi(x); };"
-      "return bar;"
+      "  var x = 42;"
+      "  function bar(s,t) { return %_IsNonNegativeSmi(x); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.true_value(), T.Val(12), T.Val(4));
@@ -388,38 +387,32 @@ TEST(InlineIntrinsicIsArray) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "var x = [1,2,3];"
-      "function bar(s,t) { return %_IsArray(x); };"
-      "return bar;"
+      "  var x = [1,2,3];"
+      "  function bar(s,t) { return %_IsArray(x); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.true_value(), T.Val(12), T.Val(4));
 
   FunctionTester T2(
       "(function () {"
-      "var x = 32;"
-      "function bar(s,t) { return %_IsArray(x); };"
-      "return bar;"
+      "  var x = 32;"
+      "  function bar(s,t) { return %_IsArray(x); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   T2.CheckCall(T.false_value(), T.Val(12), T.Val(4));
 
   FunctionTester T3(
       "(function () {"
-      "var x = bar;"
-      "function bar(s,t) { return %_IsArray(x); };"
-      "return bar;"
+      "  var x = bar;"
+      "  function bar(s,t) { return %_IsArray(x); };"
+      "  return bar;"
       "})();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      kInlineFlags);
 
   T3.CheckCall(T.false_value(), T.Val(12), T.Val(4));
 }
@@ -429,19 +422,16 @@ TEST(InlineWithArguments) {
   FLAG_turbo_deoptimization = true;
   FunctionTester T(
       "(function () {"
-      "  function foo(s,t,u) { AssertInlineCount(2); "
-      "    return foo.arguments.length == 3 && "
-      "           foo.arguments[0] == 13 && "
-      "           foo.arguments[1] == 14 && "
-      "           foo.arguments[2] == 15; "
+      "  function foo(s,t,u) { AssertInlineCount(2);"
+      "    return foo.arguments.length == 3 &&"
+      "           foo.arguments[0] == 13 &&"
+      "           foo.arguments[1] == 14 &&"
+      "           foo.arguments[2] == 15;"
       "  }"
       "  function bar() { return foo(13, 14, 15); };"
       "  return bar;"
-      "}"
-      ")();",
-      CompilationInfo::kInliningEnabled |
-          CompilationInfo::kContextSpecializing |
-          CompilationInfo::kTypingEnabled);
+      "})();",
+      kInlineFlags);
 
   InstallAssertInlineCountHelper(CcTest::isolate());
   T.CheckCall(T.true_value(), T.Val(12), T.Val(14));
index 9eb6753..974d4ce 100644 (file)
@@ -8,7 +8,6 @@
 
 #include "src/base/bits.h"
 #include "src/codegen.h"
-#include "src/compiler/generic-node-inl.h"
 #include "test/cctest/cctest.h"
 #include "test/cctest/compiler/codegen-tester.h"
 #include "test/cctest/compiler/value-helper.h"
@@ -129,36 +128,6 @@ TEST(RunBranch) {
 }
 
 
-TEST(RunRedundantBranch1) {
-  RawMachineAssemblerTester<int32_t> m;
-  int constant = 944777;
-
-  MLabel blocka;
-  m.Branch(m.Int32Constant(0), &blocka, &blocka);
-  m.Bind(&blocka);
-  m.Return(m.Int32Constant(constant));
-
-  CHECK_EQ(constant, m.Call());
-}
-
-
-TEST(RunRedundantBranch2) {
-  RawMachineAssemblerTester<int32_t> m;
-  int constant = 966777;
-
-  MLabel blocka, blockb, blockc;
-  m.Branch(m.Int32Constant(0), &blocka, &blockc);
-  m.Bind(&blocka);
-  m.Branch(m.Int32Constant(0), &blockb, &blockb);
-  m.Bind(&blockc);
-  m.Goto(&blockb);
-  m.Bind(&blockb);
-  m.Return(m.Int32Constant(constant));
-
-  CHECK_EQ(constant, m.Call());
-}
-
-
 TEST(RunDiamond2) {
   RawMachineAssemblerTester<int32_t> m;
 
index 7b4f975..1eb3547 100644 (file)
@@ -5,7 +5,6 @@
 #include "src/v8.h"
 
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node.h"
@@ -163,7 +162,8 @@ TEST(BuildMulNodeGraph) {
   Schedule schedule(scope.main_zone());
   Graph graph(scope.main_zone());
   CommonOperatorBuilder common(scope.main_zone());
-  MachineOperatorBuilder machine;
+  // TODO(titzer): use test operators.
+  MachineOperatorBuilder machine(scope.main_zone());
 
   Node* start = graph.NewNode(common.Start(0));
   graph.SetStart(start);
index aed8f8b..1b79ed5 100644 (file)
@@ -3,26 +3,27 @@
 // found in the LICENSE file.
 
 #include "src/v8.h"
-#include "test/cctest/cctest.h"
 
 #include "src/compiler/access-builder.h"
 #include "src/compiler/common-operator.h"
-#include "src/compiler/generic-node-inl.h"
-#include "src/compiler/generic-node.h"
 #include "src/compiler/graph.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/js-operator.h"
-#include "src/compiler/machine-operator.h"
 #include "src/compiler/node.h"
+#include "src/compiler/opcodes.h"
 #include "src/compiler/operator.h"
 #include "src/compiler/schedule.h"
 #include "src/compiler/scheduler.h"
 #include "src/compiler/simplified-operator.h"
 #include "src/compiler/verifier.h"
+#include "test/cctest/cctest.h"
 
 using namespace v8::internal;
 using namespace v8::internal::compiler;
 
+Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add", 2, 0, 0, 1,
+                 0, 0);
+
 // TODO(titzer): pull RPO tests out to their own file.
 static void CheckRPONumbers(BasicBlockVector* order, size_t expected,
                             bool loops_allowed) {
@@ -34,17 +35,6 @@ static void CheckRPONumbers(BasicBlockVector* order, size_t expected,
       CHECK_EQ(NULL, order->at(i)->loop_header());
     }
   }
-  int number = 0;
-  for (auto const block : *order) {
-    if (block->deferred()) continue;
-    CHECK_EQ(number, block->ao_number());
-    ++number;
-  }
-  for (auto const block : *order) {
-    if (!block->deferred()) continue;
-    CHECK_EQ(number, block->ao_number());
-    ++number;
-  }
 }
 
 
@@ -120,8 +110,7 @@ static Schedule* ComputeAndVerifySchedule(int expected, Graph* graph) {
     os << AsDOT(*graph);
   }
 
-  ZonePool zone_pool(graph->zone()->isolate());
-  Schedule* schedule = Scheduler::ComputeSchedule(&zone_pool, graph);
+  Schedule* schedule = Scheduler::ComputeSchedule(graph->zone(), graph);
 
   if (FLAG_trace_turbo_scheduler) {
     OFStream os(stdout);
@@ -137,8 +126,8 @@ TEST(RPODegenerate1) {
   HandleAndZoneScope scope;
   Schedule schedule(scope.main_zone());
 
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 1, false);
   CHECK_EQ(schedule.start(), order->at(0));
 }
@@ -149,8 +138,8 @@ TEST(RPODegenerate2) {
   Schedule schedule(scope.main_zone());
 
   schedule.AddGoto(schedule.start(), schedule.end());
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 2, false);
   CHECK_EQ(schedule.start(), order->at(0));
   CHECK_EQ(schedule.end(), order->at(1));
@@ -170,9 +159,8 @@ TEST(RPOLine) {
       schedule.AddGoto(last, block);
       last = block;
     }
-    ZonePool zone_pool(scope.main_isolate());
     BasicBlockVector* order =
-        Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+        Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
     CheckRPONumbers(order, 1 + i, false);
 
     for (size_t i = 0; i < schedule.BasicBlockCount(); i++) {
@@ -189,8 +177,8 @@ TEST(RPOSelfLoop) {
   HandleAndZoneScope scope;
   Schedule schedule(scope.main_zone());
   schedule.AddSuccessorForTesting(schedule.start(), schedule.start());
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 1, true);
   BasicBlock* loop[] = {schedule.start()};
   CheckLoop(order, loop, 1);
@@ -200,12 +188,13 @@ TEST(RPOSelfLoop) {
 TEST(RPOEntryLoop) {
   HandleAndZoneScope scope;
   Schedule schedule(scope.main_zone());
-  schedule.AddSuccessorForTesting(schedule.start(), schedule.end());
-  schedule.AddSuccessorForTesting(schedule.end(), schedule.start());
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlock* body = schedule.NewBasicBlock();
+  schedule.AddSuccessorForTesting(schedule.start(), body);
+  schedule.AddSuccessorForTesting(body, schedule.start());
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 2, true);
-  BasicBlock* loop[] = {schedule.start(), schedule.end()};
+  BasicBlock* loop[] = {schedule.start(), body};
   CheckLoop(order, loop, 2);
 }
 
@@ -215,8 +204,8 @@ TEST(RPOEndLoop) {
   Schedule schedule(scope.main_zone());
   SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
   schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 3, true);
   loop1->Check(order);
 }
@@ -228,8 +217,8 @@ TEST(RPOEndLoopNested) {
   SmartPointer<TestLoop> loop1(CreateLoop(&schedule, 2));
   schedule.AddSuccessorForTesting(schedule.start(), loop1->header());
   schedule.AddSuccessorForTesting(loop1->last(), schedule.start());
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 3, true);
   loop1->Check(order);
 }
@@ -249,8 +238,8 @@ TEST(RPODiamond) {
   schedule.AddSuccessorForTesting(B, D);
   schedule.AddSuccessorForTesting(C, D);
 
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 4, false);
 
   CHECK_EQ(0, A->rpo_number());
@@ -274,8 +263,8 @@ TEST(RPOLoop1) {
   schedule.AddSuccessorForTesting(C, B);
   schedule.AddSuccessorForTesting(C, D);
 
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 4, true);
   BasicBlock* loop[] = {B, C};
   CheckLoop(order, loop, 2);
@@ -296,8 +285,8 @@ TEST(RPOLoop2) {
   schedule.AddSuccessorForTesting(C, B);
   schedule.AddSuccessorForTesting(B, D);
 
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 4, true);
   BasicBlock* loop[] = {B, C};
   CheckLoop(order, loop, 2);
@@ -339,9 +328,8 @@ TEST(RPOLoopN) {
     if (i == 9) schedule.AddSuccessorForTesting(E, G);
     if (i == 10) schedule.AddSuccessorForTesting(F, G);
 
-    ZonePool zone_pool(scope.main_isolate());
     BasicBlockVector* order =
-        Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+        Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
     CheckRPONumbers(order, 7, true);
     BasicBlock* loop[] = {B, C, D, E, F};
     CheckLoop(order, loop, 5);
@@ -368,8 +356,8 @@ TEST(RPOLoopNest1) {
   schedule.AddSuccessorForTesting(E, B);
   schedule.AddSuccessorForTesting(E, F);
 
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 6, true);
   BasicBlock* loop1[] = {B, C, D, E};
   CheckLoop(order, loop1, 4);
@@ -404,8 +392,8 @@ TEST(RPOLoopNest2) {
   schedule.AddSuccessorForTesting(F, C);
   schedule.AddSuccessorForTesting(G, B);
 
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 8, true);
   BasicBlock* loop1[] = {B, C, D, E, F, G};
   CheckLoop(order, loop1, 6);
@@ -432,8 +420,8 @@ TEST(RPOLoopFollow1) {
   schedule.AddSuccessorForTesting(loop1->header(), loop2->header());
   schedule.AddSuccessorForTesting(loop2->last(), E);
 
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
 
   CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
            static_cast<int>(order->size()));
@@ -459,8 +447,8 @@ TEST(RPOLoopFollow2) {
   schedule.AddSuccessorForTesting(S, loop2->header());
   schedule.AddSuccessorForTesting(loop2->last(), E);
 
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
 
   CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
            static_cast<int>(order->size()));
@@ -483,9 +471,8 @@ TEST(RPOLoopFollowN) {
       schedule.AddSuccessorForTesting(A, loop1->header());
       schedule.AddSuccessorForTesting(loop1->nodes[exit], loop2->header());
       schedule.AddSuccessorForTesting(loop2->nodes[exit], E);
-      ZonePool zone_pool(scope.main_isolate());
       BasicBlockVector* order =
-          Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+          Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
 
       CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
                static_cast<int>(order->size()));
@@ -515,8 +502,8 @@ TEST(RPONestedLoopFollow1) {
   schedule.AddSuccessorForTesting(C, E);
   schedule.AddSuccessorForTesting(C, B);
 
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
 
   CHECK_EQ(static_cast<int>(schedule.BasicBlockCount()),
            static_cast<int>(order->size()));
@@ -545,9 +532,8 @@ TEST(RPOLoopBackedges1) {
       schedule.AddSuccessorForTesting(loop1->nodes[i], loop1->header());
       schedule.AddSuccessorForTesting(loop1->nodes[j], E);
 
-      ZonePool zone_pool(scope.main_isolate());
       BasicBlockVector* order =
-          Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+          Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
       CheckRPONumbers(order, schedule.BasicBlockCount(), true);
       loop1->Check(order);
     }
@@ -574,9 +560,8 @@ TEST(RPOLoopOutedges1) {
       schedule.AddSuccessorForTesting(loop1->nodes[j], D);
       schedule.AddSuccessorForTesting(D, E);
 
-      ZonePool zone_pool(scope.main_isolate());
       BasicBlockVector* order =
-          Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+          Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
       CheckRPONumbers(order, schedule.BasicBlockCount(), true);
       loop1->Check(order);
     }
@@ -603,9 +588,8 @@ TEST(RPOLoopOutedges2) {
       schedule.AddSuccessorForTesting(O, E);
     }
 
-    ZonePool zone_pool(scope.main_isolate());
     BasicBlockVector* order =
-        Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+        Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
     CheckRPONumbers(order, schedule.BasicBlockCount(), true);
     loop1->Check(order);
   }
@@ -631,9 +615,8 @@ TEST(RPOLoopOutloops1) {
       schedule.AddSuccessorForTesting(loopN[j]->last(), E);
     }
 
-    ZonePool zone_pool(scope.main_isolate());
     BasicBlockVector* order =
-        Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+        Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
     CheckRPONumbers(order, schedule.BasicBlockCount(), true);
     loop1->Check(order);
 
@@ -653,7 +636,7 @@ TEST(RPOLoopMultibackedge) {
   BasicBlock* A = schedule.start();
   BasicBlock* B = schedule.NewBasicBlock();
   BasicBlock* C = schedule.NewBasicBlock();
-  BasicBlock* D = schedule.end();
+  BasicBlock* D = schedule.NewBasicBlock();
   BasicBlock* E = schedule.NewBasicBlock();
 
   schedule.AddSuccessorForTesting(A, B);
@@ -664,8 +647,8 @@ TEST(RPOLoopMultibackedge) {
   schedule.AddSuccessorForTesting(D, B);
   schedule.AddSuccessorForTesting(E, B);
 
-  ZonePool zone_pool(scope.main_isolate());
-  BasicBlockVector* order = Scheduler::ComputeSpecialRPO(&zone_pool, &schedule);
+  BasicBlockVector* order =
+      Scheduler::ComputeSpecialRPO(scope.main_zone(), &schedule);
   CheckRPONumbers(order, 5, true);
 
   BasicBlock* loop1[] = {B, C, D, E};
@@ -680,8 +663,7 @@ TEST(BuildScheduleEmpty) {
   graph.SetStart(graph.NewNode(builder.Start(0)));
   graph.SetEnd(graph.NewNode(builder.End(), graph.start()));
 
-  ZonePool zone_pool(scope.main_isolate());
-  USE(Scheduler::ComputeSchedule(&zone_pool, &graph));
+  USE(Scheduler::ComputeSchedule(scope.main_zone(), &graph));
 }
 
 
@@ -696,8 +678,7 @@ TEST(BuildScheduleOneParameter) {
 
   graph.SetEnd(graph.NewNode(builder.End(), ret));
 
-  ZonePool zone_pool(scope.main_isolate());
-  USE(Scheduler::ComputeSchedule(&zone_pool, &graph));
+  USE(Scheduler::ComputeSchedule(scope.main_zone(), &graph));
 }
 
 
@@ -1571,7 +1552,6 @@ TEST(BuildScheduleSimpleLoopWithCodeMotion) {
   Graph graph(scope.main_zone());
   CommonOperatorBuilder common_builder(scope.main_zone());
   JSOperatorBuilder js_builder(scope.main_zone());
-  MachineOperatorBuilder machine_builder;
   const Operator* op;
 
   Handle<HeapObject> object =
@@ -1607,7 +1587,7 @@ TEST(BuildScheduleSimpleLoopWithCodeMotion) {
   Node* n20 = graph.NewNode(op, nil, nil, nil, nil, nil);
   USE(n20);
   n20->ReplaceInput(0, n9);
-  op = machine_builder.Int32Add();
+  op = &kIntAdd;
   Node* n19 = graph.NewNode(op, nil, nil);
   USE(n19);
   op = common_builder.Phi(kMachAnyTagged, 2);
@@ -1731,7 +1711,6 @@ TEST(FloatingDiamond2) {
   HandleAndZoneScope scope;
   Graph graph(scope.main_zone());
   CommonOperatorBuilder common(scope.main_zone());
-  MachineOperatorBuilder machine;
 
   Node* start = graph.NewNode(common.Start(2));
   graph.SetStart(start);
@@ -1740,7 +1719,7 @@ TEST(FloatingDiamond2) {
   Node* p1 = graph.NewNode(common.Parameter(1), start);
   Node* d1 = CreateDiamond(&graph, &common, p0);
   Node* d2 = CreateDiamond(&graph, &common, p1);
-  Node* add = graph.NewNode(machine.Int32Add(), d1, d2);
+  Node* add = graph.NewNode(&kIntAdd, d1, d2);
   Node* ret = graph.NewNode(common.Return(), add, start, start);
   Node* end = graph.NewNode(common.End(), ret, start);
 
@@ -1754,7 +1733,6 @@ TEST(FloatingDiamond3) {
   HandleAndZoneScope scope;
   Graph graph(scope.main_zone());
   CommonOperatorBuilder common(scope.main_zone());
-  MachineOperatorBuilder machine;
 
   Node* start = graph.NewNode(common.Start(2));
   graph.SetStart(start);
@@ -1763,7 +1741,7 @@ TEST(FloatingDiamond3) {
   Node* p1 = graph.NewNode(common.Parameter(1), start);
   Node* d1 = CreateDiamond(&graph, &common, p0);
   Node* d2 = CreateDiamond(&graph, &common, p1);
-  Node* add = graph.NewNode(machine.Int32Add(), d1, d2);
+  Node* add = graph.NewNode(&kIntAdd, d1, d2);
   Node* d3 = CreateDiamond(&graph, &common, add);
   Node* ret = graph.NewNode(common.Return(), d3, start, start);
   Node* end = graph.NewNode(common.End(), ret, start);
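
The deleted per-test MachineOperatorBuilder locals are replaced by a single shared operator passed by address. The definition of kIntAdd is not shown in these hunks; a plausible file-static form (the constructor arguments are an assumption, not part of this diff) would be:

    // Hypothetical definition near the top of the test file; the real commit
    // may order Operator's input/output counts differently.
    static Operator kIntAdd(IrOpcode::kInt32Add, Operator::kPure, "Int32Add",
                            2, 0, 0, 1, 0, 0);

    Node* add = graph.NewNode(&kIntAdd, d1, d2);  // operator passed by address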
@@ -1779,7 +1757,6 @@ TEST(NestedFloatingDiamonds) {
   Graph graph(scope.main_zone());
   CommonOperatorBuilder common(scope.main_zone());
   SimplifiedOperatorBuilder simplified(scope.main_zone());
-  MachineOperatorBuilder machine;
 
   Node* start = graph.NewNode(common.Start(2));
   graph.SetStart(start);
@@ -1816,12 +1793,94 @@ TEST(NestedFloatingDiamonds) {
 }
 
 
+TEST(NestedFloatingDiamondWithChain) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+
+  Node* start = graph.NewNode(common.Start(2));
+  graph.SetStart(start);
+
+  Node* p0 = graph.NewNode(common.Parameter(0), start);
+  Node* p1 = graph.NewNode(common.Parameter(1), start);
+  Node* c = graph.NewNode(common.Int32Constant(7));
+
+  Node* brA1 = graph.NewNode(common.Branch(), p0, graph.start());
+  Node* tA1 = graph.NewNode(common.IfTrue(), brA1);
+  Node* fA1 = graph.NewNode(common.IfFalse(), brA1);
+  Node* mA1 = graph.NewNode(common.Merge(2), tA1, fA1);
+  Node* phiA1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p1, mA1);
+
+  Node* brB1 = graph.NewNode(common.Branch(), p1, graph.start());
+  Node* tB1 = graph.NewNode(common.IfTrue(), brB1);
+  Node* fB1 = graph.NewNode(common.IfFalse(), brB1);
+  Node* mB1 = graph.NewNode(common.Merge(2), tB1, fB1);
+  Node* phiB1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p1, mB1);
+
+  Node* brA2 = graph.NewNode(common.Branch(), phiB1, mA1);
+  Node* tA2 = graph.NewNode(common.IfTrue(), brA2);
+  Node* fA2 = graph.NewNode(common.IfFalse(), brA2);
+  Node* mA2 = graph.NewNode(common.Merge(2), tA2, fA2);
+  Node* phiA2 = graph.NewNode(common.Phi(kMachAnyTagged, 2), phiB1, c, mA2);
+
+  Node* brB2 = graph.NewNode(common.Branch(), phiA1, mB1);
+  Node* tB2 = graph.NewNode(common.IfTrue(), brB2);
+  Node* fB2 = graph.NewNode(common.IfFalse(), brB2);
+  Node* mB2 = graph.NewNode(common.Merge(2), tB2, fB2);
+  Node* phiB2 = graph.NewNode(common.Phi(kMachAnyTagged, 2), phiA1, c, mB2);
+
+  Node* add = graph.NewNode(&kIntAdd, phiA2, phiB2);
+  Node* ret = graph.NewNode(common.Return(), add, start, start);
+  Node* end = graph.NewNode(common.End(), ret, start);
+
+  graph.SetEnd(end);
+
+  ComputeAndVerifySchedule(35, &graph);
+}
+
+
+TEST(NestedFloatingDiamondWithLoop) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+
+  Node* start = graph.NewNode(common.Start(2));
+  graph.SetStart(start);
+
+  Node* p0 = graph.NewNode(common.Parameter(0), start);
+
+  Node* fv = graph.NewNode(common.Int32Constant(7));
+  Node* br = graph.NewNode(common.Branch(), p0, graph.start());
+  Node* t = graph.NewNode(common.IfTrue(), br);
+  Node* f = graph.NewNode(common.IfFalse(), br);
+
+  Node* loop = graph.NewNode(common.Loop(2), f, start);
+  Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
+
+  Node* add = graph.NewNode(&kIntAdd, ind, fv);
+  Node* br1 = graph.NewNode(common.Branch(), add, loop);
+  Node* t1 = graph.NewNode(common.IfTrue(), br1);
+  Node* f1 = graph.NewNode(common.IfFalse(), br1);
+
+  loop->ReplaceInput(1, t1);  // close loop.
+  ind->ReplaceInput(1, ind);  // close induction variable.
+
+  Node* m = graph.NewNode(common.Merge(2), t, f1);
+  Node* phi = graph.NewNode(common.Phi(kMachAnyTagged, 2), fv, ind, m);
+
+  Node* ret = graph.NewNode(common.Return(), phi, start, start);
+  Node* end = graph.NewNode(common.End(), ret, start);
+
+  graph.SetEnd(end);
+
+  ComputeAndVerifySchedule(20, &graph);
+}
+
+
 TEST(LoopedFloatingDiamond1) {
   HandleAndZoneScope scope;
   Graph graph(scope.main_zone());
   CommonOperatorBuilder common(scope.main_zone());
-  SimplifiedOperatorBuilder simplified(scope.main_zone());
-  MachineOperatorBuilder machine;
 
   Node* start = graph.NewNode(common.Start(2));
   graph.SetStart(start);
@@ -1831,7 +1890,7 @@ TEST(LoopedFloatingDiamond1) {
   Node* c = graph.NewNode(common.Int32Constant(7));
   Node* loop = graph.NewNode(common.Loop(2), start, start);
   Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
-  Node* add = graph.NewNode(machine.IntAdd(), ind, c);
+  Node* add = graph.NewNode(&kIntAdd, ind, c);
 
   Node* br = graph.NewNode(common.Branch(), add, loop);
   Node* t = graph.NewNode(common.IfTrue(), br);
@@ -1859,8 +1918,6 @@ TEST(LoopedFloatingDiamond2) {
   HandleAndZoneScope scope;
   Graph graph(scope.main_zone());
   CommonOperatorBuilder common(scope.main_zone());
-  SimplifiedOperatorBuilder simplified(scope.main_zone());
-  MachineOperatorBuilder machine;
 
   Node* start = graph.NewNode(common.Start(2));
   graph.SetStart(start);
@@ -1877,7 +1934,7 @@ TEST(LoopedFloatingDiamond2) {
   Node* m1 = graph.NewNode(common.Merge(2), t1, f1);
   Node* phi1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), c, ind, m1);
 
-  Node* add = graph.NewNode(machine.IntAdd(), ind, phi1);
+  Node* add = graph.NewNode(&kIntAdd, ind, phi1);
 
   Node* br = graph.NewNode(common.Branch(), add, loop);
   Node* t = graph.NewNode(common.IfTrue(), br);
@@ -1895,12 +1952,60 @@ TEST(LoopedFloatingDiamond2) {
 }
 
 
+TEST(LoopedFloatingDiamond3) {
+  HandleAndZoneScope scope;
+  Graph graph(scope.main_zone());
+  CommonOperatorBuilder common(scope.main_zone());
+
+  Node* start = graph.NewNode(common.Start(2));
+  graph.SetStart(start);
+
+  Node* p0 = graph.NewNode(common.Parameter(0), start);
+
+  Node* c = graph.NewNode(common.Int32Constant(7));
+  Node* loop = graph.NewNode(common.Loop(2), start, start);
+  Node* ind = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
+
+  Node* br1 = graph.NewNode(common.Branch(), p0, graph.start());
+  Node* t1 = graph.NewNode(common.IfTrue(), br1);
+  Node* f1 = graph.NewNode(common.IfFalse(), br1);
+
+  Node* loop1 = graph.NewNode(common.Loop(2), t1, start);
+  Node* ind1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), p0, p0, loop);
+
+  Node* add1 = graph.NewNode(&kIntAdd, ind1, c);
+  Node* br2 = graph.NewNode(common.Branch(), add1, loop1);
+  Node* t2 = graph.NewNode(common.IfTrue(), br2);
+  Node* f2 = graph.NewNode(common.IfFalse(), br2);
+
+  loop1->ReplaceInput(1, t2);   // close inner loop.
+  ind1->ReplaceInput(1, ind1);  // close inner induction variable.
+
+  Node* m1 = graph.NewNode(common.Merge(2), f1, f2);
+  Node* phi1 = graph.NewNode(common.Phi(kMachAnyTagged, 2), c, ind1, m1);
+
+  Node* add = graph.NewNode(&kIntAdd, ind, phi1);
+
+  Node* br = graph.NewNode(common.Branch(), add, loop);
+  Node* t = graph.NewNode(common.IfTrue(), br);
+  Node* f = graph.NewNode(common.IfFalse(), br);
+
+  loop->ReplaceInput(1, t);   // close loop.
+  ind->ReplaceInput(1, add);  // close induction variable.
+
+  Node* ret = graph.NewNode(common.Return(), ind, start, f);
+  Node* end = graph.NewNode(common.End(), ret, f);
+
+  graph.SetEnd(end);
+
+  ComputeAndVerifySchedule(28, &graph);
+}
+
+
 TEST(PhisPushedDownToDifferentBranches) {
   HandleAndZoneScope scope;
   Graph graph(scope.main_zone());
   CommonOperatorBuilder common(scope.main_zone());
-  SimplifiedOperatorBuilder simplified(scope.main_zone());
-  MachineOperatorBuilder machine;
 
   Node* start = graph.NewNode(common.Start(2));
   graph.SetStart(start);
index 5ddc10d..d463997 100644 (file)
@@ -7,7 +7,6 @@
 #include "src/compiler/access-builder.h"
 #include "src/compiler/change-lowering.h"
 #include "src/compiler/control-builders.h"
-#include "src/compiler/generic-node-inl.h"
 #include "src/compiler/graph-reducer.h"
 #include "src/compiler/graph-visualizer.h"
 #include "src/compiler/node-properties-inl.h"
@@ -63,7 +62,7 @@ class SimplifiedLoweringTester : public GraphBuilderTester<ReturnType> {
     Linkage linkage(
         zone, Linkage::GetSimplifiedCDescriptor(zone, this->machine_sig_));
     ChangeLowering lowering(&jsgraph, &linkage);
-    GraphReducer reducer(this->graph());
+    GraphReducer reducer(this->graph(), this->zone());
     reducer.AddReducer(&lowering);
     reducer.ReduceGraph();
     Verifier::Run(this->graph());
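
GraphReducer now takes the Zone backing its internal worklists as a second constructor argument; callers that previously wrote GraphReducer reducer(graph) must thread a zone through. The pattern, as used above:

    GraphReducer reducer(this->graph(), this->zone());  // zone-backed state
    reducer.AddReducer(&lowering);
    reducer.ReduceGraph();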
@@ -233,10 +232,8 @@ TEST(RunLoadStoreMap) {
 TEST(RunLoadStoreFixedArrayIndex) {
   SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
   ElementAccess access = AccessBuilder::ForFixedArrayElement();
-  Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0),
-                             t.Int32Constant(2));
-  t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), t.Int32Constant(2),
-                 load);
+  Node* load = t.LoadElement(access, t.Parameter(0), t.Int32Constant(0));
+  t.StoreElement(access, t.Parameter(0), t.Int32Constant(1), load);
   t.Return(load);
 
   t.LowerAllNodes();
@@ -265,10 +262,9 @@ TEST(RunLoadStoreArrayBuffer) {
   Node* backing_store = t.LoadField(
       AccessBuilder::ForJSArrayBufferBackingStore(), t.Parameter(0));
   Node* load =
-      t.LoadElement(buffer_access, backing_store, t.Int32Constant(index),
-                    t.Int32Constant(array_length));
+      t.LoadElement(buffer_access, backing_store, t.Int32Constant(index));
   t.StoreElement(buffer_access, backing_store, t.Int32Constant(index + 1),
-                 t.Int32Constant(array_length), load);
+                 load);
   t.Return(t.jsgraph.TrueConstant());
 
   t.LowerAllNodes();
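
The simplified-lowering hunks track two related signature changes: ElementAccess lost its leading bounds-check field, and LoadElement/StoreElement no longer take an explicit length node. A sketch of the new shapes (base and index stand in for whatever nodes a given test computes):

    ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
                            Type::Any(), kMachAnyTagged};
    Node* load = t.LoadElement(access, base, index);  // no length operand
    t.StoreElement(access, base, index, load);        // likewise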
@@ -351,13 +347,12 @@ TEST(RunLoadElementFromUntaggedBase) {
   for (size_t i = 0; i < arraysize(smis); i++) {    // for header sizes
     for (size_t j = 0; (i + j) < arraysize(smis); j++) {  // for element index
       int offset = static_cast<int>(i * sizeof(Smi*));
-      ElementAccess access = {kNoBoundsCheck, kUntaggedBase, offset,
-                              Type::Integral32(), kMachAnyTagged};
+      ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
+                              kMachAnyTagged};
 
       SimplifiedLoweringTester<Object*> t;
-      Node* load = t.LoadElement(
-          access, t.PointerConstant(smis), t.Int32Constant(static_cast<int>(j)),
-          t.Int32Constant(static_cast<int>(arraysize(smis))));
+      Node* load = t.LoadElement(access, t.PointerConstant(smis),
+                                 t.Int32Constant(static_cast<int>(j)));
       t.Return(load);
       t.LowerAllNodes();
 
@@ -380,14 +375,13 @@ TEST(RunStoreElementFromUntaggedBase) {
   for (size_t i = 0; i < arraysize(smis); i++) {    // for header sizes
     for (size_t j = 0; (i + j) < arraysize(smis); j++) {  // for element index
       int offset = static_cast<int>(i * sizeof(Smi*));
-      ElementAccess access = {kNoBoundsCheck, kUntaggedBase, offset,
-                              Type::Integral32(), kMachAnyTagged};
+      ElementAccess access = {kUntaggedBase, offset, Type::Integral32(),
+                              kMachAnyTagged};
 
       SimplifiedLoweringTester<Object*> t(kMachAnyTagged);
       Node* p0 = t.Parameter(0);
       t.StoreElement(access, t.PointerConstant(smis),
-                     t.Int32Constant(static_cast<int>(j)),
-                     t.Int32Constant(static_cast<int>(arraysize(smis))), p0);
+                     t.Int32Constant(static_cast<int>(j)), p0);
       t.Return(p0);
       t.LowerAllNodes();
 
@@ -453,10 +447,8 @@ class AccessTester : public HandleAndZoneScope {
 
     SimplifiedLoweringTester<Object*> t;
     Node* ptr = GetBaseNode(&t);
-    Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index),
-                               t.Int32Constant(static_cast<int>(num_elements)));
-    t.StoreElement(access, ptr, t.Int32Constant(to_index),
-                   t.Int32Constant(static_cast<int>(num_elements)), load);
+    Node* load = t.LoadElement(access, ptr, t.Int32Constant(from_index));
+    t.StoreElement(access, ptr, t.Int32Constant(to_index), load);
     t.Return(t.jsgraph.TrueConstant());
     t.LowerAllNodes();
     t.GenerateCode();
@@ -543,9 +535,9 @@ class AccessTester : public HandleAndZoneScope {
 
  private:
   ElementAccess GetElementAccess() {
-    ElementAccess access = {
-        kNoBoundsCheck, tagged ? kTaggedBase : kUntaggedBase,
-        tagged ? FixedArrayBase::kHeaderSize : 0, Type::Any(), rep};
+    ElementAccess access = {tagged ? kTaggedBase : kUntaggedBase,
+                            tagged ? FixedArrayBase::kHeaderSize : 0,
+                            Type::Any(), rep};
     return access;
   }
 
@@ -744,6 +736,17 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
     }
   }
 
+  Node* ExampleWithTypeAndRep(Type* type, MachineType mach_type) {
+    FieldAccess access = {kUntaggedBase, 0, Handle<Name>::null(), type,
+                          mach_type};
+    // TODO(titzer): using loads here just to force the representation is ugly.
+    Node* node = graph()->NewNode(simplified()->LoadField(access),
+                                  jsgraph.IntPtrConstant(0), graph()->start(),
+                                  graph()->start());
+    NodeProperties::SetBounds(node, Bounds(type));
+    return node;
+  }
+
   Node* Use(Node* node, MachineType type) {
     if (type & kTypeInt32) {
       return graph()->NewNode(machine()->Int32LessThan(), node,
@@ -757,6 +760,9 @@ class TestingGraph : public HandleAndZoneScope, public GraphAndBuilders {
     } else if (type & kRepWord64) {
       return graph()->NewNode(machine()->Int64LessThan(), node,
                               Int64Constant(1));
+    } else if (type & kRepWord32) {
+      return graph()->NewNode(machine()->Word32Equal(), node,
+                              jsgraph.Int32Constant(1));
     } else {
       return graph()->NewNode(simplified()->ReferenceEqual(Type::Any()), node,
                               jsgraph.TrueConstant());
@@ -1060,9 +1066,7 @@ TEST(LowerNumberToInt32_to_ChangeTaggedToInt32) {
 TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32) {
   // NumberToInt32(x: kRepFloat64) used as kMachInt32
   TestingGraph t(Type::Number());
-  Node* p0 = t.ExampleWithOutput(kMachFloat64);
-  // TODO(titzer): run the typer here, or attach machine type to param.
-  NodeProperties::SetBounds(p0, Bounds(Type::Number()));
+  Node* p0 = t.ExampleWithTypeAndRep(Type::Number(), kMachFloat64);
   Node* trunc = t.graph()->NewNode(t.simplified()->NumberToInt32(), p0);
   Node* use = t.Use(trunc, kMachInt32);
   t.Return(use);
@@ -1086,17 +1090,6 @@ TEST(LowerNumberToInt32_to_TruncateFloat64ToInt32_with_change) {
 }
 
 
-TEST(LowerNumberToInt32_to_ChangeFloat64ToTagged) {
-  // TODO(titzer): NumberToInt32(x: kRepFloat64 | kTypeInt32) used as kRepTagged
-}
-
-
-TEST(LowerNumberToInt32_to_ChangeFloat64ToInt32) {
-  // TODO(titzer): NumberToInt32(x: kRepFloat64 | kTypeInt32) used as kRepWord32
-  // | kTypeInt32
-}
-
-
 TEST(LowerNumberToUint32_to_nop) {
   // NumberToUint32(x: kRepTagged | kTypeUint32) used as kRepTagged
   TestingGraph t(Type::Unsigned32());
@@ -1159,20 +1152,67 @@ TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_with_change) {
 }
 
 
-TEST(LowerNumberToUint32_to_ChangeFloat64ToTagged) {
-  // TODO(titzer): NumberToUint32(x: kRepFloat64 | kTypeUint32) used as
-  // kRepTagged
+TEST(LowerNumberToUint32_to_TruncateFloat64ToInt32_uint32) {
+  // NumberToUint32(x: kRepFloat64) used as kRepWord32
+  TestingGraph t(Type::Unsigned32());
+  Node* input = t.ExampleWithTypeAndRep(Type::Number(), kMachFloat64);
+  Node* trunc = t.graph()->NewNode(t.simplified()->NumberToUint32(), input);
+  Node* use = t.Use(trunc, kRepWord32);
+  t.Return(use);
+  t.Lower();
+  CheckChangeOf(IrOpcode::kTruncateFloat64ToInt32, input, use->InputAt(0));
 }
 
 
-TEST(LowerNumberToUint32_to_ChangeFloat64ToUint32) {
-  // TODO(titzer): NumberToUint32(x: kRepFloat64 | kTypeUint32) used as
-  // kRepWord32
+TEST(LowerNumberToUI32_of_Float64_used_as_word32) {
+  // NumberTo(Int,Uint)32(x: kRepFloat64 | kType(Int,Uint)32) used as
+  // kType(Int,Uint)32 | kRepWord32
+  Type* types[] = {Type::Signed32(), Type::Unsigned32()};
+  MachineType mach[] = {kTypeInt32, kTypeUint32, kMachNone};
+
+  for (int i = 0; i < 2; i++) {
+    for (int u = 0; u < 3; u++) {
+      TestingGraph t(types[i]);
+      Node* input = t.ExampleWithTypeAndRep(
+          types[i], static_cast<MachineType>(kRepFloat64 | mach[i]));
+      const Operator* op = i == 0 ? t.simplified()->NumberToInt32()
+                                  : t.simplified()->NumberToUint32();
+      Node* trunc = t.graph()->NewNode(op, input);
+      Node* use = t.Use(trunc, static_cast<MachineType>(kRepWord32 | mach[u]));
+      t.Return(use);
+      t.Lower();
+      IrOpcode::Value opcode = i == 0 ? IrOpcode::kChangeFloat64ToInt32
+                                      : IrOpcode::kChangeFloat64ToUint32;
+      CheckChangeOf(opcode, input, use->InputAt(0));
+    }
+  }
 }
 
 
-TEST(LowerNumberToUint32_to_TruncateFloat64ToUint32) {
-  // TODO(titzer): NumberToUint32(x: kRepFloat64) used as kRepWord32
+TEST(LowerNumberToUI32_of_Float64_used_as_tagged) {
+  // NumberTo(Int,Uint)32(x: kRepFloat64 | kType(Int,Uint)32) used as
+  // kType(Int,Uint)32 | kRepTagged
+  Type* types[] = {Type::Signed32(), Type::Unsigned32(), Type::Any()};
+  MachineType mach[] = {kTypeInt32, kTypeUint32, kMachNone};
+
+  for (int i = 0; i < 2; i++) {
+    for (int u = 0; u < 3; u++) {
+      TestingGraph t(types[i]);
+      Node* input = t.ExampleWithTypeAndRep(
+          types[i], static_cast<MachineType>(kRepFloat64 | mach[i]));
+      const Operator* op = i == 0 ? t.simplified()->NumberToInt32()
+                                  : t.simplified()->NumberToUint32();
+      Node* trunc = t.graph()->NewNode(op, input);
+      // TODO(titzer): we use the store here to force the representation.
+      FieldAccess access = {kTaggedBase, 0, Handle<Name>(), types[u],
+                            static_cast<MachineType>(mach[u] | kRepTagged)};
+      Node* store = t.graph()->NewNode(t.simplified()->StoreField(access), t.p0,
+                                       trunc, t.start, t.start);
+      t.Effect(store);
+      t.Lower();
+      CheckChangeOf(IrOpcode::kChangeFloat64ToTagged, input, store->InputAt(2));
+    }
+  }
 }
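
Taken together, the CheckChangeOf assertions in these two replacement tests pin down the expected lowerings of NumberToInt32/NumberToUint32 on a kRepFloat64 input:

    // used as word32: NumberToInt32(x)  -> ChangeFloat64ToInt32(x)
    //                 NumberToUint32(x) -> ChangeFloat64ToUint32(x)
    // stored back as kRepTagged: either -> ChangeFloat64ToTagged(x)
    CheckChangeOf(IrOpcode::kChangeFloat64ToInt32, input, use->InputAt(0));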
 
 
@@ -1416,13 +1456,11 @@ TEST(LowerLoadElement_to_load) {
   TestingGraph t(Type::Any(), Type::Signed32());
 
   for (size_t i = 0; i < arraysize(kMachineReps); i++) {
-    ElementAccess access = {kNoBoundsCheck, kTaggedBase,
-                            FixedArrayBase::kHeaderSize, Type::Any(),
-                            kMachineReps[i]};
+    ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                            Type::Any(), kMachineReps[i]};
 
-    Node* load =
-        t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0, t.p1,
-                           t.jsgraph.Int32Constant(1024), t.start, t.start);
+    Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
+                                    t.p1, t.start, t.start);
     Node* use = t.Use(load, kMachineReps[i]);
     t.Return(use);
     t.Lower();
@@ -1440,14 +1478,12 @@ TEST(LowerStoreElement_to_store) {
   TestingGraph t(Type::Any(), Type::Signed32());
 
   for (size_t i = 0; i < arraysize(kMachineReps); i++) {
-    ElementAccess access = {kNoBoundsCheck, kTaggedBase,
-                            FixedArrayBase::kHeaderSize, Type::Any(),
-                            kMachineReps[i]};
+    ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize,
+                            Type::Any(), kMachineReps[i]};
 
     Node* val = t.ExampleWithOutput(kMachineReps[i]);
     Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
-                                     t.p1, t.jsgraph.Int32Constant(1024), val,
-                                     t.start, t.start);
+                                     t.p1, val, t.start, t.start);
     t.Effect(store);
     t.Lower();
     CHECK_EQ(IrOpcode::kStore, store->opcode());
@@ -1466,13 +1502,12 @@ TEST(LowerStoreElement_to_store) {
 TEST(InsertChangeForLoadElementIndex) {
   // LoadElement(obj: Tagged, index: kTypeInt32 | kRepTagged) =>
   //   Load(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k))
-  TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
-  ElementAccess access = {kNoBoundsCheck, kTaggedBase,
-                          FixedArrayBase::kHeaderSize, Type::Any(),
+  TestingGraph t(Type::Any(), Type::Signed32());
+  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
                           kMachAnyTagged};
 
   Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
-                                  t.p1, t.p2, t.start, t.start);
+                                  t.p1, t.start, t.start);
   t.Return(load);
   t.Lower();
   CHECK_EQ(IrOpcode::kLoad, load->opcode());
@@ -1486,13 +1521,12 @@ TEST(InsertChangeForLoadElementIndex) {
 TEST(InsertChangeForStoreElementIndex) {
   // StoreElement(obj: Tagged, index: kTypeInt32 | kRepTagged, val) =>
   //   Store(obj, Int32Add(Int32Mul(ChangeTaggedToInt32(index), #k), #k), val)
-  TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
-  ElementAccess access = {kNoBoundsCheck, kTaggedBase,
-                          FixedArrayBase::kHeaderSize, Type::Any(),
+  TestingGraph t(Type::Any(), Type::Signed32());
+  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
                           kMachAnyTagged};
 
   Node* store =
-      t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1, t.p2,
+      t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0, t.p1,
                          t.jsgraph.TrueConstant(), t.start, t.start);
   t.Effect(store);
   t.Lower();
@@ -1507,12 +1541,11 @@ TEST(InsertChangeForStoreElementIndex) {
 TEST(InsertChangeForLoadElement) {
   // TODO(titzer): test all load/store representation change insertions.
   TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
-  ElementAccess access = {kNoBoundsCheck, kTaggedBase,
-                          FixedArrayBase::kHeaderSize, Type::Any(),
+  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
                           kMachFloat64};
 
   Node* load = t.graph()->NewNode(t.simplified()->LoadElement(access), t.p0,
-                                  t.p1, t.p1, t.start, t.start);
+                                  t.p1, t.start, t.start);
   t.Return(load);
   t.Lower();
   CHECK_EQ(IrOpcode::kLoad, load->opcode());
@@ -1539,14 +1572,13 @@ TEST(InsertChangeForLoadField) {
 
 TEST(InsertChangeForStoreElement) {
   // TODO(titzer): test all load/store representation change insertions.
-  TestingGraph t(Type::Any(), Type::Signed32(), Type::Any());
-  ElementAccess access = {kNoBoundsCheck, kTaggedBase,
-                          FixedArrayBase::kHeaderSize, Type::Any(),
+  TestingGraph t(Type::Any(), Type::Signed32());
+  ElementAccess access = {kTaggedBase, FixedArrayBase::kHeaderSize, Type::Any(),
                           kMachFloat64};
 
-  Node* store = t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
-                                   t.jsgraph.Int32Constant(0), t.p2, t.p1,
-                                   t.start, t.start);
+  Node* store =
+      t.graph()->NewNode(t.simplified()->StoreElement(access), t.p0,
+                         t.jsgraph.Int32Constant(0), t.p1, t.start, t.start);
   t.Effect(store);
   t.Lower();
 
index 2b9d91a..5f7f55a 100644 (file)
@@ -5,6 +5,7 @@
 #include <functional>
 
 #include "src/codegen.h"
+#include "src/compiler/js-operator.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/typer.h"
 #include "test/cctest/cctest.h"
index 5bf61c8..5f452ea 100644 (file)
@@ -38,6 +38,7 @@ using ::v8::ObjectTemplate;
 using ::v8::Value;
 using ::v8::Context;
 using ::v8::Local;
+using ::v8::Name;
 using ::v8::String;
 using ::v8::Script;
 using ::v8::Function;
@@ -513,7 +514,7 @@ void JSONStringifyEnumerator(const v8::PropertyCallbackInfo<v8::Array>& info) {
 }
 
 
-void JSONStringifyGetter(Local<String> name,
+void JSONStringifyGetter(Local<Name> name,
                          const v8::PropertyCallbackInfo<v8::Value>& info) {
   info.GetReturnValue().Set(v8_str("crbug-161028"));
 }
@@ -525,8 +526,8 @@ THREADED_TEST(JSONStringifyNamedInterceptorObject) {
   v8::HandleScope scope(isolate);
 
   v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
-  obj->SetNamedPropertyHandler(
-      JSONStringifyGetter, NULL, NULL, NULL, JSONStringifyEnumerator);
+  obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      JSONStringifyGetter, NULL, NULL, NULL, JSONStringifyEnumerator));
   env->Global()->Set(v8_str("obj"), obj->NewInstance());
   v8::Handle<v8::String> expected = v8_str("{\"regress\":\"crbug-161028\"}");
   CHECK(CompileRun("JSON.stringify(obj)")->Equals(expected));
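
Throughout the API tests, SetNamedPropertyHandler/SetIndexedPropertyHandler calls become SetHandler with a configuration object. The mapping, with placeholder callback names:

    // Before (removed by this upgrade):
    //   obj->SetNamedPropertyHandler(Getter, Setter, Query, Deleter, Enumerator);
    // After:
    obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
        Getter, Setter, Query, Deleter, Enumerator));
    // Indexed handlers migrate the same way:
    obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
        IndexedGetter, 0, 0, 0, 0, v8_num(637)));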
@@ -577,3 +578,30 @@ THREADED_TEST(GlobalObjectAccessor) {
   CHECK(v8::Utils::OpenHandle(*CompileRun("getter()"))->IsJSGlobalProxy());
   CHECK(v8::Utils::OpenHandle(*CompileRun("set_value"))->IsJSGlobalProxy());
 }
+
+
+static void EmptyGetter(Local<Name> name,
+                        const v8::PropertyCallbackInfo<v8::Value>& info) {
+  ApiTestFuzzer::Fuzz();
+}
+
+
+static void OneProperty(Local<String> name,
+                        const v8::PropertyCallbackInfo<v8::Value>& info) {
+  ApiTestFuzzer::Fuzz();
+  info.GetReturnValue().Set(v8_num(1));
+}
+
+
+THREADED_TEST(Regress433458) {
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+  v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
+  obj->SetHandler(v8::NamedPropertyHandlerConfiguration(EmptyGetter));
+  obj->SetNativeDataProperty(v8_str("prop"), OneProperty);
+  env->Global()->Set(v8_str("obj"), obj->NewInstance());
+  CompileRun(
+      "Object.defineProperty(obj, 'prop', { writable: false });"
+      "Object.defineProperty(obj, 'prop', { writable: true });");
+}
index 5fa2a2f..5c8584d 100644 (file)
@@ -185,7 +185,8 @@ THREADED_TEST(IsolateOfContext) {
 }
 
 
-static void TestSignature(const char* loop_js, Local<Value> receiver) {
+static void TestSignature(const char* loop_js, Local<Value> receiver,
+                          v8::Isolate* isolate) {
   i::ScopedVector<char> source(200);
   i::SNPrintF(source,
               "for (var i = 0; i < 10; i++) {"
@@ -202,7 +203,7 @@ static void TestSignature(const char* loop_js, Local<Value> receiver) {
     CHECK_EQ(10, signature_callback_count);
   } else {
     CHECK_EQ(v8_str("TypeError: Illegal invocation"),
-             try_catch.Exception()->ToString());
+             try_catch.Exception()->ToString(isolate));
   }
 }
 
@@ -267,17 +268,17 @@ THREADED_TEST(ReceiverSignature) {
     i::SNPrintF(
         source, "var test_object = %s; test_object", test_objects[i]);
     Local<Value> test_object = CompileRun(source.start());
-    TestSignature("test_object.prop();", test_object);
-    TestSignature("test_object.accessor;", test_object);
-    TestSignature("test_object[accessor_key];", test_object);
-    TestSignature("test_object.accessor = 1;", test_object);
-    TestSignature("test_object[accessor_key] = 1;", test_object);
+    TestSignature("test_object.prop();", test_object, isolate);
+    TestSignature("test_object.accessor;", test_object, isolate);
+    TestSignature("test_object[accessor_key];", test_object, isolate);
+    TestSignature("test_object.accessor = 1;", test_object, isolate);
+    TestSignature("test_object[accessor_key] = 1;", test_object, isolate);
     if (i >= bad_signature_start_offset) test_object = Local<Value>();
-    TestSignature("test_object.prop_sig();", test_object);
-    TestSignature("test_object.accessor_sig;", test_object);
-    TestSignature("test_object[accessor_sig_key];", test_object);
-    TestSignature("test_object.accessor_sig = 1;", test_object);
-    TestSignature("test_object[accessor_sig_key] = 1;", test_object);
+    TestSignature("test_object.prop_sig();", test_object, isolate);
+    TestSignature("test_object.accessor_sig;", test_object, isolate);
+    TestSignature("test_object[accessor_sig_key];", test_object, isolate);
+    TestSignature("test_object.accessor_sig = 1;", test_object, isolate);
+    TestSignature("test_object[accessor_sig_key] = 1;", test_object, isolate);
   }
 }
 
@@ -356,7 +357,7 @@ THREADED_TEST(HulIgennem) {
   v8::Isolate* isolate = env->GetIsolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::Primitive> undef = v8::Undefined(isolate);
-  Local<String> undef_str = undef->ToString();
+  Local<String> undef_str = undef->ToString(isolate);
   char* value = i::NewArray<char>(undef_str->Utf8Length() + 1);
   undef_str->WriteUtf8(value);
   CHECK_EQ(0, strcmp(value, "undefined"));
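
The ToString/ToNumber/ToBoolean conversions now take the isolate explicitly, which is why helpers such as TestSignature grow an isolate parameter. In sketch form (value stands for any v8::Value handle):

    v8::Isolate* isolate = env->GetIsolate();
    Local<String> s = value->ToString(isolate);
    double d = value->ToNumber(isolate)->Value();
    bool b = value->ToBoolean(isolate)->Value();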
@@ -742,23 +743,28 @@ THREADED_TEST(UsingExternalOneByteString) {
 }
 
 
-class DummyResource : public v8::String::ExternalStringResource {
+class RandomLengthResource : public v8::String::ExternalStringResource {
  public:
+  explicit RandomLengthResource(int length) : length_(length) {}
   virtual const uint16_t* data() const { return string_; }
-  virtual size_t length() const { return 1 << 30; }
+  virtual size_t length() const { return length_; }
 
  private:
   uint16_t string_[10];
+  int length_;
 };
 
 
-class DummyOneByteResource : public v8::String::ExternalOneByteStringResource {
+class RandomLengthOneByteResource
+    : public v8::String::ExternalOneByteStringResource {
  public:
+  explicit RandomLengthOneByteResource(int length) : length_(length) {}
   virtual const char* data() const { return string_; }
-  virtual size_t length() const { return 1 << 30; }
+  virtual size_t length() const { return length_; }
 
  private:
   char string_[10];
+  int length_;
 };
 
 
@@ -767,7 +773,7 @@ THREADED_TEST(NewExternalForVeryLongString) {
     LocalContext env;
     v8::HandleScope scope(env->GetIsolate());
     v8::TryCatch try_catch;
-    DummyOneByteResource r;
+    RandomLengthOneByteResource r(1 << 30);
     v8::Local<v8::String> str = v8::String::NewExternal(CcTest::isolate(), &r);
     CHECK(str.IsEmpty());
     CHECK(try_catch.HasCaught());
@@ -779,7 +785,7 @@ THREADED_TEST(NewExternalForVeryLongString) {
     LocalContext env;
     v8::HandleScope scope(env->GetIsolate());
     v8::TryCatch try_catch;
-    DummyResource r;
+    RandomLengthResource r(1 << 30);
     v8::Local<v8::String> str = v8::String::NewExternal(CcTest::isolate(), &r);
     CHECK(str.IsEmpty());
     CHECK(try_catch.HasCaught());
@@ -1228,7 +1234,8 @@ Handle<Value> TestFastReturnValues() {
 
 THREADED_PROFILED_TEST(FastReturnValues) {
   LocalContext env;
-  v8::HandleScope scope(CcTest::isolate());
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
   v8::Handle<v8::Value> value;
   // check int32_t and uint32_t
   int32_t int_values[] = {
@@ -1253,13 +1260,13 @@ THREADED_PROFILED_TEST(FastReturnValues) {
   // check double
   value = TestFastReturnValues<double>();
   CHECK(value->IsNumber());
-  CHECK_EQ(kFastReturnValueDouble, value->ToNumber()->Value());
+  CHECK_EQ(kFastReturnValueDouble, value->ToNumber(isolate)->Value());
   // check bool values
   for (int i = 0; i < 2; i++) {
     fast_return_value_bool = i == 0;
     value = TestFastReturnValues<bool>();
     CHECK(value->IsBoolean());
-    CHECK_EQ(fast_return_value_bool, value->ToBoolean()->Value());
+    CHECK_EQ(fast_return_value_bool, value->ToBoolean(isolate)->Value());
   }
   // check oddballs
   ReturnValueOddball oddballs[] = {
@@ -1979,7 +1986,7 @@ THREADED_TEST(DescriptorInheritance) {
 int echo_named_call_count;
 
 
-static void EchoNamedProperty(Local<String> name,
+static void EchoNamedProperty(Local<Name> name,
                               const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   CHECK_EQ(v8_str("data"), info.Data());
@@ -2034,18 +2041,28 @@ static void ThrowingSymbolAccessorGetter(
   info.GetReturnValue().Set(info.GetIsolate()->ThrowException(name));
 }
 
-void EmptyInterceptorGetter(Local<String> name,
-                            const v8::PropertyCallbackInfo<v8::Value>& info) {
-}
 
-void EmptyInterceptorSetter(Local<String> name,
-                            Local<Value> value,
-                            const v8::PropertyCallbackInfo<v8::Value>& info) {
-}
+void EmptyInterceptorGetter(Local<Name> name,
+                            const v8::PropertyCallbackInfo<v8::Value>& info) {}
 
-void InterceptorGetter(Local<String> name,
-                       const v8::PropertyCallbackInfo<v8::Value>& info) {
-  // Intercept names that start with 'interceptor_'.
+
+void EmptyInterceptorSetter(Local<Name> name, Local<Value> value,
+                            const v8::PropertyCallbackInfo<v8::Value>& info) {}
+
+
+void EmptyGenericInterceptorGetter(
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {}
+
+
+void EmptyGenericInterceptorSetter(
+    Local<Name> name, Local<Value> value,
+    const v8::PropertyCallbackInfo<v8::Value>& info) {}
+
+
+void StringInterceptorGetter(
+    Local<String> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+  // Intercept names that start with 'interceptor_'.
   String::Utf8Value utf8(name);
   char* name_str = *utf8;
   char prefix[] = "interceptor_";
@@ -2057,9 +2074,9 @@ void InterceptorGetter(Local<String> name,
   info.GetReturnValue().Set(self->GetHiddenValue(v8_str(name_str + i)));
 }
 
-void InterceptorSetter(Local<String> name,
-                       Local<Value> value,
-                       const v8::PropertyCallbackInfo<v8::Value>& info) {
+
+void StringInterceptorSetter(Local<String> name, Local<Value> value,
+                             const v8::PropertyCallbackInfo<v8::Value>& info) {
   // Intercept accesses that set certain integer values, for which the name does
   // not start with 'accessor_'.
   String::Utf8Value utf8(name);
@@ -2078,6 +2095,57 @@ void InterceptorSetter(Local<String> name,
   }
 }
 
+void InterceptorGetter(Local<Name> generic_name,
+                       const v8::PropertyCallbackInfo<v8::Value>& info) {
+  if (generic_name->IsSymbol()) return;
+  StringInterceptorGetter(Local<String>::Cast(generic_name), info);
+}
+
+void InterceptorSetter(Local<Name> generic_name, Local<Value> value,
+                       const v8::PropertyCallbackInfo<v8::Value>& info) {
+  if (generic_name->IsSymbol()) return;
+  StringInterceptorSetter(Local<String>::Cast(generic_name), value, info);
+}
+
+void GenericInterceptorGetter(Local<Name> generic_name,
+                              const v8::PropertyCallbackInfo<v8::Value>& info) {
+  Local<String> str;
+  if (generic_name->IsSymbol()) {
+    Local<Value> name = Local<Symbol>::Cast(generic_name)->Name();
+    if (name->IsUndefined()) return;
+    str = String::Concat(v8_str("_sym_"), Local<String>::Cast(name));
+  } else {
+    Local<String> name = Local<String>::Cast(generic_name);
+    String::Utf8Value utf8(name);
+    char* name_str = *utf8;
+    if (*name_str == '_') return;
+    str = String::Concat(v8_str("_str_"), name);
+  }
+
+  Handle<Object> self = Handle<Object>::Cast(info.This());
+  info.GetReturnValue().Set(self->Get(str));
+}
+
+void GenericInterceptorSetter(Local<Name> generic_name, Local<Value> value,
+                              const v8::PropertyCallbackInfo<v8::Value>& info) {
+  Local<String> str;
+  if (generic_name->IsSymbol()) {
+    Local<Value> name = Local<Symbol>::Cast(generic_name)->Name();
+    if (name->IsUndefined()) return;
+    str = String::Concat(v8_str("_sym_"), Local<String>::Cast(name));
+  } else {
+    Local<String> name = Local<String>::Cast(generic_name);
+    String::Utf8Value utf8(name);
+    char* name_str = *utf8;
+    if (*name_str == '_') return;
+    str = String::Concat(v8_str("_str_"), name);
+  }
+
+  Handle<Object> self = Handle<Object>::Cast(info.This());
+  self->Set(str, value);
+  info.GetReturnValue().Set(value);
+}
+
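
The block above establishes the migration pattern for interceptors: keep the legacy string-keyed logic in a String-typed core, and expose a Name-typed wrapper that punts on symbols. Reduced to its essentials (WrappedGetter is a placeholder name):

    void WrappedGetter(Local<Name> name,
                       const v8::PropertyCallbackInfo<v8::Value>& info) {
      if (name->IsSymbol()) return;  // punt: the runtime handles symbols
      StringInterceptorGetter(Local<String>::Cast(name), info);
    }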
 void AddAccessor(Handle<FunctionTemplate> templ,
                  Handle<String> name,
                  v8::AccessorGetterCallback getter,
@@ -2099,6 +2167,13 @@ void AddAccessor(Handle<FunctionTemplate> templ,
   templ->PrototypeTemplate()->SetAccessor(name, getter, setter);
 }
 
+void AddInterceptor(Handle<FunctionTemplate> templ,
+                    v8::GenericNamedPropertyGetterCallback getter,
+                    v8::GenericNamedPropertySetterCallback setter) {
+  templ->InstanceTemplate()->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(getter, setter));
+}
+
 
 THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
   v8::HandleScope scope(CcTest::isolate());
@@ -2118,6 +2193,62 @@ THREADED_TEST(EmptyInterceptorDoesNotShadowAccessors) {
 }
 
 
+THREADED_TEST(LegacyInterceptorDoesNotSeeSymbols) {
+  LocalContext env;
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+  Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+  v8::Local<v8::Symbol> age = v8::Symbol::New(isolate, v8_str("age"));
+
+  child->Inherit(parent);
+  AddAccessor(parent, age, SymbolAccessorGetter, SymbolAccessorSetter);
+  AddInterceptor(child, StringInterceptorGetter, StringInterceptorSetter);
+
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  env->Global()->Set(v8_str("age"), age);
+  CompileRun(
+      "var child = new Child;"
+      "child[age] = 10;");
+  ExpectInt32("child[age]", 10);
+  ExpectBoolean("child.hasOwnProperty('age')", false);
+  ExpectBoolean("child.hasOwnProperty('accessor_age')", true);
+}
+
+
+THREADED_TEST(GenericInterceptorDoesSeeSymbols) {
+  LocalContext env;
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope scope(isolate);
+  Handle<FunctionTemplate> parent = FunctionTemplate::New(isolate);
+  Handle<FunctionTemplate> child = FunctionTemplate::New(isolate);
+  v8::Local<v8::Symbol> age = v8::Symbol::New(isolate, v8_str("age"));
+  v8::Local<v8::Symbol> anon = v8::Symbol::New(isolate);
+
+  child->Inherit(parent);
+  AddAccessor(parent, age, SymbolAccessorGetter, SymbolAccessorSetter);
+  AddInterceptor(child, GenericInterceptorGetter, GenericInterceptorSetter);
+
+  env->Global()->Set(v8_str("Child"), child->GetFunction());
+  env->Global()->Set(v8_str("age"), age);
+  env->Global()->Set(v8_str("anon"), anon);
+  CompileRun(
+      "var child = new Child;"
+      "child[age] = 10;");
+  ExpectInt32("child[age]", 10);
+  ExpectInt32("child._sym_age", 10);
+
+  // Check that it also sees strings.
+  CompileRun("child.foo = 47");
+  ExpectInt32("child.foo", 47);
+  ExpectInt32("child._str_foo", 47);
+
+  // Check that the interceptor can punt (in this case, on anonymous symbols).
+  CompileRun("child[anon] = 31337");
+  ExpectInt32("child[anon]", 31337);
+}
+
+
 THREADED_TEST(ExecutableAccessorIsPreservedOnAttributeChange) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
@@ -2363,9 +2494,8 @@ THREADED_TEST(NamedPropertyHandlerGetter) {
   v8::HandleScope scope(CcTest::isolate());
   v8::Handle<v8::FunctionTemplate> templ =
       v8::FunctionTemplate::New(CcTest::isolate());
-  templ->InstanceTemplate()->SetNamedPropertyHandler(EchoNamedProperty,
-                                                     0, 0, 0, 0,
-                                                     v8_str("data"));
+  templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      EchoNamedProperty, 0, 0, 0, 0, v8_str("data")));
   LocalContext env;
   env->Global()->Set(v8_str("obj"),
                      templ->GetFunction()->NewInstance());
@@ -2400,9 +2530,8 @@ THREADED_TEST(IndexedPropertyHandlerGetter) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
-  templ->InstanceTemplate()->SetIndexedPropertyHandler(EchoIndexedProperty,
-                                                       0, 0, 0, 0,
-                                                       v8_num(637));
+  templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      EchoIndexedProperty, 0, 0, 0, 0, v8_num(637)));
   LocalContext env;
   env->Global()->Set(v8_str("obj"),
                      templ->GetFunction()->NewInstance());
@@ -2422,8 +2551,7 @@ static void CheckThisIndexedPropertyHandler(
 }
 
 static void CheckThisNamedPropertyHandler(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyHandler));
   ApiTestFuzzer::Fuzz();
   CHECK(info.This()->Equals(bottom));
@@ -2440,8 +2568,7 @@ void CheckThisIndexedPropertySetter(
 
 
 void CheckThisNamedPropertySetter(
-    Local<String> property,
-    Local<Value> value,
+    Local<Name> property, Local<Value> value,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertySetter));
   ApiTestFuzzer::Fuzz();
@@ -2458,8 +2585,7 @@ void CheckThisIndexedPropertyQuery(
 
 
 void CheckThisNamedPropertyQuery(
-    Local<String> property,
-    const v8::PropertyCallbackInfo<v8::Integer>& info) {
+    Local<Name> property, const v8::PropertyCallbackInfo<v8::Integer>& info) {
   CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyQuery));
   ApiTestFuzzer::Fuzz();
   CHECK(info.This()->Equals(bottom));
@@ -2476,8 +2602,7 @@ void CheckThisIndexedPropertyDeleter(
 
 
 void CheckThisNamedPropertyDeleter(
-    Local<String> property,
-    const v8::PropertyCallbackInfo<v8::Boolean>& info) {
+    Local<Name> property, const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   CheckReturnValue(info, FUNCTION_ADDR(CheckThisNamedPropertyDeleter));
   ApiTestFuzzer::Fuzz();
   CHECK(info.This()->Equals(bottom));
@@ -2507,19 +2632,15 @@ THREADED_PROFILED_TEST(PropertyHandlerInPrototype) {
 
   // Set up a prototype chain with three interceptors.
   v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
-  templ->InstanceTemplate()->SetIndexedPropertyHandler(
-      CheckThisIndexedPropertyHandler,
-      CheckThisIndexedPropertySetter,
-      CheckThisIndexedPropertyQuery,
-      CheckThisIndexedPropertyDeleter,
-      CheckThisIndexedPropertyEnumerator);
-
-  templ->InstanceTemplate()->SetNamedPropertyHandler(
-      CheckThisNamedPropertyHandler,
-      CheckThisNamedPropertySetter,
-      CheckThisNamedPropertyQuery,
-      CheckThisNamedPropertyDeleter,
-      CheckThisNamedPropertyEnumerator);
+  templ->InstanceTemplate()->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      CheckThisIndexedPropertyHandler, CheckThisIndexedPropertySetter,
+      CheckThisIndexedPropertyQuery, CheckThisIndexedPropertyDeleter,
+      CheckThisIndexedPropertyEnumerator));
+
+  templ->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      CheckThisNamedPropertyHandler, CheckThisNamedPropertySetter,
+      CheckThisNamedPropertyQuery, CheckThisNamedPropertyDeleter,
+      CheckThisNamedPropertyEnumerator));
 
   bottom = templ->GetFunction()->NewInstance();
   Local<v8::Object> top = templ->GetFunction()->NewInstance();
@@ -2551,8 +2672,7 @@ THREADED_PROFILED_TEST(PropertyHandlerInPrototype) {
 
 
 static void PrePropertyHandlerGet(
-    Local<String> key,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   if (v8_str("pre")->Equals(key)) {
     info.GetReturnValue().Set(v8_str("PrePropertyHandler: pre"));
@@ -2561,8 +2681,7 @@ static void PrePropertyHandlerGet(
 
 
 static void PrePropertyHandlerQuery(
-    Local<String> key,
-    const v8::PropertyCallbackInfo<v8::Integer>& info) {
+    Local<Name> key, const v8::PropertyCallbackInfo<v8::Integer>& info) {
   if (v8_str("pre")->Equals(key)) {
     info.GetReturnValue().Set(static_cast<int32_t>(v8::None));
   }
@@ -2573,9 +2692,8 @@ THREADED_TEST(PrePropertyHandler) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::FunctionTemplate> desc = v8::FunctionTemplate::New(isolate);
-  desc->InstanceTemplate()->SetNamedPropertyHandler(PrePropertyHandlerGet,
-                                                    0,
-                                                    PrePropertyHandlerQuery);
+  desc->InstanceTemplate()->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      PrePropertyHandlerGet, 0, PrePropertyHandlerQuery));
   LocalContext env(NULL, desc->InstanceTemplate());
   CompileRun("var pre = 'Object: pre'; var on = 'Object: on';");
   v8::Handle<Value> result_pre = CompileRun("pre");
@@ -2648,16 +2766,17 @@ THREADED_TEST(DeepCrossLanguageRecursion) {
 
 
 static void ThrowingPropertyHandlerGet(
-    Local<String> key,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
+  // Since this interceptor is used on "with" objects, the runtime will look up
+  // @@unscopables.  Punt.
+  if (key->IsSymbol()) return;
   ApiTestFuzzer::Fuzz();
   info.GetReturnValue().Set(info.GetIsolate()->ThrowException(key));
 }
 
 
 static void ThrowingPropertyHandlerSet(
-    Local<String> key,
-    Local<Value>,
+    Local<Name> key, Local<Value>,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   info.GetIsolate()->ThrowException(key);
   info.GetReturnValue().SetUndefined();  // not the same as empty handle
@@ -2668,8 +2787,8 @@ THREADED_TEST(CallbackExceptionRegression) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
-  obj->SetNamedPropertyHandler(ThrowingPropertyHandlerGet,
-                               ThrowingPropertyHandlerSet);
+  obj->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      ThrowingPropertyHandlerGet, ThrowingPropertyHandlerSet));
   LocalContext env;
   env->Global()->Set(v8_str("obj"), obj->NewInstance());
   v8::Handle<Value> otto = CompileRun(
@@ -2911,6 +3030,53 @@ THREADED_TEST(GlobalProxyIdentityHash) {
 }
 
 
+TEST(SymbolIdentityHash) {
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+
+  {
+    Local<v8::Symbol> symbol = v8::Symbol::New(isolate);
+    int hash = symbol->GetIdentityHash();
+    int hash1 = symbol->GetIdentityHash();
+    CHECK_EQ(hash, hash1);
+    CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+    int hash3 = symbol->GetIdentityHash();
+    CHECK_EQ(hash, hash3);
+  }
+
+  {
+    v8::Handle<v8::Symbol> js_symbol =
+        CompileRun("Symbol('foo')").As<v8::Symbol>();
+    int hash = js_symbol->GetIdentityHash();
+    int hash1 = js_symbol->GetIdentityHash();
+    CHECK_EQ(hash, hash1);
+    CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+    int hash3 = js_symbol->GetIdentityHash();
+    CHECK_EQ(hash, hash3);
+  }
+}
+
+
+TEST(StringIdentityHash) {
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+
+  Local<v8::String> str = v8::String::NewFromUtf8(isolate, "str1");
+  int hash = str->GetIdentityHash();
+  int hash1 = str->GetIdentityHash();
+  CHECK_EQ(hash, hash1);
+  CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+  int hash3 = str->GetIdentityHash();
+  CHECK_EQ(hash, hash3);
+
+  Local<v8::String> str2 = v8::String::NewFromUtf8(isolate, "str1");
+  int hash4 = str2->GetIdentityHash();
+  CHECK_EQ(hash, hash4);
+}
+
+
 THREADED_TEST(SymbolProperties) {
   LocalContext env;
   v8::Isolate* isolate = env->GetIsolate();
@@ -3527,7 +3693,7 @@ THREADED_TEST(Regress97784) {
 
 static bool interceptor_for_hidden_properties_called;
 static void InterceptorForHiddenProperties(
-    Local<String> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   interceptor_for_hidden_properties_called = true;
 }
 
@@ -3544,7 +3710,8 @@ THREADED_TEST(HiddenPropertiesWithInterceptors) {
   // Associate an interceptor with an object and start setting hidden values.
   Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
   Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
-  instance_templ->SetNamedPropertyHandler(InterceptorForHiddenProperties);
+  instance_templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorForHiddenProperties));
   Local<v8::Function> function = fun_templ->GetFunction();
   Local<v8::Object> obj = function->NewInstance();
   CHECK(obj->SetHiddenValue(key, v8::Integer::New(isolate, 2302)));
@@ -4260,6 +4427,45 @@ THREADED_TEST(ApiObjectGroupsCycle) {
 }
 
 
+THREADED_TEST(WeakRootsSurviveTwoRoundsOfGC) {
+  LocalContext env;
+  v8::Isolate* iso = env->GetIsolate();
+  HandleScope scope(iso);
+
+  WeakCallCounter counter(1234);
+
+  WeakCallCounterAndPersistent<Value> weak_obj(&counter);
+
+  // Create a weak object that references an internalized string.
+  {
+    HandleScope scope(iso);
+    weak_obj.handle.Reset(iso, Object::New(iso));
+    weak_obj.handle.SetWeak(&weak_obj, &WeakPointerCallback);
+    CHECK(weak_obj.handle.IsWeak());
+    Local<Object>::New(iso, weak_obj.handle.As<Object>())->Set(
+        v8_str("x"),
+        String::NewFromUtf8(iso, "magic cookie", String::kInternalizedString));
+  }
+  // Do a single full GC.
+  i::Isolate* i_iso = reinterpret_cast<v8::internal::Isolate*>(iso);
+  i::Heap* heap = i_iso->heap();
+  heap->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+
+  // We should have received the weak callback.
+  CHECK_EQ(1, counter.NumberOfWeakCalls());
+
+  // Check that the string is still alive.
+  {
+    HandleScope scope(iso);
+    i::MaybeHandle<i::String> magic_string =
+        i::StringTable::LookupStringIfExists(
+            i_iso,
+            v8::Utils::OpenHandle(*String::NewFromUtf8(iso, "magic cookie")));
+    magic_string.Check();
+  }
+}
+
+
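The new test above checks an ordering subtlety: a single full GC is enough to fire the weak callback for the holder object, yet the internalized "magic cookie" string it referenced must remain findable. The liveness probe goes through the internal string table rather than the public API; reduced to its core, with i_iso and iso as bound in the test:

    i::MaybeHandle<i::String> found = i::StringTable::LookupStringIfExists(
        i_iso,
        v8::Utils::OpenHandle(*String::NewFromUtf8(iso, "magic cookie")));
    found.Check();  // crashes if the internalized string was collected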
 // TODO(mstarzinger): This should be a THREADED_TEST but causes failures
 // on the buildbots, so was made non-threaded for the time being.
 TEST(ApiObjectGroupsCycleForScavenger) {
@@ -4382,13 +4588,14 @@ THREADED_TEST(ScriptException) {
 
 TEST(TryCatchCustomException) {
   LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
   v8::TryCatch try_catch;
   CompileRun("function CustomError() { this.a = 'b'; }"
              "(function f() { throw new CustomError(); })();");
   CHECK(try_catch.HasCaught());
-  CHECK(try_catch.Exception()->ToObject()->
-            Get(v8_str("a"))->Equals(v8_str("b")));
+  CHECK(try_catch.Exception()->ToObject(isolate)->Get(v8_str("a"))->Equals(
+      v8_str("b")));
 }
 
 
@@ -4882,49 +5089,51 @@ static void CheckUncle(v8::TryCatch* try_catch) {
 
 THREADED_TEST(ConversionNumber) {
   LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
   // Very large number.
   CompileRun("var obj = Math.pow(2,32) * 1237;");
   Local<Value> obj = env->Global()->Get(v8_str("obj"));
-  CHECK_EQ(5312874545152.0, obj->ToNumber()->Value());
-  CHECK_EQ(0, obj->ToInt32()->Value());
-  CHECK(0u == obj->ToUint32()->Value());  // NOLINT - no CHECK_EQ for unsigned.
+  CHECK_EQ(5312874545152.0, obj->ToNumber(isolate)->Value());
+  CHECK_EQ(0, obj->ToInt32(isolate)->Value());
+  CHECK(0u ==
+        obj->ToUint32(isolate)->Value());  // NOLINT - no CHECK_EQ for unsigned.
   // Large number.
   CompileRun("var obj = -1234567890123;");
   obj = env->Global()->Get(v8_str("obj"));
-  CHECK_EQ(-1234567890123.0, obj->ToNumber()->Value());
-  CHECK_EQ(-1912276171, obj->ToInt32()->Value());
-  CHECK(2382691125u == obj->ToUint32()->Value());  // NOLINT
+  CHECK_EQ(-1234567890123.0, obj->ToNumber(isolate)->Value());
+  CHECK_EQ(-1912276171, obj->ToInt32(isolate)->Value());
+  CHECK(2382691125u == obj->ToUint32(isolate)->Value());  // NOLINT
   // Small positive integer.
   CompileRun("var obj = 42;");
   obj = env->Global()->Get(v8_str("obj"));
-  CHECK_EQ(42.0, obj->ToNumber()->Value());
-  CHECK_EQ(42, obj->ToInt32()->Value());
-  CHECK(42u == obj->ToUint32()->Value());  // NOLINT
+  CHECK_EQ(42.0, obj->ToNumber(isolate)->Value());
+  CHECK_EQ(42, obj->ToInt32(isolate)->Value());
+  CHECK(42u == obj->ToUint32(isolate)->Value());  // NOLINT
   // Negative integer.
   CompileRun("var obj = -37;");
   obj = env->Global()->Get(v8_str("obj"));
-  CHECK_EQ(-37.0, obj->ToNumber()->Value());
-  CHECK_EQ(-37, obj->ToInt32()->Value());
-  CHECK(4294967259u == obj->ToUint32()->Value());  // NOLINT
+  CHECK_EQ(-37.0, obj->ToNumber(isolate)->Value());
+  CHECK_EQ(-37, obj->ToInt32(isolate)->Value());
+  CHECK(4294967259u == obj->ToUint32(isolate)->Value());  // NOLINT
   // Positive non-int32 integer.
   CompileRun("var obj = 0x81234567;");
   obj = env->Global()->Get(v8_str("obj"));
-  CHECK_EQ(2166572391.0, obj->ToNumber()->Value());
-  CHECK_EQ(-2128394905, obj->ToInt32()->Value());
-  CHECK(2166572391u == obj->ToUint32()->Value());  // NOLINT
+  CHECK_EQ(2166572391.0, obj->ToNumber(isolate)->Value());
+  CHECK_EQ(-2128394905, obj->ToInt32(isolate)->Value());
+  CHECK(2166572391u == obj->ToUint32(isolate)->Value());  // NOLINT
   // Fraction.
   CompileRun("var obj = 42.3;");
   obj = env->Global()->Get(v8_str("obj"));
-  CHECK_EQ(42.3, obj->ToNumber()->Value());
-  CHECK_EQ(42, obj->ToInt32()->Value());
-  CHECK(42u == obj->ToUint32()->Value());  // NOLINT
+  CHECK_EQ(42.3, obj->ToNumber(isolate)->Value());
+  CHECK_EQ(42, obj->ToInt32(isolate)->Value());
+  CHECK(42u == obj->ToUint32(isolate)->Value());  // NOLINT
   // Large negative fraction.
   CompileRun("var obj = -5726623061.75;");
   obj = env->Global()->Get(v8_str("obj"));
-  CHECK_EQ(-5726623061.75, obj->ToNumber()->Value());
-  CHECK_EQ(-1431655765, obj->ToInt32()->Value());
-  CHECK(2863311531u == obj->ToUint32()->Value());  // NOLINT
+  CHECK_EQ(-5726623061.75, obj->ToNumber(isolate)->Value());
+  CHECK_EQ(-1431655765, obj->ToInt32(isolate)->Value());
+  CHECK(2863311531u == obj->ToUint32(isolate)->Value());  // NOLINT
 }
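The expected constants in ConversionNumber follow ECMAScript's truncate-then-wrap rules (ToInt32 and ToUint32 reduce modulo 2^32). A worked check for the last case, plain arithmetic rather than patch content:

    trunc(-5726623061.75)           = -5726623061
    -5726623061 mod 2^32            =  2863311531   // the ToUint32 value
    2863311531 >= 2^31, so - 2^32   = -1431655765   // the ToInt32 value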
 
 
@@ -4989,29 +5198,29 @@ THREADED_TEST(ConversionException) {
     "var obj = new TestClass();");
   Local<Value> obj = env->Global()->Get(v8_str("obj"));
 
-  v8::TryCatch try_catch;
+  v8::TryCatch try_catch(isolate);
 
-  Local<Value> to_string_result = obj->ToString();
+  Local<Value> to_string_result = obj->ToString(isolate);
   CHECK(to_string_result.IsEmpty());
   CheckUncle(&try_catch);
 
-  Local<Value> to_number_result = obj->ToNumber();
+  Local<Value> to_number_result = obj->ToNumber(isolate);
   CHECK(to_number_result.IsEmpty());
   CheckUncle(&try_catch);
 
-  Local<Value> to_integer_result = obj->ToInteger();
+  Local<Value> to_integer_result = obj->ToInteger(isolate);
   CHECK(to_integer_result.IsEmpty());
   CheckUncle(&try_catch);
 
-  Local<Value> to_uint32_result = obj->ToUint32();
+  Local<Value> to_uint32_result = obj->ToUint32(isolate);
   CHECK(to_uint32_result.IsEmpty());
   CheckUncle(&try_catch);
 
-  Local<Value> to_int32_result = obj->ToInt32();
+  Local<Value> to_int32_result = obj->ToInt32(isolate);
   CHECK(to_int32_result.IsEmpty());
   CheckUncle(&try_catch);
 
-  Local<Value> to_object_result = v8::Undefined(isolate)->ToObject();
+  Local<Value> to_object_result = v8::Undefined(isolate)->ToObject(isolate);
   CHECK(to_object_result.IsEmpty());
   CHECK(try_catch.HasCaught());
   try_catch.Reset();
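The ConversionException hunk confirms that the isolate-taking overloads keep the old failure contract: when a user-defined conversion throws, the To* call returns an empty handle and the exception is observable through the active TryCatch. A condensed sketch, with obj as defined in the test:

    v8::TryCatch try_catch(isolate);
    v8::Local<v8::Value> result = obj->ToString(isolate);
    CHECK(result.IsEmpty());       // the conversion produced no value...
    CHECK(try_catch.HasCaught());  // ...the thrown exception was caught instead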
@@ -5047,7 +5256,7 @@ void CCatcher(const v8::FunctionCallbackInfo<v8::Value>& args) {
   }
   v8::HandleScope scope(args.GetIsolate());
   v8::TryCatch try_catch;
-  Local<Value> result = CompileRun(args[0]->ToString());
+  Local<Value> result = CompileRun(args[0]->ToString(args.GetIsolate()));
   CHECK(!try_catch.HasCaught() || result.IsEmpty());
   args.GetReturnValue().Set(try_catch.HasCaught());
 }
@@ -6096,7 +6305,7 @@ THREADED_TEST(NoAccessors) {
 }
 
 
-static void XPropertyGetter(Local<String> property,
+static void XPropertyGetter(Local<Name> property,
                             const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   CHECK(info.Data()->IsUndefined());
@@ -6108,7 +6317,7 @@ THREADED_TEST(NamedInterceptorPropertyRead) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(XPropertyGetter);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
   LocalContext context;
   context->Global()->Set(v8_str("obj"), templ->NewInstance());
   Local<Script> script = v8_compile("obj.x");
@@ -6123,7 +6332,7 @@ THREADED_TEST(NamedInterceptorDictionaryIC) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(XPropertyGetter);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
   LocalContext context;
   // Create an object with a named interceptor.
   context->Global()->Set(v8_str("interceptor_obj"), templ->NewInstance());
@@ -6157,7 +6366,7 @@ THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
 
   context1->Enter();
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(XPropertyGetter);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(XPropertyGetter));
   // Create an object with a named interceptor.
   v8::Local<v8::Object> object = templ->NewInstance();
   context1->Global()->Set(v8_str("interceptor_obj"), object);
@@ -6192,8 +6401,7 @@ THREADED_TEST(NamedInterceptorDictionaryICMultipleContext) {
 
 
 static void SetXOnPrototypeGetter(
-    Local<String> property,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> property, const v8::PropertyCallbackInfo<v8::Value>& info) {
   // Set x on the prototype object and do not handle the get request.
   v8::Handle<v8::Value> proto = info.Holder()->GetPrototype();
   proto.As<v8::Object>()->Set(v8_str("x"),
@@ -6210,7 +6418,8 @@ THREADED_TEST(NamedInterceptorMapTransitionRead) {
       v8::FunctionTemplate::New(isolate);
   Local<v8::ObjectTemplate> instance_template
       = function_template->InstanceTemplate();
-  instance_template->SetNamedPropertyHandler(SetXOnPrototypeGetter);
+  instance_template->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(SetXOnPrototypeGetter));
   LocalContext context;
   context->Global()->Set(v8_str("F"), function_template->GetFunction());
   // Create an instance of F and introduce a map transition for x.
@@ -6246,8 +6455,8 @@ THREADED_TEST(IndexedInterceptorWithIndexedAccessor) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IndexedPropertyGetter,
-                                   IndexedPropertySetter);
+  templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      IndexedPropertyGetter, IndexedPropertySetter));
   LocalContext context;
   context->Global()->Set(v8_str("obj"), templ->NewInstance());
   Local<Script> getter_script = v8_compile(
@@ -6312,11 +6521,9 @@ THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(UnboxedDoubleIndexedPropertyGetter,
-                                   UnboxedDoubleIndexedPropertySetter,
-                                   0,
-                                   0,
-                                   UnboxedDoubleIndexedPropertyEnumerator);
+  templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      UnboxedDoubleIndexedPropertyGetter, UnboxedDoubleIndexedPropertySetter, 0,
+      0, UnboxedDoubleIndexedPropertyEnumerator));
   LocalContext context;
   context->Global()->Set(v8_str("obj"), templ->NewInstance());
   // When obj is created, force it to be stored in a FastDoubleArray.
@@ -6326,7 +6533,7 @@ THREADED_TEST(IndexedInterceptorUnboxedDoubleWithIndexedAccessor) {
       "for (x in obj) {key_count++;};"
       "obj;");
   Local<Value> result = create_unboxed_double_script->Run();
-  CHECK(result->ToObject()->HasRealIndexedProperty(2000));
+  CHECK(result->ToObject(isolate)->HasRealIndexedProperty(2000));
   Local<Script> key_count_check = v8_compile("key_count;");
   result = key_count_check->Run();
   CHECK_EQ(v8_num(40013), result);
@@ -6368,11 +6575,9 @@ THREADED_TEST(IndexedInterceptorSloppyArgsWithIndexedAccessor) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(SloppyIndexedPropertyGetter,
-                                   0,
-                                   0,
-                                   0,
-                                   SloppyArgsIndexedPropertyEnumerator);
+  templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      SloppyIndexedPropertyGetter, 0, 0, 0,
+      SloppyArgsIndexedPropertyEnumerator));
   LocalContext context;
   context->Global()->Set(v8_str("obj"), templ->NewInstance());
   Local<Script> create_args_script = v8_compile(
@@ -6394,7 +6599,8 @@ THREADED_TEST(IndexedInterceptorWithGetOwnPropertyDescriptor) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+  templ->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
 
   LocalContext context;
   context->Global()->Set(v8_str("obj"), templ->NewInstance());
@@ -6416,7 +6622,8 @@ THREADED_TEST(IndexedInterceptorWithNoSetter) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+  templ->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
 
   LocalContext context;
   context->Global()->Set(v8_str("obj"), templ->NewInstance());
@@ -6440,7 +6647,8 @@ THREADED_TEST(IndexedInterceptorWithAccessorCheck) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+  templ->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
 
   LocalContext context;
   Local<v8::Object> obj = templ->NewInstance();
@@ -6468,7 +6676,8 @@ THREADED_TEST(IndexedInterceptorWithAccessorCheckSwitchedOn) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+  templ->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
 
   LocalContext context;
   Local<v8::Object> obj = templ->NewInstance();
@@ -6506,7 +6715,8 @@ THREADED_TEST(IndexedInterceptorWithDifferentIndices) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+  templ->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
 
   LocalContext context;
   Local<v8::Object> obj = templ->NewInstance();
@@ -6530,7 +6740,8 @@ THREADED_TEST(IndexedInterceptorWithNegativeIndices) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+  templ->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
 
   LocalContext context;
   Local<v8::Object> obj = templ->NewInstance();
@@ -6570,7 +6781,8 @@ THREADED_TEST(IndexedInterceptorWithNotSmiLookup) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+  templ->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
 
   LocalContext context;
   Local<v8::Object> obj = templ->NewInstance();
@@ -6600,7 +6812,8 @@ THREADED_TEST(IndexedInterceptorGoingMegamorphic) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+  templ->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
 
   LocalContext context;
   Local<v8::Object> obj = templ->NewInstance();
@@ -6631,7 +6844,8 @@ THREADED_TEST(IndexedInterceptorReceiverTurningSmi) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+  templ->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
 
   LocalContext context;
   Local<v8::Object> obj = templ->NewInstance();
@@ -6662,7 +6876,8 @@ THREADED_TEST(IndexedInterceptorOnProto) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(IdentityIndexedPropertyGetter);
+  templ->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(IdentityIndexedPropertyGetter));
 
   LocalContext context;
   Local<v8::Object> obj = templ->NewInstance();
@@ -7487,8 +7702,7 @@ struct FlagAndPersistent {
 };
 
 
-static void SetFlag(
-    const v8::WeakCallbackData<v8::Object, FlagAndPersistent>& data) {
+static void SetFlag(const v8::PhantomCallbackData<FlagAndPersistent>& data) {
   data.GetParameter()->flag = true;
 }
 
@@ -7554,6 +7768,98 @@ THREADED_TEST(IndependentWeakHandle) {
 }
 
 
+class Trivial {
+ public:
+  explicit Trivial(int x) : x_(x) {}
+
+  int x() { return x_; }
+  void set_x(int x) { x_ = x; }
+
+ private:
+  int x_;
+};
+
+
+class Trivial2 {
+ public:
+  Trivial2(int x, int y) : y_(y), x_(x) {}
+
+  int x() { return x_; }
+  void set_x(int x) { x_ = x; }
+
+  int y() { return y_; }
+  void set_y(int y) { y_ = y; }
+
+ private:
+  int y_;
+  int x_;
+};
+
+
+void CheckInternalFields(
+    const v8::InternalFieldsCallbackData<Trivial, Trivial2>& data) {
+  Trivial* t1 = data.GetInternalField1();
+  Trivial2* t2 = data.GetInternalField2();
+  CHECK_EQ(42, t1->x());
+  CHECK_EQ(103, t2->x());
+  t1->set_x(1729);
+  t2->set_x(33550336);
+}
+
+
+void InternalFieldCallback(bool global_gc) {
+  LocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+
+  Local<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
+  Local<v8::ObjectTemplate> instance_templ = templ->InstanceTemplate();
+  Trivial* t1;
+  Trivial2* t2;
+  instance_templ->SetInternalFieldCount(2);
+  {
+    v8::HandleScope scope(isolate);
+    Local<v8::Object> obj = templ->GetFunction()->NewInstance();
+    v8::Persistent<v8::Object> handle(isolate, obj);
+    CHECK_EQ(2, obj->InternalFieldCount());
+    CHECK(obj->GetInternalField(0)->IsUndefined());
+    t1 = new Trivial(42);
+    t2 = new Trivial2(103, 9);
+
+    obj->SetAlignedPointerInInternalField(0, t1);
+    t1 = reinterpret_cast<Trivial*>(obj->GetAlignedPointerFromInternalField(0));
+    CHECK_EQ(42, t1->x());
+
+    obj->SetAlignedPointerInInternalField(1, t2);
+    t2 =
+        reinterpret_cast<Trivial2*>(obj->GetAlignedPointerFromInternalField(1));
+    CHECK_EQ(103, t2->x());
+
+    handle.SetPhantom(CheckInternalFields, 0, 1);
+    if (!global_gc) {
+      handle.MarkIndependent();
+    }
+  }
+  if (global_gc) {
+    CcTest::heap()->CollectAllGarbage(TestHeap::Heap::kNoGCFlags);
+  } else {
+    CcTest::heap()->CollectGarbage(i::NEW_SPACE);
+  }
+
+  CHECK_EQ(1729, t1->x());
+  CHECK_EQ(33550336, t2->x());
+
+  delete t1;
+  delete t2;
+}
+
+
+THREADED_TEST(InternalFieldCallback) {
+  InternalFieldCallback(false);
+  InternalFieldCallback(true);
+}
+
+
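InternalFieldCallback above is the first exercise of the new phantom weak-handle API: SetPhantom registers a callback that runs after the object is known to be dead, and the two integer arguments name the internal-field slots whose aligned pointers are delivered to it. The core of the pattern, reduced from the test:

    v8::Persistent<v8::Object> handle(isolate, obj);
    obj->SetAlignedPointerInInternalField(0, t1);  // a Trivial*
    obj->SetAlignedPointerInInternalField(1, t2);  // a Trivial2*
    handle.SetPhantom(CheckInternalFields, 0, 1);
    // Once GC reclaims obj, CheckInternalFields receives t1 and t2 via
    // v8::InternalFieldsCallbackData<Trivial, Trivial2>.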
 static void ResetUseValueAndSetFlag(
     const v8::WeakCallbackData<v8::Object, FlagAndPersistent>& data) {
   // Blink will reset the handle, and then use the other handle, so they
@@ -7579,7 +7885,8 @@ static void ResetWeakHandle(bool global_gc) {
     object_a.handle.Reset(iso, a);
     object_b.handle.Reset(iso, b);
     if (global_gc) {
-      CcTest::heap()->CollectAllGarbage(TestHeap::Heap::kNoGCFlags);
+      CcTest::heap()->CollectAllGarbage(
+          TestHeap::Heap::kAbortIncrementalMarkingMask);
     } else {
       CcTest::heap()->CollectGarbage(i::NEW_SPACE);
     }
@@ -7595,7 +7902,8 @@ static void ResetWeakHandle(bool global_gc) {
     CHECK(object_b.handle.IsIndependent());
   }
   if (global_gc) {
-    CcTest::heap()->CollectAllGarbage(TestHeap::Heap::kNoGCFlags);
+    CcTest::heap()->CollectAllGarbage(
+        TestHeap::Heap::kAbortIncrementalMarkingMask);
   } else {
     CcTest::heap()->CollectGarbage(i::NEW_SPACE);
   }
@@ -7737,9 +8045,8 @@ THREADED_TEST(Arguments) {
 }
 
 
-static void NoBlockGetterX(Local<String> name,
-                           const v8::PropertyCallbackInfo<v8::Value>&) {
-}
+static void NoBlockGetterX(Local<Name> name,
+                           const v8::PropertyCallbackInfo<v8::Value>&) {}
 
 
 static void NoBlockGetterI(uint32_t index,
@@ -7747,7 +8054,7 @@ static void NoBlockGetterI(uint32_t index,
 }
 
 
-static void PDeleter(Local<String> name,
+static void PDeleter(Local<Name> name,
                      const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   if (!name->Equals(v8_str("foo"))) {
     return;  // not intercepted
@@ -7771,8 +8078,10 @@ THREADED_TEST(Deleter) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
-  obj->SetNamedPropertyHandler(NoBlockGetterX, NULL, NULL, PDeleter, NULL);
-  obj->SetIndexedPropertyHandler(NoBlockGetterI, NULL, NULL, IDeleter, NULL);
+  obj->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX, NULL,
+                                                        NULL, PDeleter, NULL));
+  obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      NoBlockGetterI, NULL, NULL, IDeleter, NULL));
   LocalContext context;
   context->Global()->Set(v8_str("k"), obj->NewInstance());
   CompileRun(
@@ -7794,7 +8103,7 @@ THREADED_TEST(Deleter) {
 }
 
 
-static void GetK(Local<String> name,
+static void GetK(Local<Name> name,
                  const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   if (name->Equals(v8_str("foo")) ||
@@ -7835,8 +8144,10 @@ THREADED_TEST(Enumerators) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
-  obj->SetNamedPropertyHandler(GetK, NULL, NULL, NULL, NamedEnum);
-  obj->SetIndexedPropertyHandler(IndexedGetK, NULL, NULL, NULL, IndexedEnum);
+  obj->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(GetK, NULL, NULL, NULL, NamedEnum));
+  obj->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      IndexedGetK, NULL, NULL, NULL, IndexedEnum));
   LocalContext context;
   context->Global()->Set(v8_str("k"), obj->NewInstance());
   v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
@@ -7928,7 +8239,7 @@ static void RunHolderTest(v8::Handle<v8::ObjectTemplate> obj) {
 }
 
 
-static void PGetter2(Local<String> name,
+static void PGetter2(Local<Name> name,
                      const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   p_getter_count2++;
@@ -7965,7 +8276,7 @@ THREADED_TEST(PreInterceptorHolders) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> obj = ObjectTemplate::New(isolate);
-  obj->SetNamedPropertyHandler(PGetter2);
+  obj->SetHandler(v8::NamedPropertyHandlerConfiguration(PGetter2));
   p_getter_count2 = 0;
   RunHolderTest(obj);
   CHECK_EQ(40, p_getter_count2);
@@ -8635,7 +8946,7 @@ static void ThrowV8Exception(const v8::FunctionCallbackInfo<v8::Value>& info) {
 }
 
 
-THREADED_TEST(ExceptionGetMessage) {
+THREADED_TEST(ExceptionCreateMessage) {
   LocalContext context;
   v8::HandleScope scope(context->GetIsolate());
   v8::Handle<String> foo_str = v8_str("foo");
@@ -8660,7 +8971,7 @@ THREADED_TEST(ExceptionGetMessage) {
   CHECK(error->IsObject());
   CHECK(error.As<v8::Object>()->Get(message_str)->Equals(foo_str));
 
-  v8::Handle<v8::Message> message = v8::Exception::GetMessage(error);
+  v8::Handle<v8::Message> message = v8::Exception::CreateMessage(error);
   CHECK(!message.IsEmpty());
   CHECK_EQ(2, message->GetLineNumber());
   CHECK_EQ(2, message->GetStartColumn());
@@ -8669,6 +8980,10 @@ THREADED_TEST(ExceptionGetMessage) {
   CHECK(!stackTrace.IsEmpty());
   CHECK_EQ(2, stackTrace->GetFrameCount());
 
+  stackTrace = v8::Exception::GetStackTrace(error);
+  CHECK(!stackTrace.IsEmpty());
+  CHECK_EQ(2, stackTrace->GetFrameCount());
+
   v8::V8::SetCaptureStackTraceForUncaughtExceptions(false);
 
   // Now check message location when SetCaptureStackTraceForUncaughtExceptions
@@ -8686,7 +9001,7 @@ THREADED_TEST(ExceptionGetMessage) {
   CHECK(error->IsObject());
   CHECK(error.As<v8::Object>()->Get(message_str)->Equals(foo_str));
 
-  message = v8::Exception::GetMessage(error);
+  message = v8::Exception::CreateMessage(error);
   CHECK(!message.IsEmpty());
   CHECK_EQ(2, message->GetLineNumber());
   CHECK_EQ(9, message->GetStartColumn());
@@ -8694,6 +9009,7 @@ THREADED_TEST(ExceptionGetMessage) {
   // Should be empty stack trace.
   stackTrace = message->GetStackTrace();
   CHECK(stackTrace.IsEmpty());
+  CHECK(v8::Exception::GetStackTrace(error).IsEmpty());
 }
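This hunk tracks the rename of v8::Exception::GetMessage to CreateMessage and adds coverage for the new v8::Exception::GetStackTrace. What the assertions establish, condensed, with error as in the test:

    v8::Handle<v8::Message> msg = v8::Exception::CreateMessage(error);
    v8::Handle<v8::StackTrace> trace = v8::Exception::GetStackTrace(error);
    // With SetCaptureStackTraceForUncaughtExceptions(true), both msg and
    // trace carry a two-frame stack; with capture disabled, msg still
    // reports the throw location but both stack traces come back empty.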
 
 
@@ -8908,7 +9224,7 @@ TEST(TryCatchFinallyUsingTryCatchHandler) {
 
 void CEvaluate(const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::HandleScope scope(args.GetIsolate());
-  CompileRun(args[0]->ToString());
+  CompileRun(args[0]->ToString(args.GetIsolate()));
 }
 
 
@@ -9182,11 +9498,11 @@ TEST(SecurityTestGCAllowed) {
   CHECK(indexed_security_check_with_gc_called);
 
   named_security_check_with_gc_called = false;
-  CHECK(CompileRun("obj.foo")->ToString()->Equals(v8_str("1001")));
+  CHECK(CompileRun("obj.foo")->ToString(isolate)->Equals(v8_str("1001")));
   CHECK(named_security_check_with_gc_called);
 
   indexed_security_check_with_gc_called = false;
-  CHECK(CompileRun("obj[0]")->ToString()->Equals(v8_str("1002")));
+  CHECK(CompileRun("obj[0]")->ToString(isolate)->Equals(v8_str("1002")));
   CHECK(indexed_security_check_with_gc_called);
 }
 
@@ -9958,9 +10274,8 @@ TEST(SuperAccessControl) {
 
 static void IndexedPropertyEnumerator(
     const v8::PropertyCallbackInfo<v8::Array>& info) {
-  v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
+  v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 1);
   result->Set(0, v8::Integer::New(info.GetIsolate(), 7));
-  result->Set(1, v8::Object::New(info.GetIsolate()));
   info.GetReturnValue().Set(result);
 }
 
@@ -9969,7 +10284,7 @@ static void NamedPropertyEnumerator(
     const v8::PropertyCallbackInfo<v8::Array>& info) {
   v8::Handle<v8::Array> result = v8::Array::New(info.GetIsolate(), 2);
   result->Set(0, v8_str("x"));
-  result->Set(1, v8::Object::New(info.GetIsolate()));
+  result->Set(1, v8::Symbol::GetIterator(info.GetIsolate()));
   info.GetReturnValue().Set(result);
 }
 
@@ -9982,10 +10297,10 @@ THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
 
   obj_template->Set(v8_str("7"), v8::Integer::New(CcTest::isolate(), 7));
   obj_template->Set(v8_str("x"), v8::Integer::New(CcTest::isolate(), 42));
-  obj_template->SetIndexedPropertyHandler(NULL, NULL, NULL, NULL,
-                                          IndexedPropertyEnumerator);
-  obj_template->SetNamedPropertyHandler(NULL, NULL, NULL, NULL,
-                                        NamedPropertyEnumerator);
+  obj_template->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      NULL, NULL, NULL, NULL, IndexedPropertyEnumerator));
+  obj_template->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      NULL, NULL, NULL, NULL, NamedPropertyEnumerator));
 
   LocalContext context;
   v8::Handle<v8::Object> global = context->Global();
@@ -9995,13 +10310,26 @@ THREADED_TEST(GetOwnPropertyNamesWithInterceptor) {
       CompileRun("Object.getOwnPropertyNames(object)");
   CHECK(result->IsArray());
   v8::Handle<v8::Array> result_array = v8::Handle<v8::Array>::Cast(result);
-  CHECK_EQ(3, result_array->Length());
+  CHECK_EQ(2, result_array->Length());
   CHECK(result_array->Get(0)->IsString());
   CHECK(result_array->Get(1)->IsString());
-  CHECK(result_array->Get(2)->IsString());
   CHECK_EQ(v8_str("7"), result_array->Get(0));
-  CHECK_EQ(v8_str("[object Object]"), result_array->Get(1));
-  CHECK_EQ(v8_str("x"), result_array->Get(2));
+  CHECK_EQ(v8_str("x"), result_array->Get(1));
+
+  result = CompileRun("var ret = []; for (var k in object) ret.push(k); ret");
+  CHECK(result->IsArray());
+  result_array = v8::Handle<v8::Array>::Cast(result);
+  CHECK_EQ(2, result_array->Length());
+  CHECK(result_array->Get(0)->IsString());
+  CHECK(result_array->Get(1)->IsString());
+  CHECK_EQ(v8_str("7"), result_array->Get(0));
+  CHECK_EQ(v8_str("x"), result_array->Get(1));
+
+  result = CompileRun("Object.getOwnPropertySymbols(object)");
+  CHECK(result->IsArray());
+  result_array = v8::Handle<v8::Array>::Cast(result);
+  CHECK_EQ(1, result_array->Length());
+  CHECK_EQ(result_array->Get(0), v8::Symbol::GetIterator(isolate));
 }
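The enumerator hunks above encode the new reflection contract: a named-property enumerator may now include Symbols in its result array, and the runtime routes each kind to the matching API instead of stringifying everything (the old test expected "[object Object]" to leak through). Mirroring the template configured above:

    // NamedPropertyEnumerator reports ["x", Symbol.iterator]; the indexed
    // enumerator reports [7].
    CompileRun("Object.getOwnPropertyNames(object)");    // ["7", "x"]
    CompileRun("Object.getOwnPropertySymbols(object)");  // [Symbol.iterator]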
 
 
@@ -10275,15 +10603,13 @@ THREADED_TEST(AccessControlFlatten) {
 
 
 static void AccessControlNamedGetter(
-    Local<String>,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name>, const v8::PropertyCallbackInfo<v8::Value>& info) {
   info.GetReturnValue().Set(42);
 }
 
 
 static void AccessControlNamedSetter(
-    Local<String>,
-    Local<Value> value,
+    Local<Name>, Local<Value> value,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   info.GetReturnValue().Set(value);
 }
@@ -10322,10 +10648,10 @@ THREADED_TEST(AccessControlInterceptorIC) {
       v8::ObjectTemplate::New(isolate);
   object_template->SetAccessCheckCallbacks(NamedAccessCounter,
                                            IndexedAccessCounter);
-  object_template->SetNamedPropertyHandler(AccessControlNamedGetter,
-                                           AccessControlNamedSetter);
-  object_template->SetIndexedPropertyHandler(AccessControlIndexedGetter,
-                                             AccessControlIndexedSetter);
+  object_template->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      AccessControlNamedGetter, AccessControlNamedSetter));
+  object_template->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      AccessControlIndexedGetter, AccessControlIndexedSetter));
   Local<v8::Object> object = object_template->NewInstance();
 
   v8::HandleScope scope1(isolate);
@@ -10407,8 +10733,7 @@ THREADED_TEST(InstanceProperties) {
 
 
 static void GlobalObjectInstancePropertiesGet(
-    Local<String> key,
-    const v8::PropertyCallbackInfo<v8::Value>&) {
+    Local<Name> key, const v8::PropertyCallbackInfo<v8::Value>&) {
   ApiTestFuzzer::Fuzz();
 }
 
@@ -10420,8 +10745,8 @@ THREADED_TEST(GlobalObjectInstanceProperties) {
   Local<Value> global_object;
 
   Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
-  t->InstanceTemplate()->SetNamedPropertyHandler(
-      GlobalObjectInstancePropertiesGet);
+  t->InstanceTemplate()->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(GlobalObjectInstancePropertiesGet));
   Local<ObjectTemplate> instance_template = t->InstanceTemplate();
   instance_template->Set(v8_str("x"), v8_num(42));
   instance_template->Set(v8_str("f"),
@@ -10545,9 +10870,8 @@ static void ShadowIndexedGet(uint32_t index,
 }
 
 
-static void ShadowNamedGet(Local<String> key,
-                           const v8::PropertyCallbackInfo<v8::Value>&) {
-}
+static void ShadowNamedGet(Local<Name> key,
+                           const v8::PropertyCallbackInfo<v8::Value>&) {}
 
 
 THREADED_TEST(ShadowObject) {
@@ -10559,8 +10883,10 @@ THREADED_TEST(ShadowObject) {
   LocalContext context(NULL, global_template);
 
   Local<v8::FunctionTemplate> t = v8::FunctionTemplate::New(isolate);
-  t->InstanceTemplate()->SetNamedPropertyHandler(ShadowNamedGet);
-  t->InstanceTemplate()->SetIndexedPropertyHandler(ShadowIndexedGet);
+  t->InstanceTemplate()->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(ShadowNamedGet));
+  t->InstanceTemplate()->SetHandler(
+      v8::IndexedPropertyHandlerConfiguration(ShadowIndexedGet));
   Local<ObjectTemplate> proto = t->PrototypeTemplate();
   Local<ObjectTemplate> instance = t->InstanceTemplate();
 
@@ -11047,7 +11373,7 @@ THREADED_TEST(ConstructorForObject) {
         "(function() { var o = new obj('tipli'); return o.a; })()");
     CHECK(!try_catch.HasCaught());
     CHECK(value->IsString());
-    String::Utf8Value string_value1(value->ToString());
+    String::Utf8Value string_value1(value->ToString(isolate));
     CHECK_EQ("tipli", *string_value1);
 
     Local<Value> args2[] = { v8_str("tipli") };
@@ -11057,7 +11383,7 @@ THREADED_TEST(ConstructorForObject) {
     value = object2->Get(v8_str("a"));
     CHECK(!try_catch.HasCaught());
     CHECK(value->IsString());
-    String::Utf8Value string_value2(value->ToString());
+    String::Utf8Value string_value2(value->ToString(isolate));
     CHECK_EQ("tipli", *string_value2);
 
     // Call the Object's constructor with a Boolean.
@@ -11320,8 +11646,7 @@ THREADED_TEST(CrossEval) {
 
 
 // Test that calling eval in a context which has been detached from
-// its global throws an exception.  This behavior is consistent with
-// other JavaScript implementations.
+// its global proxy works.
 THREADED_TEST(EvalInDetachedGlobal) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
@@ -11349,8 +11674,7 @@ THREADED_TEST(EvalInDetachedGlobal) {
   context0->DetachGlobal();
   v8::TryCatch catcher;
   x_value = CompileRun("fun('x')");
-  CHECK(x_value.IsEmpty());
-  CHECK(catcher.HasCaught());
+  CHECK_EQ(42, x_value->Int32Value());
   context1->Exit();
 }
 
@@ -11668,8 +11992,7 @@ THREADED_TEST(HandleIteration) {
 
 
 static void InterceptorHasOwnPropertyGetter(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
 }
 
@@ -11680,7 +12003,8 @@ THREADED_TEST(InterceptorHasOwnProperty) {
   v8::HandleScope scope(isolate);
   Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
   Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
-  instance_templ->SetNamedPropertyHandler(InterceptorHasOwnPropertyGetter);
+  instance_templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorHasOwnPropertyGetter));
   Local<Function> function = fun_templ->GetFunction();
   context->Global()->Set(v8_str("constructor"), function);
   v8::Handle<Value> value = CompileRun(
@@ -11699,8 +12023,7 @@ THREADED_TEST(InterceptorHasOwnProperty) {
 
 
 static void InterceptorHasOwnPropertyGetterGC(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
 }
@@ -11712,7 +12035,8 @@ THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
   v8::HandleScope scope(isolate);
   Local<v8::FunctionTemplate> fun_templ = v8::FunctionTemplate::New(isolate);
   Local<v8::ObjectTemplate> instance_templ = fun_templ->InstanceTemplate();
-  instance_templ->SetNamedPropertyHandler(InterceptorHasOwnPropertyGetterGC);
+  instance_templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorHasOwnPropertyGetterGC));
   Local<Function> function = fun_templ->GetFunction();
   context->Global()->Set(v8_str("constructor"), function);
   // Let's first make some stuff so we can be sure to get a good GC.
@@ -11736,18 +12060,14 @@ THREADED_TEST(InterceptorHasOwnPropertyCausingGC) {
 }
 
 
-typedef void (*NamedPropertyGetter)(
-    Local<String> property,
-    const v8::PropertyCallbackInfo<v8::Value>& info);
-
-
-static void CheckInterceptorLoadIC(NamedPropertyGetter getter,
-                                   const char* source,
-                                   int expected) {
+static void CheckInterceptorLoadIC(
+    v8::GenericNamedPropertyGetterCallback getter, const char* source,
+    int expected) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(getter, 0, 0, 0, 0, v8_str("data"));
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(getter, 0, 0, 0, 0,
+                                                          v8_str("data")));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   v8::Handle<Value> value = CompileRun(source);
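The file-local NamedPropertyGetter typedef is dropped in favour of the public callback type, whose shape matches the Local<Name> migration applied to every getter in this file. Qualified here for clarity, the type CheckInterceptorLoadIC now accepts is:

    typedef void (*GenericNamedPropertyGetterCallback)(
        v8::Local<v8::Name> property,
        const v8::PropertyCallbackInfo<v8::Value>& info);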
@@ -11756,8 +12076,7 @@ static void CheckInterceptorLoadIC(NamedPropertyGetter getter,
 
 
 static void InterceptorLoadICGetter(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   v8::Isolate* isolate = CcTest::isolate();
   CHECK_EQ(isolate, info.GetIsolate());
@@ -11783,8 +12102,7 @@ THREADED_TEST(InterceptorLoadIC) {
 // (those cases are special cased to get better performance).
 
 static void InterceptorLoadXICGetter(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   info.GetReturnValue().Set(
       v8_str("x")->Equals(name) ?
@@ -11899,8 +12217,7 @@ THREADED_TEST(InterceptorLoadICInvalidatedField) {
 
 static int interceptor_load_not_handled_calls = 0;
 static void InterceptorLoadNotHandled(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ++interceptor_load_not_handled_calls;
 }
 
@@ -11961,7 +12278,8 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnHolder) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
   templ->SetAccessor(v8_str("y"), Return239Callback);
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
@@ -11991,7 +12309,8 @@ THREADED_TEST(InterceptorLoadICWithCallbackOnProto) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
-  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  templ_o->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
   v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
   templ_p->SetAccessor(v8_str("y"), Return239Callback);
 
@@ -12025,7 +12344,8 @@ THREADED_TEST(InterceptorLoadICForCallbackWithOverride) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
   templ->SetAccessor(v8_str("y"), Return239Callback);
 
   LocalContext context;
@@ -12054,7 +12374,8 @@ THREADED_TEST(InterceptorLoadICCallbackNotNeeded) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
-  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  templ_o->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
   v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
   templ_p->SetAccessor(v8_str("y"), Return239Callback);
 
@@ -12083,7 +12404,8 @@ THREADED_TEST(InterceptorLoadICInvalidatedCallback) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
-  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  templ_o->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
   v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
   templ_p->SetAccessor(v8_str("y"), Return239Callback, SetOnThis);
 
@@ -12116,7 +12438,8 @@ THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
-  templ_o->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  templ_o->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
   v8::Handle<v8::ObjectTemplate> templ_p = ObjectTemplate::New(isolate);
   templ_p->SetAccessor(v8_str("y"), Return239Callback, SetOnThis);
 
@@ -12142,8 +12465,7 @@ THREADED_TEST(InterceptorLoadICInvalidatedCallbackViaGlobal) {
 
 
 static void InterceptorLoadICGetter0(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   CHECK(v8_str("x")->Equals(name));
   info.GetReturnValue().Set(v8::Integer::New(info.GetIsolate(), 0));
@@ -12158,8 +12480,7 @@ THREADED_TEST(InterceptorReturningZero) {
 
 
 static void InterceptorStoreICSetter(
-    Local<String> key,
-    Local<Value> value,
+    Local<Name> key, Local<Value> value,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   CHECK(v8_str("x")->Equals(key));
   CHECK_EQ(42, value->Int32Value());
@@ -12172,9 +12493,9 @@ THREADED_TEST(InterceptorStoreIC) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorLoadICGetter,
-                                 InterceptorStoreICSetter,
-                                 0, 0, 0, v8_str("data"));
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      InterceptorLoadICGetter, InterceptorStoreICSetter, 0, 0, 0,
+      v8_str("data")));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   CompileRun(
@@ -12188,7 +12509,8 @@ THREADED_TEST(InterceptorStoreICWithNoSetter) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   v8::Handle<Value> value = CompileRun(
@@ -12207,8 +12529,7 @@ v8::Handle<Value> call_ic_function2;
 v8::Handle<Value> call_ic_function3;
 
 static void InterceptorCallICGetter(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   CHECK(v8_str("x")->Equals(name));
   info.GetReturnValue().Set(call_ic_function);
@@ -12220,7 +12541,8 @@ THREADED_TEST(InterceptorCallIC) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorCallICGetter);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   call_ic_function =
@@ -12240,7 +12562,7 @@ THREADED_TEST(InterceptorCallICSeesOthers) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   v8::Handle<Value> value = CompileRun(
@@ -12255,8 +12577,7 @@ THREADED_TEST(InterceptorCallICSeesOthers) {
 
 static v8::Handle<Value> call_ic_function4;
 static void InterceptorCallICGetter4(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   CHECK(v8_str("x")->Equals(name));
   info.GetReturnValue().Set(call_ic_function4);
@@ -12270,7 +12591,8 @@ THREADED_TEST(InterceptorCallICCacheableNotNeeded) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorCallICGetter4);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter4));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   call_ic_function4 =
@@ -12291,7 +12613,7 @@ THREADED_TEST(InterceptorCallICInvalidatedCacheable) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   v8::Handle<Value> value = CompileRun(
@@ -12319,7 +12641,7 @@ THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   v8::Handle<Value> value = CompileRun(
@@ -12336,8 +12658,7 @@ THREADED_TEST(InterceptorCallICConstantFunctionUsed) {
 
 static v8::Handle<Value> call_ic_function5;
 static void InterceptorCallICGetter5(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   if (v8_str("x")->Equals(name))
     info.GetReturnValue().Set(call_ic_function5);
@@ -12351,7 +12672,8 @@ THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorCallICGetter5);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter5));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   call_ic_function5 =
@@ -12370,8 +12692,7 @@ THREADED_TEST(InterceptorCallICConstantFunctionNotNeeded) {
 
 static v8::Handle<Value> call_ic_function6;
 static void InterceptorCallICGetter6(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   if (v8_str("x")->Equals(name))
     info.GetReturnValue().Set(call_ic_function6);
@@ -12385,7 +12706,8 @@ THREADED_TEST(InterceptorCallICConstantFunctionNotNeededWrapped) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorCallICGetter6);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorCallICGetter6));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   call_ic_function6 =
@@ -12416,7 +12738,7 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunction) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   v8::Handle<Value> value = CompileRun(
@@ -12447,7 +12769,7 @@ THREADED_TEST(InterceptorCallICInvalidatedConstantFunctionViaGlobal) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   v8::Handle<Value> value = CompileRun(
@@ -12473,7 +12795,7 @@ THREADED_TEST(InterceptorCallICCachedFromGlobal) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
-  templ_o->SetNamedPropertyHandler(NoBlockGetterX);
+  templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
 
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ_o->NewInstance());
@@ -12498,8 +12820,7 @@ THREADED_TEST(InterceptorCallICCachedFromGlobal) {
 }
 
 static void InterceptorCallICFastApi(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   CheckReturnValue(info, FUNCTION_ADDR(InterceptorCallICFastApi));
   int* call_count =
@@ -12688,9 +13009,9 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_TrivialSignature) {
   v8::Handle<v8::ObjectTemplate> proto_templ = fun_templ->PrototypeTemplate();
   proto_templ->Set(v8_str("method"), method_templ);
   v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
-  templ->SetNamedPropertyHandler(
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
       InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
-      v8::External::New(isolate, &interceptor_call_count));
+      v8::External::New(isolate, &interceptor_call_count)));
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
@@ -12718,9 +13039,9 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature) {
   proto_templ->Set(v8_str("method"), method_templ);
   fun_templ->SetHiddenPrototype(true);
   v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
-  templ->SetNamedPropertyHandler(
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
       InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
-      v8::External::New(isolate, &interceptor_call_count));
+      v8::External::New(isolate, &interceptor_call_count)));
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
@@ -12751,9 +13072,9 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss1) {
   proto_templ->Set(v8_str("method"), method_templ);
   fun_templ->SetHiddenPrototype(true);
   v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
-  templ->SetNamedPropertyHandler(
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
       InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
-      v8::External::New(isolate, &interceptor_call_count));
+      v8::External::New(isolate, &interceptor_call_count)));
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
@@ -12790,9 +13111,9 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss2) {
   proto_templ->Set(v8_str("method"), method_templ);
   fun_templ->SetHiddenPrototype(true);
   v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
-  templ->SetNamedPropertyHandler(
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
       InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
-      v8::External::New(isolate, &interceptor_call_count));
+      v8::External::New(isolate, &interceptor_call_count)));
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
@@ -12829,9 +13150,9 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
   proto_templ->Set(v8_str("method"), method_templ);
   fun_templ->SetHiddenPrototype(true);
   v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
-  templ->SetNamedPropertyHandler(
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
       InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
-      v8::External::New(isolate, &interceptor_call_count));
+      v8::External::New(isolate, &interceptor_call_count)));
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
@@ -12853,7 +13174,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_Miss3) {
   CHECK(try_catch.HasCaught());
   // TODO(verwaest): Adjust message.
   CHECK_EQ(v8_str("TypeError: undefined is not a function"),
-           try_catch.Exception()->ToString());
+           try_catch.Exception()->ToString(isolate));
   CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
   CHECK_GE(interceptor_call_count, 50);
 }
@@ -12872,9 +13193,9 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
   proto_templ->Set(v8_str("method"), method_templ);
   fun_templ->SetHiddenPrototype(true);
   v8::Handle<v8::ObjectTemplate> templ = fun_templ->InstanceTemplate();
-  templ->SetNamedPropertyHandler(
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
       InterceptorCallICFastApi, NULL, NULL, NULL, NULL,
-      v8::External::New(isolate, &interceptor_call_count));
+      v8::External::New(isolate, &interceptor_call_count)));
   LocalContext context;
   v8::Handle<v8::Function> fun = fun_templ->GetFunction();
   GenerateSomeGarbage();
@@ -12895,7 +13216,7 @@ THREADED_PROFILED_TEST(InterceptorCallICFastApi_SimpleSignature_TypeError) {
       "}");
   CHECK(try_catch.HasCaught());
   CHECK_EQ(v8_str("TypeError: Illegal invocation"),
-           try_catch.Exception()->ToString());
+           try_catch.Exception()->ToString(isolate));
   CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
   CHECK_GE(interceptor_call_count, 50);
 }
@@ -13028,7 +13349,7 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_Miss2) {
   CHECK(try_catch.HasCaught());
   // TODO(verwaest): Adjust message.
   CHECK_EQ(v8_str("TypeError: undefined is not a function"),
-           try_catch.Exception()->ToString());
+           try_catch.Exception()->ToString(isolate));
   CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
 }
 
@@ -13066,7 +13387,7 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_TypeError) {
       "}");
   CHECK(try_catch.HasCaught());
   CHECK_EQ(v8_str("TypeError: Illegal invocation"),
-           try_catch.Exception()->ToString());
+           try_catch.Exception()->ToString(isolate));
   CHECK_EQ(42, context->Global()->Get(v8_str("saved_result"))->Int32Value());
 }
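
The ToString edits in these hunks are an API change, not a behavioral one: the parameterless Value::ToString() is deprecated in favor of an overload taking the isolate explicitly, and ToObject follows suit. A hedged sketch of the new calls, assuming a live isolate and value are in scope:

  static void ConversionSketch(v8::Isolate* isolate,
                               v8::Local<v8::Value> value) {
    v8::Local<v8::String> str = value->ToString(isolate);  // was ToString()
    v8::Local<v8::Object> obj = value->ToObject(isolate);  // was ToObject()
    USE(str);
    USE(obj);
  }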
 
@@ -13074,8 +13395,7 @@ THREADED_PROFILED_TEST(CallICFastApi_SimpleSignature_TypeError) {
 v8::Handle<Value> keyed_call_ic_function;
 
 static void InterceptorKeyedCallICGetter(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   if (v8_str("x")->Equals(name)) {
     info.GetReturnValue().Set(keyed_call_ic_function);
@@ -13089,7 +13409,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange1) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   CompileRun(
@@ -13114,7 +13434,8 @@ THREADED_TEST(InterceptorKeyedCallICKeyChange2) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorKeyedCallICGetter);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorKeyedCallICGetter));
   LocalContext context;
   context->Global()->Set(v8_str("proto1"), templ->NewInstance());
   keyed_call_ic_function =
@@ -13142,7 +13463,7 @@ THREADED_TEST(InterceptorKeyedCallICKeyChangeOnGlobal) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(NoBlockGetterX);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ->NewInstance());
   CompileRun(
@@ -13168,7 +13489,7 @@ THREADED_TEST(InterceptorKeyedCallICFromGlobal) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
-  templ_o->SetNamedPropertyHandler(NoBlockGetterX);
+  templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ_o->NewInstance());
 
@@ -13194,7 +13515,7 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeBefore) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
-  templ_o->SetNamedPropertyHandler(NoBlockGetterX);
+  templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
   LocalContext context;
   context->Global()->Set(v8_str("proto"), templ_o->NewInstance());
 
@@ -13217,7 +13538,7 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ_o = ObjectTemplate::New(isolate);
-  templ_o->SetNamedPropertyHandler(NoBlockGetterX);
+  templ_o->SetHandler(v8::NamedPropertyHandlerConfiguration(NoBlockGetterX));
   LocalContext context;
   context->Global()->Set(v8_str("o"), templ_o->NewInstance());
 
@@ -13238,8 +13559,7 @@ THREADED_TEST(InterceptorKeyedCallICMapChangeAfter) {
 static int interceptor_call_count = 0;
 
 static void InterceptorICRefErrorGetter(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   if (v8_str("x")->Equals(name) && interceptor_call_count++ < 20) {
     info.GetReturnValue().Set(call_ic_function2);
@@ -13254,7 +13574,8 @@ THREADED_TEST(InterceptorICReferenceErrors) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorICRefErrorGetter);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorICRefErrorGetter));
   LocalContext context(0, templ, v8::Handle<Value>());
   call_ic_function2 = v8_compile("function h(x) { return x; }; h")->Run();
   v8::Handle<Value> value = CompileRun(
@@ -13282,8 +13603,7 @@ THREADED_TEST(InterceptorICReferenceErrors) {
 static int interceptor_ic_exception_get_count = 0;
 
 static void InterceptorICExceptionGetter(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   if (v8_str("x")->Equals(name) && ++interceptor_ic_exception_get_count < 20) {
     info.GetReturnValue().Set(call_ic_function3);
@@ -13302,7 +13622,8 @@ THREADED_TEST(InterceptorICGetterExceptions) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(InterceptorICExceptionGetter);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorICExceptionGetter));
   LocalContext context(0, templ, v8::Handle<Value>());
   call_ic_function3 = v8_compile("function h(x) { return x; }; h")->Run();
   v8::Handle<Value> value = CompileRun(
@@ -13330,9 +13651,8 @@ THREADED_TEST(InterceptorICGetterExceptions) {
 static int interceptor_ic_exception_set_count = 0;
 
 static void InterceptorICExceptionSetter(
-      Local<String> key,
-      Local<Value> value,
-      const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> key, Local<Value> value,
+    const v8::PropertyCallbackInfo<v8::Value>& info) {
   ApiTestFuzzer::Fuzz();
   if (++interceptor_ic_exception_set_count > 20) {
     info.GetIsolate()->ThrowException(v8_num(42));
@@ -13347,7 +13667,8 @@ THREADED_TEST(InterceptorICSetterExceptions) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(0, InterceptorICExceptionSetter);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(0, InterceptorICExceptionSetter));
   LocalContext context(0, templ, v8::Handle<Value>());
   v8::Handle<Value> value = CompileRun(
     "function f() {"
@@ -13366,8 +13687,8 @@ THREADED_TEST(NullNamedInterceptor) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(
-      static_cast<v8::NamedPropertyGetterCallback>(0));
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      static_cast<v8::GenericNamedPropertyGetterCallback>(0)));
   LocalContext context;
   templ->Set(CcTest::isolate(), "x", v8_num(42));
   v8::Handle<v8::Object> obj = templ->NewInstance();
@@ -13383,8 +13704,8 @@ THREADED_TEST(NullIndexedInterceptor) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(
-      static_cast<v8::IndexedPropertyGetterCallback>(0));
+  templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      static_cast<v8::IndexedPropertyGetterCallback>(0)));
   LocalContext context;
   templ->Set(CcTest::isolate(), "42", v8_num(42));
   v8::Handle<v8::Object> obj = templ->NewInstance();
@@ -13399,7 +13720,8 @@ THREADED_TEST(NamedPropertyHandlerGetterAttributes) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::FunctionTemplate> templ = v8::FunctionTemplate::New(isolate);
-  templ->InstanceTemplate()->SetNamedPropertyHandler(InterceptorLoadXICGetter);
+  templ->InstanceTemplate()->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(InterceptorLoadXICGetter));
   LocalContext env;
   env->Global()->Set(v8_str("obj"),
                      templ->GetFunction()->NewInstance());
@@ -13653,7 +13975,7 @@ THREADED_TEST(ObjectProtoToString) {
 
  // Normal ToString call should call the replaced Object.prototype.toString
   Local<v8::Object> instance = templ->GetFunction()->NewInstance();
-  Local<String> value = instance->ToString();
+  Local<String> value = instance->ToString(isolate);
   CHECK(value->IsString() && value->Equals(customized_tostring));
 
  // ObjectProtoToString should not call the replaced toString function.
@@ -13690,7 +14012,7 @@ TEST(ObjectProtoToStringES6) {
 
  // Normal ToString call should call the replaced Object.prototype.toString
   Local<v8::Object> instance = templ->GetFunction()->NewInstance();
-  Local<String> value = instance->ToString();
+  Local<String> value = instance->ToString(isolate);
   CHECK(value->IsString() && value->Equals(customized_tostring));
 
  // ObjectProtoToString should not call the replaced toString function.
@@ -13789,8 +14111,9 @@ TEST(ObjectProtoToStringES6) {
 
 
 THREADED_TEST(ObjectGetConstructorName) {
+  v8::Isolate* isolate = CcTest::isolate();
   LocalContext context;
-  v8::HandleScope scope(context->GetIsolate());
+  v8::HandleScope scope(isolate);
   v8_compile("function Parent() {};"
              "function Child() {};"
              "Child.prototype = new Parent();"
@@ -13800,16 +14123,17 @@ THREADED_TEST(ObjectGetConstructorName) {
              "var x = new outer.inner();")->Run();
 
   Local<v8::Value> p = context->Global()->Get(v8_str("p"));
-  CHECK(p->IsObject() && p->ToObject()->GetConstructorName()->Equals(
-      v8_str("Parent")));
+  CHECK(p->IsObject() &&
+        p->ToObject(isolate)->GetConstructorName()->Equals(v8_str("Parent")));
 
   Local<v8::Value> c = context->Global()->Get(v8_str("c"));
-  CHECK(c->IsObject() && c->ToObject()->GetConstructorName()->Equals(
-      v8_str("Child")));
+  CHECK(c->IsObject() &&
+        c->ToObject(isolate)->GetConstructorName()->Equals(v8_str("Child")));
 
   Local<v8::Value> x = context->Global()->Get(v8_str("x"));
-  CHECK(x->IsObject() && x->ToObject()->GetConstructorName()->Equals(
-      v8_str("outer.inner")));
+  CHECK(x->IsObject() &&
+        x->ToObject(isolate)->GetConstructorName()->Equals(
+            v8_str("outer.inner")));
 }
 
 
@@ -14357,7 +14681,7 @@ THREADED_TEST(NestedHandleScopeAndContexts) {
   v8::Local<Context> env = Context::New(isolate);
   env->Enter();
   v8::Handle<Value> value = NestedScope(env);
-  v8::Handle<String> str(value->ToString());
+  v8::Handle<String> str(value->ToString(isolate));
   CHECK(!str.IsEmpty());
   env->Exit();
 }
@@ -15865,9 +16189,14 @@ static void ForceSetSetter(v8::Local<v8::String> name,
   force_set_set_count++;
 }
 
+static void ForceSetInterceptGetter(
+    v8::Local<v8::Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
+  CHECK(name->IsString());
+  ForceSetGetter(Local<String>::Cast(name), info);
+}
+
 static void ForceSetInterceptSetter(
-    v8::Local<v8::String> name,
-    v8::Local<v8::Value> value,
+    v8::Local<v8::Name> name, v8::Local<v8::Value> value,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   force_set_set_count++;
   info.GetReturnValue().SetUndefined();
@@ -15927,7 +16256,8 @@ TEST(ForceSetWithInterceptor) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(ForceSetGetter, ForceSetInterceptSetter);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      ForceSetInterceptGetter, ForceSetInterceptSetter));
   LocalContext context(NULL, templ);
   v8::Handle<v8::Object> global = context->Global();
 
@@ -15995,7 +16325,7 @@ static bool pass_on_delete = false;
 
 
 static void ForceDeleteDeleter(
-    v8::Local<v8::String> name,
+    v8::Local<v8::Name> name,
     const v8::PropertyCallbackInfo<v8::Boolean>& info) {
   force_delete_interceptor_count++;
   if (pass_on_delete) return;
@@ -16010,7 +16340,8 @@ THREADED_TEST(ForceDeleteWithInterceptor) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   v8::Handle<v8::ObjectTemplate> templ = v8::ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(0, 0, 0, ForceDeleteDeleter);
+  templ->SetHandler(
+      v8::NamedPropertyHandlerConfiguration(0, 0, 0, ForceDeleteDeleter));
   LocalContext context(NULL, templ);
   v8::Handle<v8::Object> global = context->Global();
 
@@ -16653,8 +16984,8 @@ THREADED_TEST(PixelArrayWithInterceptor) {
   }
   v8::Handle<v8::ObjectTemplate> templ =
       v8::ObjectTemplate::New(context->GetIsolate());
-  templ->SetIndexedPropertyHandler(NotHandledIndexedPropertyGetter,
-                                   NotHandledIndexedPropertySetter);
+  templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      NotHandledIndexedPropertyGetter, NotHandledIndexedPropertySetter));
   v8::Handle<v8::Object> obj = templ->NewInstance();
   obj->SetIndexedPropertiesToPixelData(pixel_data, kElementCount);
   context->Global()->Set(v8_str("pixels"), obj);
@@ -18022,7 +18353,7 @@ void PromiseRejectCallback(v8::PromiseRejectMessage message) {
     CcTest::global()->Set(v8_str("rejected"), message.GetPromise());
     CcTest::global()->Set(v8_str("value"), message.GetValue());
     v8::Handle<v8::StackTrace> stack_trace =
-        v8::Exception::GetMessage(message.GetValue())->GetStackTrace();
+        v8::Exception::CreateMessage(message.GetValue())->GetStackTrace();
     if (!stack_trace.IsEmpty()) {
       promise_reject_frame_count = stack_trace->GetFrameCount();
       if (promise_reject_frame_count > 0) {
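
The rename in the hunk above is also mechanical: v8::Exception::GetMessage becomes v8::Exception::CreateMessage, which better reflects that a fresh Message object is constructed for the given value. In sketch form, with `exception` assumed to be a caught value:

  static v8::Handle<v8::StackTrace> StackTraceSketch(
      v8::Handle<v8::Value> exception) {
    return v8::Exception::CreateMessage(exception)->GetStackTrace();
  }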
@@ -18792,10 +19123,11 @@ class VisitorImpl : public v8::ExternalResourceVisitor {
 
 
 TEST(ExternalizeOldSpaceTwoByteCons) {
+  v8::Isolate* isolate = CcTest::isolate();
   LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
+  v8::HandleScope scope(isolate);
   v8::Local<v8::String> cons =
-      CompileRun("'Romeo Montague ' + 'Juliet Capulet'")->ToString();
+      CompileRun("'Romeo Montague ' + 'Juliet Capulet'")->ToString(isolate);
   CHECK(v8::Utils::OpenHandle(*cons)->IsConsString());
   CcTest::heap()->CollectAllAvailableGarbage();
   CHECK(CcTest::heap()->old_pointer_space()->Contains(
@@ -18814,10 +19146,11 @@ TEST(ExternalizeOldSpaceTwoByteCons) {
 
 
 TEST(ExternalizeOldSpaceOneByteCons) {
+  v8::Isolate* isolate = CcTest::isolate();
   LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
+  v8::HandleScope scope(isolate);
   v8::Local<v8::String> cons =
-      CompileRun("'Romeo Montague ' + 'Juliet Capulet'")->ToString();
+      CompileRun("'Romeo Montague ' + 'Juliet Capulet'")->ToString(isolate);
   CHECK(v8::Utils::OpenHandle(*cons)->IsConsString());
   CcTest::heap()->CollectAllAvailableGarbage();
   CHECK(CcTest::heap()->old_pointer_space()->Contains(
@@ -18836,8 +19169,9 @@ TEST(ExternalizeOldSpaceOneByteCons) {
 
 
 TEST(VisitExternalStrings) {
+  v8::Isolate* isolate = CcTest::isolate();
   LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
+  v8::HandleScope scope(isolate);
   const char* string = "Some string";
   uint16_t* two_byte_string = AsciiToTwoByteString(string);
   TestResource* resource[4];
@@ -18907,7 +19241,7 @@ TEST(ExternalInternalizedStringCollectedAtTearDown) {
     const char* s = "One string to test them all";
     TestOneByteResource* inscription =
         new TestOneByteResource(i::StrDup(s), &destroyed);
-    v8::Local<v8::String> ring = CompileRun("ring")->ToString();
+    v8::Local<v8::String> ring = CompileRun("ring")->ToString(isolate);
     CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
     ring->MakeExternal(inscription);
     // Ring is still alive.  Orcs are roaming freely across our lands.
@@ -18922,6 +19256,9 @@ TEST(ExternalInternalizedStringCollectedAtTearDown) {
 
 
 TEST(ExternalInternalizedStringCollectedAtGC) {
+  // TODO(mvstanton): vector ics need weak support.
+  if (i::FLAG_vector_ics) return;
+
   int destroyed = 0;
   { LocalContext env;
     v8::HandleScope handle_scope(env->GetIsolate());
@@ -18929,7 +19266,7 @@ TEST(ExternalInternalizedStringCollectedAtGC) {
     const char* s = "One string to test them all";
     TestOneByteResource* inscription =
         new TestOneByteResource(i::StrDup(s), &destroyed);
-    v8::Local<v8::String> ring = CompileRun("ring")->ToString();
+    v8::Local<v8::String> ring = CompileRun("ring").As<v8::String>();
     CHECK(v8::Utils::OpenHandle(*ring)->IsInternalizedString());
     ring->MakeExternal(inscription);
     // Ring is still alive.  Orcs are roaming freely across our lands.
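
Note the two different replacements for the old ToString() in the last two hunks: where a real conversion may be needed, the isolate-taking ToString(isolate) is used, but where the value is already known to be a string (the internalized "ring" literal), a plain handle cast avoids any conversion. A sketch of the distinction:

  static void CastVersusConvert(v8::Isolate* isolate, v8::Local<v8::Value> v) {
    v8::Local<v8::String> cast = v.As<v8::String>();    // cast only; v must already be a string
    v8::Local<v8::String> conv = v->ToString(isolate);  // converts non-strings
    USE(cast);
    USE(conv);
  }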
@@ -19068,7 +19405,7 @@ static void SpaghettiIncident(
     const v8::FunctionCallbackInfo<v8::Value>& args) {
   v8::HandleScope scope(args.GetIsolate());
   v8::TryCatch tc;
-  v8::Handle<v8::String> str(args[0]->ToString());
+  v8::Handle<v8::String> str(args[0]->ToString(args.GetIsolate()));
   USE(str);
   if (tc.HasCaught())
     tc.ReThrow();
@@ -19419,7 +19756,7 @@ static void SetterWhichSetsYOnThisTo23(
 }
 
 
-void FooGetInterceptor(Local<String> name,
+void FooGetInterceptor(Local<Name> name,
                        const v8::PropertyCallbackInfo<v8::Value>& info) {
   CHECK(v8::Utils::OpenHandle(*info.This())->IsJSObject());
   CHECK(v8::Utils::OpenHandle(*info.Holder())->IsJSObject());
@@ -19428,8 +19765,7 @@ void FooGetInterceptor(Local<String> name,
 }
 
 
-void FooSetInterceptor(Local<String> name,
-                       Local<Value> value,
+void FooSetInterceptor(Local<Name> name, Local<Value> value,
                        const v8::PropertyCallbackInfo<v8::Value>& info) {
   CHECK(v8::Utils::OpenHandle(*info.This())->IsJSObject());
   CHECK(v8::Utils::OpenHandle(*info.Holder())->IsJSObject());
@@ -19475,15 +19811,13 @@ script = v8_compile("new C2();");
 
 
 static void NamedPropertyGetterWhichReturns42(
-    Local<String> name,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> name, const v8::PropertyCallbackInfo<v8::Value>& info) {
   info.GetReturnValue().Set(v8_num(42));
 }
 
 
 static void NamedPropertySetterWhichSetsYOnThisTo23(
-    Local<String> name,
-    Local<Value> value,
+    Local<Name> name, Local<Value> value,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   if (name->Equals(v8_str("x"))) {
     Local<Object>::Cast(info.This())->Set(v8_str("y"), v8_num(23));
@@ -19495,8 +19829,9 @@ THREADED_TEST(InterceptorOnConstructorPrototype) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Local<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetNamedPropertyHandler(NamedPropertyGetterWhichReturns42,
-                                 NamedPropertySetterWhichSetsYOnThisTo23);
+  templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      NamedPropertyGetterWhichReturns42,
+      NamedPropertySetterWhichSetsYOnThisTo23));
   LocalContext context;
   context->Global()->Set(v8_str("P"), templ->NewInstance());
   CompileRun("function C1() {"
@@ -20788,8 +21123,8 @@ THREADED_TEST(Equals) {
 }
 
 
-static void Getter(v8::Local<v8::String> property,
-                   const v8::PropertyCallbackInfo<v8::Value>& info ) {
+static void Getter(v8::Local<v8::Name> property,
+                   const v8::PropertyCallbackInfo<v8::Value>& info) {
   info.GetReturnValue().Set(v8_str("42!"));
 }
 
@@ -20808,7 +21143,8 @@ TEST(NamedEnumeratorAndForIn) {
   v8::Context::Scope context_scope(context.local());
 
   v8::Handle<v8::ObjectTemplate> tmpl = v8::ObjectTemplate::New(isolate);
-  tmpl->SetNamedPropertyHandler(Getter, NULL, NULL, NULL, Enumerator);
+  tmpl->SetHandler(v8::NamedPropertyHandlerConfiguration(Getter, NULL, NULL,
+                                                         NULL, Enumerator));
   context->Global()->Set(v8_str("o"), tmpl->NewInstance());
   v8::Handle<v8::Array> result = v8::Handle<v8::Array>::Cast(CompileRun(
         "var result = []; for (var k in o) result.push(k); result"));
@@ -20953,8 +21289,7 @@ void HasOwnPropertyIndexedPropertyGetter(
 
 
 void HasOwnPropertyNamedPropertyGetter(
-    Local<String> property,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> property, const v8::PropertyCallbackInfo<v8::Value>& info) {
   if (property->Equals(v8_str("foo"))) info.GetReturnValue().Set(v8_str("yes"));
 }
 
@@ -20966,15 +21301,13 @@ void HasOwnPropertyIndexedPropertyQuery(
 
 
 void HasOwnPropertyNamedPropertyQuery(
-    Local<String> property,
-    const v8::PropertyCallbackInfo<v8::Integer>& info) {
+    Local<Name> property, const v8::PropertyCallbackInfo<v8::Integer>& info) {
   if (property->Equals(v8_str("foo"))) info.GetReturnValue().Set(1);
 }
 
 
 void HasOwnPropertyNamedPropertyQuery2(
-    Local<String> property,
-    const v8::PropertyCallbackInfo<v8::Integer>& info) {
+    Local<Name> property, const v8::PropertyCallbackInfo<v8::Integer>& info) {
   if (property->Equals(v8_str("bar"))) info.GetReturnValue().Set(1);
 }
 
@@ -21003,7 +21336,7 @@ TEST(HasOwnProperty) {
         "Bar.prototype = new Foo();"
         "new Bar();");
     CHECK(value->IsObject());
-    Handle<Object> object = value->ToObject();
+    Handle<Object> object = value->ToObject(isolate);
     CHECK(object->Has(v8_str("foo")));
     CHECK(!object->HasOwnProperty(v8_str("foo")));
     CHECK(object->HasOwnProperty(v8_str("bar")));
@@ -21013,7 +21346,8 @@ TEST(HasOwnProperty) {
   }
   { // Check named getter interceptors.
     Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-    templ->SetNamedPropertyHandler(HasOwnPropertyNamedPropertyGetter);
+    templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+        HasOwnPropertyNamedPropertyGetter));
     Handle<Object> instance = templ->NewInstance();
     CHECK(!instance->HasOwnProperty(v8_str("42")));
     CHECK(instance->HasOwnProperty(v8_str("foo")));
@@ -21021,7 +21355,8 @@ TEST(HasOwnProperty) {
   }
   { // Check indexed getter interceptors.
     Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-    templ->SetIndexedPropertyHandler(HasOwnPropertyIndexedPropertyGetter);
+    templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+        HasOwnPropertyIndexedPropertyGetter));
     Handle<Object> instance = templ->NewInstance();
     CHECK(instance->HasOwnProperty(v8_str("42")));
     CHECK(!instance->HasOwnProperty(v8_str("43")));
@@ -21029,14 +21364,16 @@ TEST(HasOwnProperty) {
   }
   { // Check named query interceptors.
     Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-    templ->SetNamedPropertyHandler(0, 0, HasOwnPropertyNamedPropertyQuery);
+    templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+        0, 0, HasOwnPropertyNamedPropertyQuery));
     Handle<Object> instance = templ->NewInstance();
     CHECK(instance->HasOwnProperty(v8_str("foo")));
     CHECK(!instance->HasOwnProperty(v8_str("bar")));
   }
   { // Check indexed query interceptors.
     Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-    templ->SetIndexedPropertyHandler(0, 0, HasOwnPropertyIndexedPropertyQuery);
+    templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+        0, 0, HasOwnPropertyIndexedPropertyQuery));
     Handle<Object> instance = templ->NewInstance();
     CHECK(instance->HasOwnProperty(v8_str("42")));
     CHECK(!instance->HasOwnProperty(v8_str("41")));
@@ -21050,9 +21387,9 @@ TEST(HasOwnProperty) {
   }
   { // Check that query wins on disagreement.
     Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-    templ->SetNamedPropertyHandler(HasOwnPropertyNamedPropertyGetter,
-                                   0,
-                                   HasOwnPropertyNamedPropertyQuery2);
+    templ->SetHandler(v8::NamedPropertyHandlerConfiguration(
+        HasOwnPropertyNamedPropertyGetter, 0,
+        HasOwnPropertyNamedPropertyQuery2));
     Handle<Object> instance = templ->NewInstance();
     CHECK(!instance->HasOwnProperty(v8_str("foo")));
     CHECK(instance->HasOwnProperty(v8_str("bar")));
@@ -21064,9 +21401,8 @@ TEST(IndexedInterceptorWithStringProto) {
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope scope(isolate);
   Handle<ObjectTemplate> templ = ObjectTemplate::New(isolate);
-  templ->SetIndexedPropertyHandler(NULL,
-                                   NULL,
-                                   HasOwnPropertyIndexedPropertyQuery);
+  templ->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      NULL, NULL, HasOwnPropertyIndexedPropertyQuery));
   LocalContext context;
   context->Global()->Set(v8_str("obj"), templ->NewInstance());
   CompileRun("var s = new String('foobar'); obj.__proto__ = s;");
@@ -21211,29 +21547,40 @@ THREADED_TEST(ReadOnlyIndexedProperties) {
 }
 
 
+static int CountLiveMapsInMapCache(i::Context* context) {
+  i::FixedArray* map_cache = i::FixedArray::cast(context->map_cache());
+  int length = map_cache->length();
+  int count = 0;
+  for (int i = 0; i < length; i++) {
+    i::Object* value = map_cache->get(i);
+    if (value->IsWeakCell() && !i::WeakCell::cast(value)->cleared()) count++;
+  }
+  return count;
+}
+
+
 THREADED_TEST(Regress1516) {
   LocalContext context;
   v8::HandleScope scope(context->GetIsolate());
 
+  // An object with 20 properties is not a common case, so its map should be
+  // removed from the map cache after GC.
   { v8::HandleScope temp_scope(context->GetIsolate());
-    CompileRun("({'a': 0})");
+    CompileRun(
+        "({"
+        "'a00': 0, 'a01': 0, 'a02': 0, 'a03': 0, 'a04': 0, "
+        "'a05': 0, 'a06': 0, 'a07': 0, 'a08': 0, 'a09': 0, "
+        "'a10': 0, 'a11': 0, 'a12': 0, 'a13': 0, 'a14': 0, "
+        "'a15': 0, 'a16': 0, 'a17': 0, 'a18': 0, 'a19': 0, "
+        "})");
   }
 
-  int elements;
-  { i::MapCache* map_cache =
-        i::MapCache::cast(CcTest::i_isolate()->context()->map_cache());
-    elements = map_cache->NumberOfElements();
-    CHECK_LE(1, elements);
-  }
+  int elements = CountLiveMapsInMapCache(CcTest::i_isolate()->context());
+  CHECK_LE(1, elements);
 
-  CcTest::heap()->CollectAllGarbage(
-      i::Heap::kAbortIncrementalMarkingMask);
-  { i::Object* raw_map_cache = CcTest::i_isolate()->context()->map_cache();
-    if (raw_map_cache != CcTest::heap()->undefined_value()) {
-      i::MapCache* map_cache = i::MapCache::cast(raw_map_cache);
-      CHECK_GT(elements, map_cache->NumberOfElements());
-    }
-  }
+  CcTest::heap()->CollectAllGarbage(i::Heap::kAbortIncrementalMarkingMask);
+
+  CHECK_GT(elements, CountLiveMapsInMapCache(CcTest::i_isolate()->context()));
 }
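
Regress1516 is reworked because the context's map cache is now a plain FixedArray of WeakCells rather than a MapCache hash table: entries die when the GC clears their cells, so the test counts uncleared cells before and after a full GC. The liveness predicate, factored out of CountLiveMapsInMapCache above:

  static bool MapCacheEntryIsLive(i::Object* value) {
    return value->IsWeakCell() && !i::WeakCell::cast(value)->cleared();
  }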
 
 
@@ -21242,12 +21589,11 @@ static bool BlockProtoNamedSecurityTestCallback(Local<v8::Object> global,
                                                 v8::AccessType type,
                                                 Local<Value> data) {
   // Only block read access to __proto__.
-  if (type == v8::ACCESS_GET &&
-      name->IsString() &&
-      name->ToString()->Length() == 9 &&
-      name->ToString()->Utf8Length() == 9) {
+  if (type == v8::ACCESS_GET && name->IsString() &&
+      name.As<v8::String>()->Length() == 9 &&
+      name.As<v8::String>()->Utf8Length() == 9) {
     char buffer[10];
-    CHECK_EQ(10, name->ToString()->WriteUtf8(buffer));
+    CHECK_EQ(10, name.As<v8::String>()->WriteUtf8(buffer));
     return strncmp(buffer, "__proto__", 9) != 0;
   }
 
@@ -21294,8 +21640,7 @@ THREADED_TEST(Regress93759) {
       context->Global();
 
  // Global object, the prototype of proxy_object. No security checks.
-  Local<Object> global_object =
-      proxy_object->GetPrototype()->ToObject();
+  Local<Object> global_object = proxy_object->GetPrototype()->ToObject(isolate);
 
   // Hidden prototype without security check.
   Local<Object> hidden_prototype =
@@ -21339,7 +21684,7 @@ THREADED_TEST(Regress93759) {
 
   Local<Value> result5 = CompileRun("Object.getPrototypeOf(hidden)");
   CHECK(result5->Equals(
-      object_with_hidden->GetPrototype()->ToObject()->GetPrototype()));
+      object_with_hidden->GetPrototype()->ToObject(isolate)->GetPrototype()));
 
   Local<Value> result6 = CompileRun("Object.getPrototypeOf(phidden)");
   CHECK(result6.IsEmpty());
@@ -21375,8 +21720,8 @@ static void TestReceiver(Local<Value> expected_result,
                          const char* code) {
   Local<Value> result = CompileRun(code);
   CHECK(result->IsObject());
-  CHECK(expected_receiver->Equals(result->ToObject()->Get(1)));
-  CHECK(expected_result->Equals(result->ToObject()->Get(0)));
+  CHECK(expected_receiver->Equals(result.As<v8::Object>()->Get(1)));
+  CHECK(expected_result->Equals(result.As<v8::Object>()->Get(0)));
 }
 
 
@@ -21764,8 +22109,8 @@ static void DebugEventInObserver(const v8::Debug::EventDetails& event_details) {
   Handle<Object> exec_state = event_details.GetExecutionState();
   Handle<Value> break_id = exec_state->Get(v8_str("break_id"));
   CompileRun("function f(id) { new FrameDetails(id, 0); }");
-  Handle<Function> fun = Handle<Function>::Cast(
-      CcTest::global()->Get(v8_str("f"))->ToObject());
+  Handle<Function> fun =
+      Handle<Function>::Cast(CcTest::global()->Get(v8_str("f")));
   fun->Call(CcTest::global(), 1, &break_id);
 }
 
@@ -21789,7 +22134,7 @@ TEST(Regress385349) {
 }
 
 
-#ifdef DEBUG
+#ifdef ENABLE_DISASSEMBLER
 static int probes_counter = 0;
 static int misses_counter = 0;
 static int updates_counter = 0;
@@ -21823,7 +22168,7 @@ static const char* kMegamorphicTestProgram =
 
 
 static void StubCacheHelper(bool primary) {
-#ifdef DEBUG
+#ifdef ENABLE_DISASSEMBLER
   i::FLAG_native_code_counters = true;
   if (primary) {
     i::FLAG_test_primary_stub_cache = true;
@@ -22170,7 +22515,8 @@ static void Helper137002(bool do_store,
   LocalContext context;
   Local<ObjectTemplate> templ = ObjectTemplate::New(context->GetIsolate());
   if (interceptor) {
-    templ->SetNamedPropertyHandler(FooGetInterceptor, FooSetInterceptor);
+    templ->SetHandler(v8::NamedPropertyHandlerConfiguration(FooGetInterceptor,
+                                                            FooSetInterceptor));
   } else {
     templ->SetAccessor(v8_str("foo"),
                        GetterWhichReturns42,
@@ -22576,7 +22922,8 @@ void CatcherCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
 
 
 void HasOwnPropertyCallback(const v8::FunctionCallbackInfo<v8::Value>& args) {
-  args[0]->ToObject()->HasOwnProperty(args[1]->ToString());
+  args[0]->ToObject(args.GetIsolate())->HasOwnProperty(
+      args[1]->ToString(args.GetIsolate()));
 }
 
 
@@ -22792,8 +23139,6 @@ class RequestInterruptTestBase {
 
     TestBody();
 
-    isolate_->ClearInterrupt();
-
     // Verify we arrived here because interruptor was called
     // not due to a bug causing us to exit the loop too early.
     CHECK(!should_continue());
@@ -22943,7 +23288,8 @@ class RequestInterruptTestWithMethodCallAndInterceptor
     proto->Set(v8_str("shouldContinue"), Function::New(
         isolate_, ShouldContinueCallback, v8::External::New(isolate_, this)));
     v8::Local<v8::ObjectTemplate> instance_template = t->InstanceTemplate();
-    instance_template->SetNamedPropertyHandler(EmptyInterceptor);
+    instance_template->SetHandler(
+        v8::NamedPropertyHandlerConfiguration(EmptyInterceptor));
 
     env_->Global()->Set(v8_str("Klass"), t->GetFunction());
 
@@ -22952,9 +23298,7 @@ class RequestInterruptTestWithMethodCallAndInterceptor
 
  private:
   static void EmptyInterceptor(
-      Local<String> property,
-      const v8::PropertyCallbackInfo<v8::Value>& info) {
-  }
+      Local<Name> property, const v8::PropertyCallbackInfo<v8::Value>& info) {}
 };
 
 
@@ -23043,10 +23387,9 @@ TEST(RequestInterruptTestWithMathAbs) {
 }
 
 
-class ClearInterruptFromAnotherThread
-    : public RequestInterruptTestBase {
+class RequestMultipleInterrupts : public RequestInterruptTestBase {
  public:
-  ClearInterruptFromAnotherThread() : i_thread(this), sem2_(0) { }
+  RequestMultipleInterrupts() : i_thread(this), counter_(0) {}
 
   virtual void StartInterruptThread() {
     i_thread.Start();
@@ -23063,39 +23406,33 @@ class ClearInterruptFromAnotherThread
  private:
   class InterruptThread : public v8::base::Thread {
    public:
-    explicit InterruptThread(ClearInterruptFromAnotherThread* test)
+    enum { NUM_INTERRUPTS = 10 };
+    explicit InterruptThread(RequestMultipleInterrupts* test)
         : Thread(Options("RequestInterruptTest")), test_(test) {}
 
     virtual void Run() {
       test_->sem_.Wait();
-      test_->isolate_->RequestInterrupt(&OnInterrupt, test_);
-      test_->sem_.Wait();
-      test_->isolate_->ClearInterrupt();
-      test_->sem2_.Signal();
+      for (int i = 0; i < NUM_INTERRUPTS; i++) {
+        test_->isolate_->RequestInterrupt(&OnInterrupt, test_);
+      }
     }
 
     static void OnInterrupt(v8::Isolate* isolate, void* data) {
-      ClearInterruptFromAnotherThread* test =
-          reinterpret_cast<ClearInterruptFromAnotherThread*>(data);
-      test->sem_.Signal();
-      bool success = test->sem2_.WaitFor(v8::base::TimeDelta::FromSeconds(2));
-      // Crash instead of timeout to make this failure more prominent.
-      CHECK(success);
-      test->should_continue_ = false;
+      RequestMultipleInterrupts* test =
+          reinterpret_cast<RequestMultipleInterrupts*>(data);
+      test->should_continue_ = ++test->counter_ < NUM_INTERRUPTS;
     }
 
    private:
-     ClearInterruptFromAnotherThread* test_;
+    RequestMultipleInterrupts* test_;
   };
 
   InterruptThread i_thread;
-  v8::base::Semaphore sem2_;
+  int counter_;
 };
 
 
-TEST(ClearInterruptFromAnotherThread) {
-  ClearInterruptFromAnotherThread().RunTest();
-}
+TEST(RequestMultipleInterrupts) { RequestMultipleInterrupts().RunTest(); }
 
 
 static Local<Value> function_new_expected_env;
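
The interrupt API changed shape in the hunk above: Isolate::ClearInterrupt() is removed, and every RequestInterrupt() call now queues exactly one callback invocation, which is why the rewritten test simply requests NUM_INTERRUPTS of them and counts them off. A minimal sketch of the one-shot contract; names are illustrative:

  static void CountingInterrupt(v8::Isolate* isolate, void* data) {
    ++*static_cast<int*>(data);  // fires once per RequestInterrupt() call
  }

  static void QueueThreeInterrupts(v8::Isolate* isolate, int* counter) {
    for (int i = 0; i < 3; i++) {
      isolate->RequestInterrupt(&CountingInterrupt, counter);
    }
  }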
@@ -24127,6 +24464,7 @@ TEST(StreamingProducesParserCache) {
   const v8::ScriptCompiler::CachedData* cached_data = source.GetCachedData();
   CHECK(cached_data != NULL);
   CHECK(cached_data->data != NULL);
+  CHECK(!cached_data->rejected);
   CHECK_GT(cached_data->length, 0);
 }
 
@@ -24191,3 +24529,201 @@ TEST(StreamingUtf8ScriptWithMultipleMultibyteCharactersSomeSplit2) {
   const char* chunks[] = {chunk1, chunk2, "foo();", NULL};
   RunStreamingTest(chunks, v8::ScriptCompiler::StreamedSource::UTF8);
 }
+
+
+void TestInvalidCacheData(v8::ScriptCompiler::CompileOptions option) {
+  const char* garbage = "garbage garbage garbage garbage.";
+  const uint8_t* data = reinterpret_cast<const uint8_t*>(garbage);
+  int length = 16;
+  v8::ScriptCompiler::CachedData* cached_data =
+      new v8::ScriptCompiler::CachedData(data, length);
+  DCHECK(!cached_data->rejected);
+  v8::ScriptOrigin origin(v8_str("origin"));
+  v8::ScriptCompiler::Source source(v8_str("42"), origin, cached_data);
+  v8::Handle<v8::Script> script =
+      v8::ScriptCompiler::Compile(CcTest::isolate(), &source, option);
+  CHECK(cached_data->rejected);
+  CHECK_EQ(42, script->Run()->Int32Value());
+}
+
+
+TEST(InvalidCacheData) {
+  v8::V8::Initialize();
+  v8::HandleScope scope(CcTest::isolate());
+  LocalContext context;
+  TestInvalidCacheData(v8::ScriptCompiler::kConsumeParserCache);
+  TestInvalidCacheData(v8::ScriptCompiler::kConsumeCodeCache);
+}
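
TestInvalidCacheData exercises the new `rejected` flag on ScriptCompiler::CachedData: handing V8 stale or corrupt cache data no longer fails compilation; the compiler falls back to a full parse and marks the data rejected. A hedged consumer-side sketch (the helper name is illustrative; note that Source takes ownership of the CachedData):

  static bool CompiledWithCache(v8::Isolate* isolate,
                                v8::Local<v8::String> src,
                                v8::ScriptCompiler::CachedData* data) {
    v8::ScriptCompiler::Source source(src, data);
    v8::Handle<v8::Script> script = v8::ScriptCompiler::Compile(
        isolate, &source, v8::ScriptCompiler::kConsumeParserCache);
    return !script.IsEmpty() && !source.GetCachedData()->rejected;
  }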
+
+
+TEST(ParserCacheRejectedGracefully) {
+  i::FLAG_min_preparse_length = 0;
+  v8::V8::Initialize();
+  v8::HandleScope scope(CcTest::isolate());
+  LocalContext context;
+  // Produce valid cached data.
+  v8::ScriptOrigin origin(v8_str("origin"));
+  v8::Local<v8::String> source_str = v8_str("function foo() {}");
+  v8::ScriptCompiler::Source source(source_str, origin);
+  v8::Handle<v8::Script> script = v8::ScriptCompiler::Compile(
+      CcTest::isolate(), &source, v8::ScriptCompiler::kProduceParserCache);
+  CHECK(!script.IsEmpty());
+  const v8::ScriptCompiler::CachedData* original_cached_data =
+      source.GetCachedData();
+  CHECK(original_cached_data != NULL);
+  CHECK(original_cached_data->data != NULL);
+  CHECK(!original_cached_data->rejected);
+  CHECK_GT(original_cached_data->length, 0);
+  // Recompiling the same script with it won't reject the data.
+  {
+    v8::ScriptCompiler::Source source_with_cached_data(
+        source_str, origin,
+        new v8::ScriptCompiler::CachedData(original_cached_data->data,
+                                           original_cached_data->length));
+    v8::Handle<v8::Script> script =
+        v8::ScriptCompiler::Compile(CcTest::isolate(), &source_with_cached_data,
+                                    v8::ScriptCompiler::kConsumeParserCache);
+    CHECK(!script.IsEmpty());
+    const v8::ScriptCompiler::CachedData* new_cached_data =
+        source_with_cached_data.GetCachedData();
+    CHECK(new_cached_data != NULL);
+    CHECK(!new_cached_data->rejected);
+  }
+  // Compile an incompatible script with the cached data. The new script doesn't
+  // have the same starting position for the function as the old one, so the old
+  // cached data will be incompatible with it and will be rejected.
+  {
+    v8::Local<v8::String> incompatible_source_str =
+        v8_str("   function foo() {}");
+    v8::ScriptCompiler::Source source_with_cached_data(
+        incompatible_source_str, origin,
+        new v8::ScriptCompiler::CachedData(original_cached_data->data,
+                                           original_cached_data->length));
+    v8::Handle<v8::Script> script =
+        v8::ScriptCompiler::Compile(CcTest::isolate(), &source_with_cached_data,
+                                    v8::ScriptCompiler::kConsumeParserCache);
+    CHECK(!script.IsEmpty());
+    const v8::ScriptCompiler::CachedData* new_cached_data =
+        source_with_cached_data.GetCachedData();
+    CHECK(new_cached_data != NULL);
+    CHECK(new_cached_data->rejected);
+  }
+}
+
+
+TEST(StringConcatOverflow) {
+  v8::V8::Initialize();
+  v8::HandleScope scope(CcTest::isolate());
+  RandomLengthOneByteResource* r =
+      new RandomLengthOneByteResource(i::String::kMaxLength);
+  v8::Local<v8::String> str = v8::String::NewExternal(CcTest::isolate(), r);
+  CHECK(!str.IsEmpty());
+  v8::TryCatch try_catch;
+  v8::Local<v8::String> result = v8::String::Concat(str, str);
+  CHECK(result.IsEmpty());
+  CHECK(!try_catch.HasCaught());
+}
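
StringConcatOverflow pins down the overflow contract: concatenating past i::String::kMaxLength yields an empty handle without scheduling an exception, so callers must check IsEmpty() themselves rather than rely on a TryCatch. Sketch:

  static v8::Local<v8::String> ConcatOrEmpty(v8::Local<v8::String> a,
                                             v8::Local<v8::String> b) {
    v8::Local<v8::String> result = v8::String::Concat(a, b);
    // An empty result means the combined length exceeded the engine limit.
    return result;
  }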
+
+
+TEST(TurboAsmDisablesNeuter) {
+  v8::V8::Initialize();
+  v8::HandleScope scope(CcTest::isolate());
+  LocalContext context;
+#if V8_TURBOFAN_TARGET
+  bool should_be_neuterable = !i::FLAG_turbo_asm;
+#else
+  bool should_be_neuterable = true;
+#endif
+  const char* load =
+      "function Module(stdlib, foreign, heap) {"
+      "  'use asm';"
+      "  var MEM32 = new stdlib.Int32Array(heap);"
+      "  function load() { return MEM32[0]; }"
+      "  return { load: load };"
+      "}"
+      "var buffer = new ArrayBuffer(4);"
+      "Module(this, {}, buffer).load();"
+      "buffer";
+
+  v8::Local<v8::ArrayBuffer> result = CompileRun(load).As<v8::ArrayBuffer>();
+  CHECK_EQ(should_be_neuterable, result->IsNeuterable());
+
+  const char* store =
+      "function Module(stdlib, foreign, heap) {"
+      "  'use asm';"
+      "  var MEM32 = new stdlib.Int32Array(heap);"
+      "  function store() { MEM32[0] = 0; }"
+      "  return { store: store };"
+      "}"
+      "var buffer = new ArrayBuffer(4);"
+      "Module(this, {}, buffer).store();"
+      "buffer";
+
+  result = CompileRun(store).As<v8::ArrayBuffer>();
+  CHECK_EQ(should_be_neuterable, result->IsNeuterable());
+}
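
TurboAsmDisablesNeuter checks that once TurboFan's asm.js pipeline (--turbo_asm) has compiled a module against a heap buffer, that ArrayBuffer reports itself non-neuterable, since the generated code assumes the backing store stays put. A hedged embedder-side sketch; in this API era Neuter() additionally requires the buffer to have been externalized first:

  static void NeuterIfAllowed(v8::Local<v8::ArrayBuffer> buffer) {
    if (buffer->IsNeuterable()) {
      buffer->Neuter();  // detaches the buffer from its backing store
    }
  }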
+
+
+TEST(GetPrototypeAccessControl) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope handle_scope(isolate);
+  LocalContext env;
+
+  v8::Handle<v8::ObjectTemplate> obj_template =
+      v8::ObjectTemplate::New(isolate);
+  obj_template->SetAccessCheckCallbacks(BlockEverythingNamed,
+                                        BlockEverythingIndexed);
+
+  env->Global()->Set(v8_str("prohibited"), obj_template->NewInstance());
+
+  {
+    v8::TryCatch try_catch;
+    CompileRun(
+        "function f() { %_GetPrototype(prohibited); }"
+        "%OptimizeFunctionOnNextCall(f);"
+        "f();");
+    CHECK(try_catch.HasCaught());
+  }
+}
+
+
+TEST(GetPrototypeHidden) {
+  i::FLAG_allow_natives_syntax = true;
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope handle_scope(isolate);
+  LocalContext env;
+
+  Handle<FunctionTemplate> t = FunctionTemplate::New(isolate);
+  t->SetHiddenPrototype(true);
+  Handle<Object> proto = t->GetFunction()->NewInstance();
+  Handle<Object> object = Object::New(isolate);
+  Handle<Object> proto2 = Object::New(isolate);
+  object->SetPrototype(proto);
+  proto->SetPrototype(proto2);
+
+  env->Global()->Set(v8_str("object"), object);
+  env->Global()->Set(v8_str("proto"), proto);
+  env->Global()->Set(v8_str("proto2"), proto2);
+
+  v8::Handle<v8::Value> result = CompileRun("%_GetPrototype(object)");
+  CHECK(result->Equals(proto2));
+
+  result = CompileRun(
+      "function f() { return %_GetPrototype(object); }"
+      "%OptimizeFunctionOnNextCall(f);"
+      "f()");
+  CHECK(result->Equals(proto2));
+}
+
+
+TEST(ClassPrototypeCreationContext) {
+  i::FLAG_harmony_classes = true;
+  v8::Isolate* isolate = CcTest::isolate();
+  v8::HandleScope handle_scope(isolate);
+  LocalContext env;
+
+  Handle<Object> result = Handle<Object>::Cast(
+      CompileRun("'use strict'; class Example { }; Example.prototype"));
+  CHECK(env.local() == result->CreationContext());
+}
index b6e260e..2bcf022 100644 (file)
@@ -1375,14 +1375,14 @@ TEST(16) {
   __ pkhtb(r2, r0, Operand(r1, ASR, 8));
   __ str(r2, MemOperand(r4, OFFSET_OF(T, dst1)));
 
-  __ uxtb16(r2, Operand(r0, ROR, 8));
+  __ uxtb16(r2, r0, 8);
   __ str(r2, MemOperand(r4, OFFSET_OF(T, dst2)));
 
-  __ uxtb(r2, Operand(r0, ROR, 8));
+  __ uxtb(r2, r0, 8);
   __ str(r2, MemOperand(r4, OFFSET_OF(T, dst3)));
 
   __ ldr(r0, MemOperand(r4, OFFSET_OF(T, src2)));
-  __ uxtab(r2, r0, Operand(r1, ROR, 8));
+  __ uxtab(r2, r0, r1, 8);
   __ str(r2, MemOperand(r4, OFFSET_OF(T, dst4)));
 
   __ ldm(ia_w, sp, r4.bit() | pc.bit());
@@ -1606,6 +1606,214 @@ TEST(smmul) {
 }
 
 
+TEST(sxtb) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ sxtb(r1, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<int8_t>(x)), r);
+    USE(dummy);
+  }
+}
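
TEST(sxtb) above and the seven sibling tests that follow share one template: assemble a single extend instruction, call it through FUNCTION_CAST on 128 random inputs, and compare against a C++ reference. The reference semantics, read straight off the CHECK_EQ lines:

  int32_t RefSxtb(int32_t x) { return static_cast<int8_t>(x); }    // sign-extend low byte
  int32_t RefSxth(int32_t x) { return static_cast<int16_t>(x); }   // sign-extend low halfword
  int32_t RefUxtb(int32_t x) { return static_cast<uint8_t>(x); }   // zero-extend low byte
  int32_t RefUxth(int32_t x) { return static_cast<uint16_t>(x); }  // zero-extend low halfword
  int32_t RefSxtab(int32_t x, int32_t y) { return RefSxtb(x) + y; }  // extend, then add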
+
+
+TEST(sxtab) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ sxtab(r1, r2, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt(), y = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<int8_t>(x)) + y, r);
+    USE(dummy);
+  }
+}
+
+
+TEST(sxth) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ sxth(r1, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<int16_t>(x)), r);
+    USE(dummy);
+  }
+}
+
+
+TEST(sxtah) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ sxtah(r1, r2, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt(), y = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<int16_t>(x)) + y, r);
+    USE(dummy);
+  }
+}
+
+
+TEST(uxtb) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ uxtb(r1, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<uint8_t>(x)), r);
+    USE(dummy);
+  }
+}
+
+
+TEST(uxtab) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ uxtab(r1, r2, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt(), y = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<uint8_t>(x)) + y, r);
+    USE(dummy);
+  }
+}
+
+
+TEST(uxth) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ uxth(r1, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, 0, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(x)), r);
+    USE(dummy);
+  }
+}
+
+
+TEST(uxtah) {
+  CcTest::InitializeVM();
+  Isolate* const isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+  RandomNumberGenerator* const rng = isolate->random_number_generator();
+  Assembler assm(isolate, nullptr, 0);
+  __ uxtah(r1, r2, r1);
+  __ str(r1, MemOperand(r0));
+  __ bx(lr);
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  code->Print(std::cout);
+#endif
+  F3 f = FUNCTION_CAST<F3>(code->entry());
+  for (size_t i = 0; i < 128; ++i) {
+    int32_t r, x = rng->NextInt(), y = rng->NextInt();
+    Object* dummy = CALL_GENERATED_CODE(f, &r, x, y, 0, 0);
+    CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(x)) + y, r);
+    USE(dummy);
+  }
+}
+
+
 TEST(code_relative_offset) {
   // Test extracting the offset of a label from the beginning of the code
   // in a register.
index d943297..f59c3c4 100644 (file)
@@ -589,4 +589,458 @@ TEST(AssemblerIa32SSE) {
 }
 
 
+typedef int (*F9)(double x, double y, double z);
+TEST(AssemblerX64FMA_sd) {
+  CcTest::InitializeVM();
+  if (!CpuFeatures::IsSupported(FMA3)) return;
+
+  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+  HandleScope scope(isolate);
+  v8::internal::byte buffer[1024];
+  MacroAssembler assm(isolate, buffer, sizeof buffer);
+  {
+    CpuFeatureScope fscope(&assm, FMA3);
+    Label exit;
+    __ movsd(xmm0, Operand(esp, 1 * kPointerSize));
+    __ movsd(xmm1, Operand(esp, 3 * kPointerSize));
+    __ movsd(xmm2, Operand(esp, 5 * kPointerSize));
+    // arguments in xmm0, xmm1 and xmm2
+    // xmm0 * xmm1 + xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulsd(xmm3, xmm1);
+    __ addsd(xmm3, xmm2);  // Expected result in xmm3
+
+    __ sub(esp, Immediate(kDoubleSize));  // For memory operand
+    // vfmadd132sd
+    __ mov(eax, Immediate(1));  // Test number
+    __ movaps(xmm4, xmm0);
+    __ vfmadd132sd(xmm4, xmm2, xmm1);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd213sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ vfmadd213sd(xmm4, xmm0, xmm2);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd231sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ vfmadd231sd(xmm4, xmm0, xmm1);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfmadd132sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ movsd(Operand(esp, 0), xmm1);
+    __ vfmadd132sd(xmm4, xmm2, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd213sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ movsd(Operand(esp, 0), xmm2);
+    __ vfmadd213sd(xmm4, xmm0, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd231sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ movsd(Operand(esp, 0), xmm1);
+    __ vfmadd231sd(xmm4, xmm0, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+    // xmm0 * xmm1 - xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulsd(xmm3, xmm1);
+    __ subsd(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfmsub132sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ vfmsub132sd(xmm4, xmm2, xmm1);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub213sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ vfmsub213sd(xmm4, xmm0, xmm2);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub231sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ vfmsub231sd(xmm4, xmm0, xmm1);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfmsub132sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ movsd(Operand(esp, 0), xmm1);
+    __ vfmsub132sd(xmm4, xmm2, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub213sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ movsd(Operand(esp, 0), xmm2);
+    __ vfmsub213sd(xmm4, xmm0, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub231sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ movsd(Operand(esp, 0), xmm1);
+    __ vfmsub231sd(xmm4, xmm0, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+
+    // - xmm0 * xmm1 + xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulsd(xmm3, xmm1);
+    __ Move(xmm4, (uint64_t)1 << 63);
+    __ xorpd(xmm3, xmm4);
+    __ addsd(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfnmadd132sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ vfnmadd132sd(xmm4, xmm2, xmm1);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd213sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ vfnmadd213sd(xmm4, xmm0, xmm2);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd231sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ vfnmadd231sd(xmm4, xmm0, xmm1);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfnmadd132sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ movsd(Operand(esp, 0), xmm1);
+    __ vfnmadd132sd(xmm4, xmm2, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd213sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ movsd(Operand(esp, 0), xmm2);
+    __ vfnmadd213sd(xmm4, xmm0, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd231sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ movsd(Operand(esp, 0), xmm1);
+    __ vfnmadd231sd(xmm4, xmm0, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+
+    // - xmm0 * xmm1 - xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulsd(xmm3, xmm1);
+    __ Move(xmm4, (uint64_t)1 << 63);
+    __ xorpd(xmm3, xmm4);
+    __ subsd(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfnmsub132sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ vfnmsub132sd(xmm4, xmm2, xmm1);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub213sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ vfnmsub213sd(xmm4, xmm0, xmm2);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub231sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ vfnmsub231sd(xmm4, xmm0, xmm1);
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfnmsub132sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ movsd(Operand(esp, 0), xmm1);
+    __ vfnmsub132sd(xmm4, xmm2, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub213sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ movsd(Operand(esp, 0), xmm2);
+    __ vfnmsub213sd(xmm4, xmm0, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub231sd
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ movsd(Operand(esp, 0), xmm1);
+    __ vfnmsub231sd(xmm4, xmm0, Operand(esp, 0));
+    __ ucomisd(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+
+    __ xor_(eax, eax);
+    __ bind(&exit);
+    __ add(esp, Immediate(kDoubleSize));
+    __ ret(0);
+  }
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  OFStream os(stdout);
+  code->Print(os);
+#endif
+
+  F9 f = FUNCTION_CAST<F9>(code->entry());
+  CHECK_EQ(0, f(0.000092662107262076, -2.460774966188315, -1.0958787393627414));
+}
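
A note on the operand-order suffixes exercised above (and in the _ss test below): for vfmadd{132,213,231}sd(dst, a, b) the three digits encode which operands are multiplied and which is added, matching the expected-value setup in the test:

  // 132: dst = dst * b + a
  // 213: dst = a * dst + b
  // 231: dst = a * b + dst
  // The fmsub/fnmadd/fnmsub families negate the addend and/or the product,
  // as the xorpd-against-the-sign-bit reference computations above show.
  static double FmaddRef(double x, double y, double z) { return x * y + z; }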
+
+
+typedef int (*F10)(float x, float y, float z);
+TEST(AssemblerX64FMA_ss) {
+  CcTest::InitializeVM();
+  if (!CpuFeatures::IsSupported(FMA3)) return;
+
+  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+  HandleScope scope(isolate);
+  v8::internal::byte buffer[1024];
+  MacroAssembler assm(isolate, buffer, sizeof buffer);
+  {
+    CpuFeatureScope fscope(&assm, FMA3);
+    Label exit;
+    __ movss(xmm0, Operand(esp, 1 * kPointerSize));
+    __ movss(xmm1, Operand(esp, 2 * kPointerSize));
+    __ movss(xmm2, Operand(esp, 3 * kPointerSize));
+    // arguments in xmm0, xmm1 and xmm2
+    // xmm0 * xmm1 + xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulss(xmm3, xmm1);
+    __ addss(xmm3, xmm2);  // Expected result in xmm3
+
+    __ sub(esp, Immediate(kDoubleSize));  // For memory operand
+    // vfmadd132ss
+    __ mov(eax, Immediate(1));  // Test number
+    __ movaps(xmm4, xmm0);
+    __ vfmadd132ss(xmm4, xmm2, xmm1);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd213ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ vfmadd213ss(xmm4, xmm0, xmm2);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd231ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ vfmadd231ss(xmm4, xmm0, xmm1);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfmadd132ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ movss(Operand(esp, 0), xmm1);
+    __ vfmadd132ss(xmm4, xmm2, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd213ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ movss(Operand(esp, 0), xmm2);
+    __ vfmadd213ss(xmm4, xmm0, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd231ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ movss(Operand(esp, 0), xmm1);
+    __ vfmadd231ss(xmm4, xmm0, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+    // xmm0 * xmm1 - xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulss(xmm3, xmm1);
+    __ subss(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfmsub132ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ vfmsub132ss(xmm4, xmm2, xmm1);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub213ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ vfmsub213ss(xmm4, xmm0, xmm2);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub231ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ vfmsub231ss(xmm4, xmm0, xmm1);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfmsub132ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ movss(Operand(esp, 0), xmm1);
+    __ vfmsub132ss(xmm4, xmm2, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub213ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ movss(Operand(esp, 0), xmm2);
+    __ vfmsub213ss(xmm4, xmm0, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub231ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ movss(Operand(esp, 0), xmm1);
+    __ vfmsub231ss(xmm4, xmm0, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+
+    // - xmm0 * xmm1 + xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulss(xmm3, xmm1);
+    __ Move(xmm4, (uint32_t)1 << 31);
+    __ xorps(xmm3, xmm4);
+    __ addss(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfnmadd132ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ vfnmadd132ss(xmm4, xmm2, xmm1);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd213ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ vfnmadd213ss(xmm4, xmm0, xmm2);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd231ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ vfnmadd231ss(xmm4, xmm0, xmm1);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfnmadd132ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ movss(Operand(esp, 0), xmm1);
+    __ vfnmadd132ss(xmm4, xmm2, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd213ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ movss(Operand(esp, 0), xmm2);
+    __ vfnmadd213ss(xmm4, xmm0, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd231ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ movss(Operand(esp, 0), xmm1);
+    __ vfnmadd231ss(xmm4, xmm0, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+
+    // - xmm0 * xmm1 - xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulss(xmm3, xmm1);
+    __ Move(xmm4, (uint32_t)1 << 31);
+    __ xorps(xmm3, xmm4);
+    __ subss(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfnmsub132ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ vfnmsub132ss(xmm4, xmm2, xmm1);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub213ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ vfnmsub213ss(xmm4, xmm0, xmm2);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub231ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ vfnmsub231ss(xmm4, xmm0, xmm1);
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfnmsub132ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm0);
+    __ movss(Operand(esp, 0), xmm1);
+    __ vfnmsub132ss(xmm4, xmm2, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub213ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm1);
+    __ movss(Operand(esp, 0), xmm2);
+    __ vfnmsub213ss(xmm4, xmm0, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub231ss
+    __ inc(eax);
+    __ movaps(xmm4, xmm2);
+    __ movss(Operand(esp, 0), xmm1);
+    __ vfnmsub231ss(xmm4, xmm0, Operand(esp, 0));
+    __ ucomiss(xmm4, xmm3);
+    __ j(not_equal, &exit);
+
+
+    __ xor_(eax, eax);
+    __ bind(&exit);
+    __ add(esp, Immediate(kDoubleSize));
+    __ ret(0);
+  }
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  OFStream os(stdout);
+  code->Print(os);
+#endif
+
+  F10 f = FUNCTION_CAST<F10>(code->entry());
+  CHECK_EQ(0, f(9.26621069e-05f, -2.4607749f, -1.09587872f));
+}
 #undef __
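
A note on the operand-order suffixes exercised above: for the scalar FMA
forms, the digits in 132/213/231 name which operands feed the multiply and
which one is added. vfmadd132sd computes dst = dst*src3 + src2,
vfmadd213sd computes dst = src2*dst + src3, and vfmadd231sd computes
dst = src2*src3 + dst. Below is a minimal standalone C++ sketch of the same
identities using std::fma; it is an illustration only, not part of the
patch.

    #include <cassert>
    #include <cmath>

    // "dst" plays the role of xmm4/xmm8 in the tests; a and b are the two
    // remaining sources. All three forms compute x*y + z with a single
    // rounding, they just route the operands differently.
    double fmadd132(double dst, double a, double b) { return std::fma(dst, b, a); }
    double fmadd213(double dst, double a, double b) { return std::fma(a, dst, b); }
    double fmadd231(double dst, double a, double b) { return std::fma(a, b, dst); }

    int main() {
      const double x = 0.000092662107262076;  // same inputs as the tests
      const double y = -2.460774966188315;
      const double z = -1.0958787393627414;
      const double expected = std::fma(x, y, z);
      assert(fmadd132(x, z, y) == expected);  // mirrors vfmadd132sd(xmm4=x, z, y)
      assert(fmadd213(y, x, z) == expected);  // mirrors vfmadd213sd(xmm4=y, x, z)
      assert(fmadd231(z, x, y) == expected);  // mirrors vfmadd231sd(xmm4=z, x, y)
      return 0;
    }

Note that the tests build their expected value with a separate mul and add
(two roundings) and compare it against the fused result (one rounding), so
they implicitly depend on inputs for which both agree; the constants passed
to f are presumably chosen with that in mind.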
index 3d305b6..23d0be6 100644
@@ -736,4 +736,454 @@ TEST(AssemblerX64SSE) {
   F6 f = FUNCTION_CAST<F6>(code->entry());
   CHECK_EQ(2, f(1.0, 2.0));
 }
+
+
+typedef int (*F7)(double x, double y, double z);
+TEST(AssemblerX64FMA_sd) {
+  CcTest::InitializeVM();
+  if (!CpuFeatures::IsSupported(FMA3)) return;
+
+  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+  HandleScope scope(isolate);
+  v8::internal::byte buffer[1024];
+  MacroAssembler assm(isolate, buffer, sizeof buffer);
+  {
+    CpuFeatureScope fscope(&assm, FMA3);
+    Label exit;
+    // arguments in xmm0, xmm1 and xmm2
+    // xmm0 * xmm1 + xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulsd(xmm3, xmm1);
+    __ addsd(xmm3, xmm2);  // Expected result in xmm3
+
+    __ subq(rsp, Immediate(kDoubleSize));  // For memory operand
+    // vfmadd132sd
+    __ movl(rax, Immediate(1));  // Test number
+    __ movaps(xmm8, xmm0);
+    __ vfmadd132sd(xmm8, xmm2, xmm1);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd213sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ vfmadd213sd(xmm8, xmm0, xmm2);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd231sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ vfmadd231sd(xmm8, xmm0, xmm1);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfmadd132sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ vfmadd132sd(xmm8, xmm2, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd213sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ movsd(Operand(rsp, 0), xmm2);
+    __ vfmadd213sd(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd231sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ vfmadd231sd(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+    // xmm0 * xmm1 - xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulsd(xmm3, xmm1);
+    __ subsd(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfmsub132sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ vfmsub132sd(xmm8, xmm2, xmm1);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub213sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ vfmsub213sd(xmm8, xmm0, xmm2);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub231sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ vfmsub231sd(xmm8, xmm0, xmm1);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfmsub132sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ vfmsub132sd(xmm8, xmm2, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub213sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ movsd(Operand(rsp, 0), xmm2);
+    __ vfmsub213sd(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub231sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ vfmsub231sd(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+
+    // - xmm0 * xmm1 + xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulsd(xmm3, xmm1);
+    __ Move(xmm4, (uint64_t)1 << 63);
+    __ xorpd(xmm3, xmm4);
+    __ addsd(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfnmadd132sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ vfnmadd132sd(xmm8, xmm2, xmm1);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd213sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ vfnmadd213sd(xmm8, xmm0, xmm2);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd231sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ vfnmadd231sd(xmm8, xmm0, xmm1);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfnmadd132sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ vfnmadd132sd(xmm8, xmm2, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd213sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ movsd(Operand(rsp, 0), xmm2);
+    __ vfnmadd213sd(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd231sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ vfnmadd231sd(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+
+    // - xmm0 * xmm1 - xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulsd(xmm3, xmm1);
+    __ Move(xmm4, (uint64_t)1 << 63);
+    __ xorpd(xmm3, xmm4);
+    __ subsd(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfnmsub132sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ vfnmsub132sd(xmm8, xmm2, xmm1);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub213sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ vfnmsub213sd(xmm8, xmm0, xmm2);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub231sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ vfnmsub231sd(xmm8, xmm0, xmm1);
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfnmsub132sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ vfnmsub132sd(xmm8, xmm2, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub213sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ movsd(Operand(rsp, 0), xmm2);
+    __ vfnmsub213sd(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub231sd
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ movsd(Operand(rsp, 0), xmm1);
+    __ vfnmsub231sd(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomisd(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+
+    __ xorl(rax, rax);
+    __ bind(&exit);
+    __ addq(rsp, Immediate(kDoubleSize));
+    __ ret(0);
+  }
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  OFStream os(stdout);
+  code->Print(os);
+#endif
+
+  F7 f = FUNCTION_CAST<F7>(code->entry());
+  CHECK_EQ(0, f(0.000092662107262076, -2.460774966188315, -1.0958787393627414));
+}
+
+
+typedef int (*F8)(float x, float y, float z);
+TEST(AssemblerX64FMA_ss) {
+  CcTest::InitializeVM();
+  if (!CpuFeatures::IsSupported(FMA3)) return;
+
+  Isolate* isolate = reinterpret_cast<Isolate*>(CcTest::isolate());
+  HandleScope scope(isolate);
+  v8::internal::byte buffer[1024];
+  MacroAssembler assm(isolate, buffer, sizeof buffer);
+  {
+    CpuFeatureScope fscope(&assm, FMA3);
+    Label exit;
+    // arguments in xmm0, xmm1 and xmm2
+    // xmm0 * xmm1 + xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulss(xmm3, xmm1);
+    __ addss(xmm3, xmm2);  // Expected result in xmm3
+
+    __ subq(rsp, Immediate(kDoubleSize));  // For memory operand
+    // vfmadd132ss
+    __ movl(rax, Immediate(1));  // Test number
+    __ movaps(xmm8, xmm0);
+    __ vfmadd132ss(xmm8, xmm2, xmm1);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd213ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ vfmadd213ss(xmm8, xmm0, xmm2);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd231ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ vfmadd231ss(xmm8, xmm0, xmm1);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfmadd132ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ movss(Operand(rsp, 0), xmm1);
+    __ vfmadd132ss(xmm8, xmm2, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd213ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ movss(Operand(rsp, 0), xmm2);
+    __ vfmadd213ss(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmadd231ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ movss(Operand(rsp, 0), xmm1);
+    __ vfmadd231ss(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+    // xmm0 * xmm1 - xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulss(xmm3, xmm1);
+    __ subss(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfmsub132ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ vfmsub132ss(xmm8, xmm2, xmm1);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub213ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ vfmsub213ss(xmm8, xmm0, xmm2);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub231ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ vfmsub231ss(xmm8, xmm0, xmm1);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfmsub132ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ movss(Operand(rsp, 0), xmm1);
+    __ vfmsub132ss(xmm8, xmm2, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub213ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ movss(Operand(rsp, 0), xmm2);
+    __ vfmsub213ss(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfmsub231ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ movss(Operand(rsp, 0), xmm1);
+    __ vfmsub231ss(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+
+    // - xmm0 * xmm1 + xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulss(xmm3, xmm1);
+    __ Move(xmm4, (uint32_t)1 << 31);
+    __ xorps(xmm3, xmm4);
+    __ addss(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfnmadd132ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ vfnmadd132ss(xmm8, xmm2, xmm1);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd213ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ vfnmadd213ss(xmm8, xmm0, xmm2);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd231ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ vfnmadd231ss(xmm8, xmm0, xmm1);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfnmadd132ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ movss(Operand(rsp, 0), xmm1);
+    __ vfnmadd132ss(xmm8, xmm2, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd213ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ movss(Operand(rsp, 0), xmm2);
+    __ vfnmadd213ss(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmadd231ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ movss(Operand(rsp, 0), xmm1);
+    __ vfnmadd231ss(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+
+    // - xmm0 * xmm1 - xmm2
+    __ movaps(xmm3, xmm0);
+    __ mulss(xmm3, xmm1);
+    __ Move(xmm4, (uint32_t)1 << 31);
+    __ xorps(xmm3, xmm4);
+    __ subss(xmm3, xmm2);  // Expected result in xmm3
+
+    // vfnmsub132ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ vfnmsub132ss(xmm8, xmm2, xmm1);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub213ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ vfnmsub213ss(xmm8, xmm0, xmm2);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub231ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ vfnmsub231ss(xmm8, xmm0, xmm1);
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+    // vfnmsub132ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm0);
+    __ movss(Operand(rsp, 0), xmm1);
+    __ vfnmsub132ss(xmm8, xmm2, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub213ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm1);
+    __ movss(Operand(rsp, 0), xmm2);
+    __ vfnmsub213ss(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+    // vfnmsub231ss
+    __ incq(rax);
+    __ movaps(xmm8, xmm2);
+    __ movss(Operand(rsp, 0), xmm1);
+    __ vfnmsub231ss(xmm8, xmm0, Operand(rsp, 0));
+    __ ucomiss(xmm8, xmm3);
+    __ j(not_equal, &exit);
+
+
+    __ xorl(rax, rax);
+    __ bind(&exit);
+    __ addq(rsp, Immediate(kDoubleSize));
+    __ ret(0);
+  }
+
+  CodeDesc desc;
+  assm.GetCode(&desc);
+  Handle<Code> code = isolate->factory()->NewCode(
+      desc, Code::ComputeFlags(Code::STUB), Handle<Code>());
+#ifdef OBJECT_PRINT
+  OFStream os(stdout);
+  code->Print(os);
+#endif
+
+  F8 f = FUNCTION_CAST<F8>(code->entry());
+  CHECK_EQ(0, f(9.26621069e-05f, -2.4607749f, -1.09587872f));
+}
 #undef __
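
Both the ia32 and x64 files build their negated-product expectations by
XOR-ing the sign bit (bit 63 via xorpd for doubles, bit 31 via xorps for
floats) rather than multiplying by -1. Here is a hedged portable C++ sketch
of that trick; the memcpy-based type punning is this sketch's own choice,
not code from the patch.

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // Negate a double by toggling its sign bit, as the xorpd with
    // (uint64_t)1 << 63 does above; floats use bit 31 the same way.
    double flip_sign(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof bits);  // well-defined type punning
      bits ^= uint64_t{1} << 63;            // toggle only the sign bit
      std::memcpy(&d, &bits, sizeof d);
      return d;
    }

    int main() {
      assert(flip_sign(2.5) == -2.5);
      assert(flip_sign(-2.5) == 2.5);
      // Unlike computing 0.0 - d, this also flips the sign of -0.0 and
      // of NaNs without touching the FPU status flags.
      return 0;
    }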
index 31d5eba..096d5c7 100644
@@ -41,7 +41,7 @@ TEST(List) {
   Isolate* isolate = CcTest::i_isolate();
   Zone zone(isolate);
   AstValueFactory value_factory(&zone, 0);
-  AstNodeFactory<AstNullVisitor> factory(&value_factory);
+  AstNodeFactory factory(&value_factory);
   AstNode* node = factory.NewEmptyStatement(RelocInfo::kNoPosition);
   list->Add(node);
   CHECK_EQ(1, list->length());
index d3c06ba..a05231e 100644
@@ -310,10 +310,10 @@ TEST(FeedbackVectorPreservedAcrossRecompiles) {
 
   // Verify that we gathered feedback.
   int expected_slots = 0;
-  int expected_ic_slots = FLAG_vector_ics ? 2 : 1;
+  int expected_ic_slots = 1;
   CHECK_EQ(expected_slots, feedback_vector->Slots());
   CHECK_EQ(expected_ic_slots, feedback_vector->ICSlots());
-  FeedbackVectorICSlot slot_for_a(FLAG_vector_ics ? 1 : 0);
+  FeedbackVectorICSlot slot_for_a(0);
   CHECK(feedback_vector->Get(slot_for_a)->IsJSFunction());
 
   CompileRun("%OptimizeFunctionOnNextCall(f); f(fun1);");
@@ -348,20 +348,19 @@ TEST(FeedbackVectorUnaffectedByScopeChanges) {
           *v8::Handle<v8::Function>::Cast(
               CcTest::global()->Get(v8_str("morphing_call"))));
 
-  int expected_slots = 0;
-  int expected_ic_slots = FLAG_vector_ics ? 2 : 1;
-  CHECK_EQ(expected_slots, f->shared()->feedback_vector()->Slots());
-  CHECK_EQ(expected_ic_slots, f->shared()->feedback_vector()->ICSlots());
-
-  // And yet it's not compiled.
+  // Not compiled, and so no feedback vector allocated yet.
   CHECK(!f->shared()->is_compiled());
+  CHECK_EQ(0, f->shared()->feedback_vector()->Slots());
+  CHECK_EQ(0, f->shared()->feedback_vector()->ICSlots());
 
   CompileRun("morphing_call();");
 
-  // The vector should have the same size despite the new scoping.
+  // Now a feedback vector is allocated.
+  CHECK(f->shared()->is_compiled());
+  int expected_slots = 0;
+  int expected_ic_slots = FLAG_vector_ics ? 2 : 1;
   CHECK_EQ(expected_slots, f->shared()->feedback_vector()->Slots());
   CHECK_EQ(expected_ic_slots, f->shared()->feedback_vector()->ICSlots());
-  CHECK(f->shared()->is_compiled());
 }
 
 
index 5d38a16..c3c65fd 100644
@@ -621,7 +621,7 @@ static void DebugEventBreakPointHitCount(
         last_function_hit[0] = '\0';
       } else {
         CHECK(result->IsString());
-        v8::Handle<v8::String> function_name(result->ToString());
+        v8::Handle<v8::String> function_name(result.As<v8::String>());
         function_name->WriteUtf8(last_function_hit);
       }
     }
@@ -656,7 +656,7 @@ static void DebugEventBreakPointHitCount(
         last_script_name_hit[0] = '\0';
       } else {
         CHECK(result->IsString());
-        v8::Handle<v8::String> script_name(result->ToString());
+        v8::Handle<v8::String> script_name(result.As<v8::String>());
         script_name->WriteUtf8(last_script_name_hit);
       }
     }
@@ -775,7 +775,7 @@ static void DebugEventEvaluate(
       v8::Handle<v8::Value> result =
           evaluate_check_function->Call(exec_state, argc, argv);
       if (!result->IsTrue()) {
-        v8::String::Utf8Value utf8(checks[i].expected->ToString());
+        v8::String::Utf8Value utf8(checks[i].expected);
         V8_Fatal(__FILE__, __LINE__, "%s != %s", checks[i].expr, *utf8);
       }
     }
@@ -849,7 +849,7 @@ static void DebugEventStepSequence(
     v8::Handle<v8::Value> result = frame_function_name->Call(exec_state,
                                                              argc, argv);
     CHECK(result->IsString());
-    v8::String::Utf8Value function_name(result->ToString());
+    v8::String::Utf8Value function_name(result->ToString(CcTest::isolate()));
     CHECK_EQ(1, StrLength(*function_name));
     CHECK_EQ((*function_name)[0],
               expected_step_sequence[break_point_hit_count]);
@@ -2860,7 +2860,7 @@ TEST(DebugStepKeyedLoadLoop) {
   foo->Call(env->Global(), kArgc, args);
 
   // With stepping all break locations are hit.
-  CHECK_EQ(35, break_point_hit_count);
+  CHECK_EQ(45, break_point_hit_count);
 
   v8::Debug::SetDebugEventListener(NULL);
   CheckDebuggerUnloaded();
@@ -2908,7 +2908,7 @@ TEST(DebugStepKeyedStoreLoop) {
   foo->Call(env->Global(), kArgc, args);
 
   // With stepping all break locations are hit.
-  CHECK_EQ(34, break_point_hit_count);
+  CHECK_EQ(44, break_point_hit_count);
 
   v8::Debug::SetDebugEventListener(NULL);
   CheckDebuggerUnloaded();
@@ -2952,7 +2952,7 @@ TEST(DebugStepNamedLoadLoop) {
   foo->Call(env->Global(), 0, NULL);
 
   // With stepping all break locations are hit.
-  CHECK_EQ(55, break_point_hit_count);
+  CHECK_EQ(65, break_point_hit_count);
 
   v8::Debug::SetDebugEventListener(NULL);
   CheckDebuggerUnloaded();
@@ -2995,9 +2995,7 @@ static void DoDebugStepNamedStoreLoop(int expected) {
 
 
 // Test of the stepping mechanism for named load in a loop.
-TEST(DebugStepNamedStoreLoop) {
-  DoDebugStepNamedStoreLoop(24);
-}
+TEST(DebugStepNamedStoreLoop) { DoDebugStepNamedStoreLoop(34); }
 
 
 // Test the stepping mechanism with different ICs.
@@ -3330,14 +3328,14 @@ TEST(DebugStepFor) {
   break_point_hit_count = 0;
   v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
   foo->Call(env->Global(), argc, argv_10);
-  CHECK_EQ(23, break_point_hit_count);
+  CHECK_EQ(45, break_point_hit_count);
 
   // Looping 100 times.
   step_action = StepIn;
   break_point_hit_count = 0;
   v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
   foo->Call(env->Global(), argc, argv_100);
-  CHECK_EQ(203, break_point_hit_count);
+  CHECK_EQ(405, break_point_hit_count);
 
   // Get rid of the debug event listener.
   v8::Debug::SetDebugEventListener(NULL);
@@ -3381,7 +3379,7 @@ TEST(DebugStepForContinue) {
   v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
   result = foo->Call(env->Global(), argc, argv_10);
   CHECK_EQ(5, result->Int32Value());
-  CHECK_EQ(52, break_point_hit_count);
+  CHECK_EQ(62, break_point_hit_count);
 
   // Looping 100 times.
   step_action = StepIn;
@@ -3389,7 +3387,7 @@ TEST(DebugStepForContinue) {
   v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
   result = foo->Call(env->Global(), argc, argv_100);
   CHECK_EQ(50, result->Int32Value());
-  CHECK_EQ(457, break_point_hit_count);
+  CHECK_EQ(557, break_point_hit_count);
 
   // Get rid of the debug event listener.
   v8::Debug::SetDebugEventListener(NULL);
@@ -3434,7 +3432,7 @@ TEST(DebugStepForBreak) {
   v8::Handle<v8::Value> argv_10[argc] = { v8::Number::New(isolate, 10) };
   result = foo->Call(env->Global(), argc, argv_10);
   CHECK_EQ(9, result->Int32Value());
-  CHECK_EQ(55, break_point_hit_count);
+  CHECK_EQ(64, break_point_hit_count);
 
   // Looping 100 times.
   step_action = StepIn;
@@ -3442,7 +3440,7 @@ TEST(DebugStepForBreak) {
   v8::Handle<v8::Value> argv_100[argc] = { v8::Number::New(isolate, 100) };
   result = foo->Call(env->Global(), argc, argv_100);
   CHECK_EQ(99, result->Int32Value());
-  CHECK_EQ(505, break_point_hit_count);
+  CHECK_EQ(604, break_point_hit_count);
 
   // Get rid of the debug event listener.
   v8::Debug::SetDebugEventListener(NULL);
@@ -3473,7 +3471,7 @@ TEST(DebugStepForIn) {
   step_action = StepIn;
   break_point_hit_count = 0;
   foo->Call(env->Global(), 0, NULL);
-  CHECK_EQ(6, break_point_hit_count);
+  CHECK_EQ(8, break_point_hit_count);
 
   // Create a function for testing stepping. Run it to allow it to get
   // optimized.
@@ -3490,7 +3488,7 @@ TEST(DebugStepForIn) {
   step_action = StepIn;
   break_point_hit_count = 0;
   foo->Call(env->Global(), 0, NULL);
-  CHECK_EQ(8, break_point_hit_count);
+  CHECK_EQ(10, break_point_hit_count);
 
   // Get rid of the debug event listener.
   v8::Debug::SetDebugEventListener(NULL);
@@ -4352,9 +4350,10 @@ static void IndexedEnum(const v8::PropertyCallbackInfo<v8::Array>& info) {
 }
 
 
-static void NamedGetter(v8::Local<v8::String> name,
+static void NamedGetter(v8::Local<v8::Name> name,
                         const v8::PropertyCallbackInfo<v8::Value>& info) {
-  v8::String::Utf8Value n(name);
+  if (name->IsSymbol()) return;
+  v8::String::Utf8Value n(v8::Local<v8::String>::Cast(name));
   if (strcmp(*n, "a") == 0) {
     info.GetReturnValue().Set(v8::String::NewFromUtf8(info.GetIsolate(), "AA"));
     return;
@@ -4387,26 +4386,26 @@ TEST(InterceptorPropertyMirror) {
 
   // Create object with named interceptor.
   v8::Handle<v8::ObjectTemplate> named = v8::ObjectTemplate::New(isolate);
-  named->SetNamedPropertyHandler(NamedGetter, NULL, NULL, NULL, NamedEnum);
+  named->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      NamedGetter, NULL, NULL, NULL, NamedEnum));
   env->Global()->Set(
       v8::String::NewFromUtf8(isolate, "intercepted_named"),
       named->NewInstance());
 
   // Create object with indexed interceptor.
   v8::Handle<v8::ObjectTemplate> indexed = v8::ObjectTemplate::New(isolate);
-  indexed->SetIndexedPropertyHandler(IndexedGetter,
-                                     NULL,
-                                     NULL,
-                                     NULL,
-                                     IndexedEnum);
+  indexed->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      IndexedGetter, NULL, NULL, NULL, IndexedEnum));
   env->Global()->Set(
       v8::String::NewFromUtf8(isolate, "intercepted_indexed"),
       indexed->NewInstance());
 
   // Create object with both named and indexed interceptor.
   v8::Handle<v8::ObjectTemplate> both = v8::ObjectTemplate::New(isolate);
-  both->SetNamedPropertyHandler(NamedGetter, NULL, NULL, NULL, NamedEnum);
-  both->SetIndexedPropertyHandler(IndexedGetter, NULL, NULL, NULL, IndexedEnum);
+  both->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      NamedGetter, NULL, NULL, NULL, NamedEnum));
+  both->SetHandler(v8::IndexedPropertyHandlerConfiguration(
+      IndexedGetter, NULL, NULL, NULL, IndexedEnum));
   env->Global()->Set(
       v8::String::NewFromUtf8(isolate, "intercepted_both"),
       both->NewInstance());
@@ -6161,7 +6160,8 @@ static void DebugEventDebugBreak(
         last_function_hit[0] = '\0';
       } else {
         CHECK(result->IsString());
-        v8::Handle<v8::String> function_name(result->ToString());
+        v8::Handle<v8::String> function_name(
+            result->ToString(CcTest::isolate()));
         function_name->WriteUtf8(last_function_hit);
       }
     }
@@ -7044,7 +7044,8 @@ static void DebugEventBreakDeoptimize(
       if (!result->IsUndefined()) {
         char fn[80];
         CHECK(result->IsString());
-        v8::Handle<v8::String> function_name(result->ToString());
+        v8::Handle<v8::String> function_name(
+            result->ToString(CcTest::isolate()));
         function_name->WriteUtf8(fn);
         if (strcmp(fn, "bar") == 0) {
           i::Deoptimizer::DeoptimizeAll(CcTest::i_isolate());
@@ -7109,12 +7110,12 @@ static void DebugEventBreakWithOptimizedStack(
         v8::Handle<v8::Value> result =
             frame_function_name->Call(exec_state, argc, argv);
         CHECK(result->IsString());
-        v8::Handle<v8::String> function_name(result->ToString());
+        v8::Handle<v8::String> function_name(result->ToString(isolate));
         CHECK(function_name->Equals(v8::String::NewFromUtf8(isolate, "loop")));
         // Get the name of the first argument in frame i.
         result = frame_argument_name->Call(exec_state, argc, argv);
         CHECK(result->IsString());
-        v8::Handle<v8::String> argument_name(result->ToString());
+        v8::Handle<v8::String> argument_name(result->ToString(isolate));
         CHECK(argument_name->Equals(v8::String::NewFromUtf8(isolate, "count")));
         // Get the value of the first argument in frame i. If the
         // function is optimized the value will be undefined, otherwise
@@ -7127,7 +7128,7 @@ static void DebugEventBreakWithOptimizedStack(
         // Get the name of the first local variable.
         result = frame_local_name->Call(exec_state, argc, argv);
         CHECK(result->IsString());
-        v8::Handle<v8::String> local_name(result->ToString());
+        v8::Handle<v8::String> local_name(result->ToString(isolate));
         CHECK(local_name->Equals(v8::String::NewFromUtf8(isolate, "local")));
         // Get the value of the first local variable. If the function
         // is optimized the value will be undefined, otherwise it will
@@ -7620,3 +7621,61 @@ TEST(DebugBreakOnExceptionInObserveCallback) {
   CHECK(CompileRun("callbackRan")->BooleanValue());
   CHECK_EQ(1, exception_event_counter);
 }
+
+
+static void DebugHarmonyScopingListener(
+    const v8::Debug::EventDetails& event_details) {
+  v8::DebugEvent event = event_details.GetEvent();
+  if (event != v8::Break) return;
+
+  int break_id = CcTest::i_isolate()->debug()->break_id();
+
+  char script[128];
+  i::Vector<char> script_vector(script, sizeof(script));
+  SNPrintF(script_vector, "%%GetFrameCount(%d)", break_id);
+  ExpectInt32(script, 1);
+
+  SNPrintF(script_vector, "var frame = new FrameMirror(%d, 0);", break_id);
+  CompileRun(script);
+  ExpectInt32("frame.evaluate('x').value_", 1);
+  ExpectInt32("frame.evaluate('y').value_", 2);
+
+  CompileRun("var allScopes = frame.allScopes()");
+  ExpectInt32("allScopes.length", 2);
+
+  ExpectBoolean("allScopes[0].scopeType() === ScopeType.Script", true);
+
+  ExpectInt32("allScopes[0].scopeObject().value_.x", 1);
+
+  ExpectInt32("allScopes[0].scopeObject().value_.y", 2);
+
+  CompileRun("allScopes[0].setVariableValue('x', 5);");
+  CompileRun("allScopes[0].setVariableValue('y', 6);");
+  ExpectInt32("frame.evaluate('x + y').value_", 11);
+}
+
+
+TEST(DebugBreakInLexicalScopes) {
+  i::FLAG_harmony_scoping = true;
+  i::FLAG_allow_natives_syntax = true;
+
+  DebugLocalContext env;
+  v8::Isolate* isolate = env->GetIsolate();
+  v8::HandleScope scope(isolate);
+  v8::Debug::SetDebugEventListener(DebugHarmonyScopingListener);
+
+  CompileRun(
+      "'use strict';            \n"
+      "let x = 1;               \n");
+  ExpectInt32(
+      "'use strict';            \n"
+      "let y = 2;               \n"
+      "debugger;                \n"
+      "x * y",
+      30);
+  ExpectInt32(
+      "x = 1; y = 2; \n"
+      "debugger;"
+      "x * y",
+      30);
+}
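
In both ExpectInt32 calls the expected 30 is produced by the listener, not
by the script text: at the debugger; statement x and y hold 1 and 2, the
listener verifies that through the frame mirror and then rewrites them to
5 and 6 with setVariableValue (checking x + y == 11 along the way), so the
trailing x * y evaluates 5 * 6. The same flow in miniature, as plain C++
and purely illustrative:

    int main() {
      int x = 1, y = 2;  // bindings when the debugger; statement is hit
      // What the listener does at the breakpoint:
      bool checked = (x == 1 && y == 2);   // frame.evaluate('x') / ('y')
      x = 5; y = 6;                        // setVariableValue('x', 5) / ('y', 6)
      checked = checked && (x + y == 11);  // frame.evaluate('x + y').value_
      // Execution resumes; the script's final expression:
      return (checked && x * y == 30) ? 0 : 1;
    }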
index f2ccdab..06afdd2 100644
@@ -70,9 +70,9 @@ class DeclarationContext {
   int query_count() const { return query_count_; }
 
  protected:
-  virtual v8::Handle<Value> Get(Local<String> key);
-  virtual v8::Handle<Value> Set(Local<String> key, Local<Value> value);
-  virtual v8::Handle<Integer> Query(Local<String> key);
+  virtual v8::Handle<Value> Get(Local<Name> key);
+  virtual v8::Handle<Value> Set(Local<Name> key, Local<Value> value);
+  virtual v8::Handle<Integer> Query(Local<Name> key);
 
   void InitializeIfNeeded();
 
@@ -88,12 +88,11 @@ class DeclarationContext {
 
   // The handlers are called as static functions that forward
   // to the instance specific virtual methods.
-  static void HandleGet(Local<String> key,
+  static void HandleGet(Local<Name> key,
                         const v8::PropertyCallbackInfo<v8::Value>& info);
-  static void HandleSet(Local<String> key,
-                        Local<Value> value,
+  static void HandleSet(Local<Name> key, Local<Value> value,
                         const v8::PropertyCallbackInfo<v8::Value>& info);
-  static void HandleQuery(Local<String> key,
+  static void HandleQuery(Local<Name> key,
                           const v8::PropertyCallbackInfo<v8::Integer>& info);
 
   v8::Isolate* isolate() const { return CcTest::isolate(); }
@@ -122,11 +121,8 @@ void DeclarationContext::InitializeIfNeeded() {
   HandleScope scope(isolate);
   Local<FunctionTemplate> function = FunctionTemplate::New(isolate);
   Local<Value> data = External::New(CcTest::isolate(), this);
-  GetHolder(function)->SetNamedPropertyHandler(&HandleGet,
-                                               &HandleSet,
-                                               &HandleQuery,
-                                               0, 0,
-                                               data);
+  GetHolder(function)->SetHandler(v8::NamedPropertyHandlerConfiguration(
+      &HandleGet, &HandleSet, &HandleQuery, 0, 0, data));
   Local<Context> context = Context::New(isolate,
                                         0,
                                         function->InstanceTemplate(),
@@ -178,8 +174,7 @@ void DeclarationContext::Check(const char* source,
 
 
 void DeclarationContext::HandleGet(
-    Local<String> key,
-    const v8::PropertyCallbackInfo<v8::Value>& info) {
+    Local<Name> key, const v8::PropertyCallbackInfo<v8::Value>& info) {
   DeclarationContext* context = GetInstance(info.Data());
   context->get_count_++;
   info.GetReturnValue().Set(context->Get(key));
@@ -187,8 +182,7 @@ void DeclarationContext::HandleGet(
 
 
 void DeclarationContext::HandleSet(
-    Local<String> key,
-    Local<Value> value,
+    Local<Name> key, Local<Value> value,
     const v8::PropertyCallbackInfo<v8::Value>& info) {
   DeclarationContext* context = GetInstance(info.Data());
   context->set_count_++;
@@ -197,8 +191,7 @@ void DeclarationContext::HandleSet(
 
 
 void DeclarationContext::HandleQuery(
-    Local<String> key,
-    const v8::PropertyCallbackInfo<v8::Integer>& info) {
+    Local<Name> key, const v8::PropertyCallbackInfo<v8::Integer>& info) {
   DeclarationContext* context = GetInstance(info.Data());
   context->query_count_++;
   info.GetReturnValue().Set(context->Query(key));
@@ -211,18 +204,17 @@ DeclarationContext* DeclarationContext::GetInstance(Local<Value> data) {
 }
 
 
-v8::Handle<Value> DeclarationContext::Get(Local<String> key) {
+v8::Handle<Value> DeclarationContext::Get(Local<Name> key) {
   return v8::Handle<Value>();
 }
 
 
-v8::Handle<Value> DeclarationContext::Set(Local<String> key,
-                                          Local<Value> value) {
+v8::Handle<Value> DeclarationContext::Set(Local<Name> key, Local<Value> value) {
   return v8::Handle<Value>();
 }
 
 
-v8::Handle<Integer> DeclarationContext::Query(Local<String> key) {
+v8::Handle<Integer> DeclarationContext::Query(Local<Name> key) {
   return v8::Handle<Integer>();
 }
 
@@ -272,7 +264,7 @@ TEST(Unknown) {
 
 class AbsentPropertyContext: public DeclarationContext {
  protected:
-  virtual v8::Handle<Integer> Query(Local<String> key) {
+  virtual v8::Handle<Integer> Query(Local<Name> key) {
     return v8::Handle<Integer>();
   }
 };
@@ -336,7 +328,7 @@ class AppearingPropertyContext: public DeclarationContext {
   AppearingPropertyContext() : state_(DECLARE) { }
 
  protected:
-  virtual v8::Handle<Integer> Query(Local<String> key) {
+  virtual v8::Handle<Integer> Query(Local<Name> key) {
     switch (state_) {
       case DECLARE:
         // Force declaration by returning that the
@@ -405,7 +397,7 @@ class ExistsInPrototypeContext: public DeclarationContext {
  public:
   ExistsInPrototypeContext() { InitializeIfNeeded(); }
  protected:
-  virtual v8::Handle<Integer> Query(Local<String> key) {
+  virtual v8::Handle<Integer> Query(Local<Name> key) {
     // Let it seem that the property exists in the prototype object.
     return Integer::New(isolate(), v8::None);
   }
@@ -464,7 +456,7 @@ TEST(ExistsInPrototype) {
 
 class AbsentInPrototypeContext: public DeclarationContext {
  protected:
-  virtual v8::Handle<Integer> Query(Local<String> key) {
+  virtual v8::Handle<Integer> Query(Local<Name> key) {
     // Let it seem that the property is absent in the prototype object.
     return Handle<Integer>();
   }
@@ -499,7 +491,7 @@ class ExistsInHiddenPrototypeContext: public DeclarationContext {
   }
 
  protected:
-  virtual v8::Handle<Integer> Query(Local<String> key) {
+  virtual v8::Handle<Integer> Query(Local<Name> key) {
     // Let it seem that the property exists in the hidden prototype object.
     return Integer::New(isolate(), v8::None);
   }
@@ -644,37 +636,215 @@ TEST(CrossScriptReferences) {
 }
 
 
-TEST(CrossScriptReferencesHarmony) {
+TEST(CrossScriptReferences_Simple) {
+  i::FLAG_harmony_scoping = true;
+  i::FLAG_use_strict = true;
+
+  v8::Isolate* isolate = CcTest::isolate();
+  HandleScope scope(isolate);
+
+  {
+    SimpleContext context;
+    context.Check("let x = 1; x", EXPECT_RESULT, Number::New(isolate, 1));
+    context.Check("let x = 5; x", EXPECT_EXCEPTION);
+  }
+}
+
+
+TEST(CrossScriptReferences_Simple2) {
+  i::FLAG_harmony_scoping = true;
   i::FLAG_use_strict = true;
+
+  v8::Isolate* isolate = CcTest::isolate();
+  HandleScope scope(isolate);
+
+  for (int k = 0; k < 100; k++) {
+    SimpleContext context;
+    bool cond = (k % 2) == 0;
+    if (cond) {
+      context.Check("let x = 1; x", EXPECT_RESULT, Number::New(isolate, 1));
+      context.Check("let z = 4; z", EXPECT_RESULT, Number::New(isolate, 4));
+    } else {
+      context.Check("let z = 1; z", EXPECT_RESULT, Number::New(isolate, 1));
+      context.Check("let x = 4; x", EXPECT_RESULT, Number::New(isolate, 4));
+    }
+    context.Check("let y = 2; x", EXPECT_RESULT,
+                  Number::New(isolate, cond ? 1 : 4));
+  }
+}
+
+
+TEST(CrossScriptReferencesHarmony) {
   i::FLAG_harmony_scoping = true;
   i::FLAG_harmony_modules = true;
 
   v8::Isolate* isolate = CcTest::isolate();
   HandleScope scope(isolate);
 
+  // Check that simple cross-script global scope access works.
   const char* decs[] = {
-    "var x = 1; x", "x", "this.x",
-    "function x() { return 1 }; x()", "x()", "this.x()",
-    "let x = 1; x", "x", "this.x",
-    "const x = 1; x", "x", "this.x",
-    "module x { export let a = 1 }; x.a", "x.a", "this.x.a",
+    "'use strict'; var x = 1; x", "x",
+    "'use strict'; function x() { return 1 }; x()", "x()",
+    "'use strict'; let x = 1; x", "x",
+    "'use strict'; const x = 1; x", "x",
+    "'use strict'; module x { export let a = 1 }; x.a", "x.a",
     NULL
   };
 
-  for (int i = 0; decs[i] != NULL; i += 3) {
+  for (int i = 0; decs[i] != NULL; i += 2) {
     SimpleContext context;
     context.Check(decs[i], EXPECT_RESULT, Number::New(isolate, 1));
     context.Check(decs[i+1], EXPECT_RESULT, Number::New(isolate, 1));
-    // TODO(rossberg): The current ES6 draft spec does not reflect lexical
-    // bindings on the global object. However, this will probably change, in
-    // which case we reactivate the following test.
-    if (i/3 < 2) {
-      context.Check(decs[i+2], EXPECT_RESULT, Number::New(isolate, 1));
-    }
+  }
+
+  // Check that cross-script global scope access works with late declarations.
+  {
+    SimpleContext context;
+    context.Check("function d0() { return x0 }",  // dynamic lookup
+                  EXPECT_RESULT, Undefined(isolate));
+    context.Check("this.x0 = -1;"
+                  "d0()",
+                  EXPECT_RESULT, Number::New(isolate, -1));
+    context.Check("'use strict';"
+                  "function f0() { let y = 10; return x0 + y }"
+                  "function g0() { let y = 10; return eval('x0 + y') }"
+                  "function h0() { let y = 10; return (1,eval)('x0') + y }"
+                  "x0 + f0() + g0() + h0()",
+                  EXPECT_RESULT, Number::New(isolate, 26));
+
+    context.Check("'use strict';"
+                  "let x1 = 1;"
+                  "function f1() { let y = 10; return x1 + y }"
+                  "function g1() { let y = 10; return eval('x1 + y') }"
+                  "function h1() { let y = 10; return (1,eval)('x1') + y }"
+                  "function i1() { "
+                  "  let y = 10; return (typeof x2 === 'undefined' ? 0 : 2) + y"
+                  "}"
+                  "function j1() { let y = 10; return eval('x2 + y') }"
+                  "function k1() { let y = 10; return (1,eval)('x2') + y }"
+                  "function cl() { "
+                  "  let y = 10; "
+                  "  return { "
+                  "    f: function(){ return x1 + y },"
+                  "    g: function(){ return eval('x1 + y') },"
+                  "    h: function(){ return (1,eval)('x1') + y },"
+                  "    i: function(){"
+                  "      return (typeof x2 == 'undefined' ? 0 : 2) + y"
+                  "    },"
+                  "    j: function(){ return eval('x2 + y') },"
+                  "    k: function(){ return (1,eval)('x2') + y },"
+                  "  }"
+                  "}"
+                  "let o = cl();"
+                  "x1 + eval('x1') + (1,eval)('x1') + f1() + g1() + h1();",
+                  EXPECT_RESULT, Number::New(isolate, 36));
+    context.Check("x1 + eval('x1') + (1,eval)('x1') + f1() + g1() + h1();",
+                  EXPECT_RESULT, Number::New(isolate, 36));
+    context.Check("o.f() + o.g() + o.h();",
+                  EXPECT_RESULT, Number::New(isolate, 33));
+    context.Check("i1() + o.i();",
+                  EXPECT_RESULT, Number::New(isolate, 20));
+
+    context.Check("'use strict';"
+                  "let x2 = 2;"
+                  "function f2() { let y = 20; return x2 + y }"
+                  "function g2() { let y = 20; return eval('x2 + y') }"
+                  "function h2() { let y = 20; return (1,eval)('x2') + y }"
+                  "function i2() { let y = 20; return x1 + y }"
+                  "function j2() { let y = 20; return eval('x1 + y') }"
+                  "function k2() { let y = 20; return (1,eval)('x1') + y }"
+                  "x2 + eval('x2') + (1,eval)('x2') + f2() + g2() + h2();",
+                  EXPECT_RESULT, Number::New(isolate, 72));
+    context.Check("x1 + eval('x1') + (1,eval)('x1') + f1() + g1() + h1();",
+                  EXPECT_RESULT, Number::New(isolate, 36));
+    context.Check("i1() + j1() + k1();",
+                  EXPECT_RESULT, Number::New(isolate, 36));
+    context.Check("i2() + j2() + k2();",
+                  EXPECT_RESULT, Number::New(isolate, 63));
+    context.Check("o.f() + o.g() + o.h();",
+                  EXPECT_RESULT, Number::New(isolate, 33));
+    context.Check("o.i() + o.j() + o.k();",
+                  EXPECT_RESULT, Number::New(isolate, 36));
+    context.Check("i1() + o.i();",
+                  EXPECT_RESULT, Number::New(isolate, 24));
+
+    context.Check("'use strict';"
+                  "let x0 = 100;"
+                  "x0 + eval('x0') + (1,eval)('x0') + "
+                  "    d0() + f0() + g0() + h0();",
+                  EXPECT_RESULT, Number::New(isolate, 730));
+    context.Check("x0 + eval('x0') + (1,eval)('x0') + "
+                  "    d0() + f0() + g0() + h0();",
+                  EXPECT_RESULT, Number::New(isolate, 730));
+    context.Check("delete this.x0;"
+                  "x0 + eval('x0') + (1,eval)('x0') + "
+                  "    d0() + f0() + g0() + h0();",
+                  EXPECT_RESULT, Number::New(isolate, 730));
+    context.Check("this.x1 = 666;"
+                  "x1 + eval('x1') + (1,eval)('x1') + f1() + g1() + h1();",
+                  EXPECT_RESULT, Number::New(isolate, 36));
+    context.Check("delete this.x1;"
+                  "x1 + eval('x1') + (1,eval)('x1') + f1() + g1() + h1();",
+                  EXPECT_RESULT, Number::New(isolate, 36));
+  }
+
+  // Check that caching does respect scopes.
+  {
+    SimpleContext context;
+    const char* script1 = "(function(){ return y1 })()";
+    const char* script2 = "(function(){ return y2 })()";
+
+    context.Check(script1, EXPECT_EXCEPTION);
+    context.Check("this.y1 = 1; this.y2 = 2; 0;",
+                  EXPECT_RESULT, Number::New(isolate, 0));
+    context.Check(script1,
+                  EXPECT_RESULT, Number::New(isolate, 1));
+    context.Check("'use strict'; let y1 = 3; 0;",
+                  EXPECT_RESULT, Number::New(isolate, 0));
+    context.Check(script1,
+                  EXPECT_RESULT, Number::New(isolate, 3));
+    context.Check("y1 = 4;",
+                  EXPECT_RESULT, Number::New(isolate, 4));
+    context.Check(script1,
+                  EXPECT_RESULT, Number::New(isolate, 4));
+
+    context.Check(script2,
+                  EXPECT_RESULT, Number::New(isolate, 2));
+    context.Check("'use strict'; let y2 = 5; 0;",
+                  EXPECT_RESULT, Number::New(isolate, 0));
+    context.Check(script1,
+                  EXPECT_RESULT, Number::New(isolate, 4));
+    context.Check(script2,
+                  EXPECT_RESULT, Number::New(isolate, 5));
   }
 }
 
 
+TEST(GlobalLexicalOSR) {
+  i::FLAG_use_strict = true;
+  i::FLAG_harmony_scoping = true;
+  i::FLAG_harmony_modules = true;
+
+  v8::Isolate* isolate = CcTest::isolate();
+  HandleScope scope(isolate);
+  SimpleContext context;
+
+  context.Check("'use strict';"
+                "let x = 1; x;",
+                EXPECT_RESULT, Number::New(isolate, 1));
+  context.Check("'use strict';"
+                "let y = 2*x;"
+                "++x;"
+                "let z = 0;"
+                "const limit = 100000;"
+                "for (var i = 0; i < limit; ++i) {"
+                "  z += x + y;"
+                "}"
+                "z;",
+                EXPECT_RESULT, Number::New(isolate, 400000));
+}
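
The expected 400000 follows from the script's order of evaluation: y is
computed from x before the increment, so y == 2 and then x == 2, and the
loop adds x + y == 4 on each of 100000 iterations (a loop long enough to
trigger on-stack replacement, which is the point of the test). The same
arithmetic as a plain C++ sanity check:

    int main() {
      int x = 1;
      int y = 2 * x;  // evaluated before ++x, so y == 2
      ++x;            // x == 2
      long z = 0;
      const int limit = 100000;
      for (int i = 0; i < limit; ++i) z += x + y;  // adds 4 per iteration
      return z == 400000 ? 0 : 1;  // matches the EXPECT_RESULT above
    }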
+
+
 TEST(CrossScriptConflicts) {
   i::FLAG_use_strict = true;
   i::FLAG_harmony_scoping = true;
@@ -704,12 +874,12 @@ TEST(CrossScriptConflicts) {
       SimpleContext context;
       context.Check(firsts[i], EXPECT_RESULT,
                     Number::New(CcTest::isolate(), 1));
-      // TODO(rossberg): All tests should actually be errors in Harmony,
-      // but we currently do not detect the cases where the first declaration
-      // is not lexical.
-      context.Check(seconds[j],
-                    i < 2 ? EXPECT_RESULT : EXPECT_ERROR,
-                    Number::New(CcTest::isolate(), 2));
+      bool success_case = i < 2 && j < 2;
+      Local<Value> success_result;
+      if (success_case) success_result = Number::New(CcTest::isolate(), 2);
+
+      context.Check(seconds[j], success_case ? EXPECT_RESULT : EXPECT_EXCEPTION,
+                    success_result);
     }
   }
 }
@@ -740,10 +910,244 @@ TEST(CrossScriptDynamicLookup) {
         EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
     context.Check(
         "'use strict';"
-        "g({});"
+        "g({});0",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 0));
+    context.Check("f({})", EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
+    context.Check("h({})", EXPECT_RESULT, number_string);
+  }
+}
+
+
+TEST(CrossScriptGlobal) {
+  i::FLAG_harmony_scoping = true;
+
+  HandleScope handle_scope(CcTest::isolate());
+  {
+    SimpleContext context;
+
+    context.Check(
+        "var global = this;"
+        "global.x = 255;"
         "x",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 255));
+    context.Check(
+        "'use strict';"
+        "let x = 1;"
+        "global.x",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 255));
+    context.Check("global.x = 15; x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 1));
+    context.Check("x = 221; global.x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 15));
+    context.Check(
+        "z = 15;"
+        "function f() { return z; };"
+        "for (var k = 0; k < 3; k++) { f(); }"
+        "f()",
         EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
+    context.Check(
+        "'use strict';"
+        "let z = 5; f()",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 5));
+    context.Check(
+        "function f() { konst = 10; return konst; };"
+        "f()",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 10));
+    context.Check(
+        "'use strict';"
+        "const konst = 255;"
+        "f()",
+        EXPECT_EXCEPTION);
+  }
+}
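
These cross-script cases all exercise one lookup rule: script-level
let/const bindings live in a global lexical environment that is consulted
before the global object, so they shadow same-named properties without
overwriting them. A toy C++ model of that two-level lookup follows; the
structure and names are illustrative only, not V8's actual contexts.

    #include <map>
    #include <optional>
    #include <string>

    struct GlobalScope {
      std::map<std::string, double> lexical;  // script-level let/const
      std::map<std::string, double> object;   // this.x = ... properties

      std::optional<double> Lookup(const std::string& name) const {
        auto lex = lexical.find(name);
        if (lex != lexical.end()) return lex->second;  // shadows the object
        auto obj = object.find(name);
        if (obj != object.end()) return obj->second;
        return std::nullopt;  // ReferenceError in the real engine
      }
    };

    int main() {
      GlobalScope g;
      g.object["x"] = 255;  // global.x = 255
      g.lexical["x"] = 1;   // 'use strict'; let x = 1
      // Plain `x` now resolves to 1 while global.x still reads 255,
      // which is exactly what TEST(CrossScriptGlobal) checks.
      return g.Lookup("x") == 1 ? 0 : 1;
    }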
+
+
+TEST(CrossScriptStaticLookupUndeclared) {
+  i::FLAG_harmony_scoping = true;
+
+  HandleScope handle_scope(CcTest::isolate());
+
+  {
+    SimpleContext context;
+    Local<String> undefined_string = String::NewFromUtf8(
+        CcTest::isolate(), "undefined", String::kInternalizedString);
+    Local<String> number_string = String::NewFromUtf8(
+        CcTest::isolate(), "number", String::kInternalizedString);
+
+    context.Check(
+        "function f(o) { return x; }"
+        "function g(v) { x = v; }"
+        "function h(o) { return typeof x; }",
+        EXPECT_RESULT, Undefined(CcTest::isolate()));
+    context.Check("h({})", EXPECT_RESULT, undefined_string);
+    context.Check(
+        "'use strict';"
+        "let x = 1;"
+        "f({})",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
+    context.Check(
+        "'use strict';"
+        "g(15);x",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
+    context.Check("h({})", EXPECT_RESULT, number_string);
     context.Check("f({})", EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
     context.Check("h({})", EXPECT_RESULT, number_string);
   }
 }
+
+
+TEST(CrossScriptLoadICs) {
+  i::FLAG_harmony_scoping = true;
+  i::FLAG_allow_natives_syntax = true;
+
+  HandleScope handle_scope(CcTest::isolate());
+
+  {
+    SimpleContext context;
+    context.Check(
+        "x = 15;"
+        "function f() { return x; }"
+        "function g() { return x; }"
+        "f()",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
+    context.Check(
+        "'use strict';"
+        "let x = 5;"
+        "f()",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 5));
+    for (int k = 0; k < 3; k++) {
+      context.Check("g()", EXPECT_RESULT, Number::New(CcTest::isolate(), 5));
+    }
+    for (int k = 0; k < 3; k++) {
+      context.Check("f()", EXPECT_RESULT, Number::New(CcTest::isolate(), 5));
+    }
+    context.Check("%OptimizeFunctionOnNextCall(g); g()", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 5));
+    context.Check("%OptimizeFunctionOnNextCall(f); f()", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 5));
+  }
+  {
+    SimpleContext context;
+    context.Check(
+        "x = 15;"
+        "function f() { return x; }"
+        "f()",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
+    for (int k = 0; k < 3; k++) {
+      context.Check("f()", EXPECT_RESULT, Number::New(CcTest::isolate(), 15));
+    }
+    context.Check("%OptimizeFunctionOnNextCall(f); f()", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 15));
+    context.Check(
+        "'use strict';"
+        "let x = 5;"
+        "f()",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 5));
+    for (int k = 0; k < 3; k++) {
+      context.Check("f()", EXPECT_RESULT, Number::New(CcTest::isolate(), 5));
+    }
+    context.Check("%OptimizeFunctionOnNextCall(f); f()", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 5));
+  }
+}
+
+
+TEST(CrossScriptStoreICs) {
+  i::FLAG_harmony_scoping = true;
+  i::FLAG_allow_natives_syntax = true;
+
+  HandleScope handle_scope(CcTest::isolate());
+
+  {
+    SimpleContext context;
+    context.Check(
+        "var global = this;"
+        "x = 15;"
+        "function f(v) { x = v; }"
+        "function g(v) { x = v; }"
+        "f(10); x",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 10));
+    context.Check(
+        "'use strict';"
+        "let x = 5;"
+        "f(7); x",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 7));
+    context.Check("global.x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 10));
+    for (int k = 0; k < 3; k++) {
+      context.Check("g(31); x", EXPECT_RESULT,
+                    Number::New(CcTest::isolate(), 31));
+    }
+    context.Check("global.x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 10));
+    for (int k = 0; k < 3; k++) {
+      context.Check("f(32); x", EXPECT_RESULT,
+                    Number::New(CcTest::isolate(), 32));
+    }
+    context.Check("global.x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 10));
+    context.Check("%OptimizeFunctionOnNextCall(g); g(18); x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 18));
+    context.Check("global.x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 10));
+    context.Check("%OptimizeFunctionOnNextCall(f); f(33); x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 33));
+    context.Check("global.x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 10));
+  }
+  {
+    SimpleContext context;
+    context.Check(
+        "var global = this;"
+        "x = 15;"
+        "function f(v) { x = v; }"
+        "f(10); x",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 10));
+    for (int k = 0; k < 3; k++) {
+      context.Check("f(18); x", EXPECT_RESULT,
+                    Number::New(CcTest::isolate(), 18));
+    }
+    context.Check("%OptimizeFunctionOnNextCall(f); f(20); x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 20));
+    context.Check(
+        "'use strict';"
+        "let x = 5;"
+        "f(8); x",
+        EXPECT_RESULT, Number::New(CcTest::isolate(), 8));
+    context.Check("global.x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 20));
+    for (int k = 0; k < 3; k++) {
+      context.Check("f(13); x", EXPECT_RESULT,
+                    Number::New(CcTest::isolate(), 13));
+    }
+    context.Check("global.x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 20));
+    context.Check("%OptimizeFunctionOnNextCall(f); f(41); x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 41));
+    context.Check("global.x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 20));
+  }
+}
+
+
+TEST(CrossScriptAssignmentToConst) {
+  i::FLAG_harmony_scoping = true;
+  i::FLAG_allow_natives_syntax = true;
+
+  HandleScope handle_scope(CcTest::isolate());
+
+  {
+    SimpleContext context;
+
+    context.Check("function f() { x = 27; }", EXPECT_RESULT,
+                  Undefined(CcTest::isolate()));
+    context.Check("'use strict';const x = 1; x", EXPECT_RESULT,
+                  Number::New(CcTest::isolate(), 1));
+    context.Check("f();", EXPECT_EXCEPTION);
+    context.Check("x", EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
+    context.Check("f();", EXPECT_EXCEPTION);
+    context.Check("x", EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
+    context.Check("%OptimizeFunctionOnNextCall(f);f();", EXPECT_EXCEPTION);
+    context.Check("x", EXPECT_RESULT, Number::New(CcTest::isolate(), 1));
+  }
+}
index 39356b1..095c636 100644
@@ -410,14 +410,32 @@ TEST(Type3) {
             "e6843895       pkhbt r3, r4, r5, lsl #17");
     COMPARE(pkhtb(r3, r4, Operand(r5, ASR, 17)),
             "e68438d5       pkhtb r3, r4, r5, asr #17");
-    COMPARE(uxtb(r9, Operand(r10, ROR, 0)),
-            "e6ef907a       uxtb r9, r10");
-    COMPARE(uxtb(r3, Operand(r4, ROR, 8)),
-            "e6ef3474       uxtb r3, r4, ror #8");
-    COMPARE(uxtab(r3, r4, Operand(r5, ROR, 8)),
-            "e6e43475       uxtab r3, r4, r5, ror #8");
-    COMPARE(uxtb16(r3, Operand(r4, ROR, 8)),
-            "e6cf3474       uxtb16 r3, r4, ror #8");
+
+    COMPARE(sxtb(r1, r7, 0, eq), "06af1077       sxtbeq r1, r7");
+    COMPARE(sxtb(r0, r0, 8, ne), "16af0470       sxtbne r0, r0, ror #8");
+    COMPARE(sxtb(r9, r10, 16), "e6af987a       sxtb r9, r10, ror #16");
+    COMPARE(sxtb(r4, r3, 24), "e6af4c73       sxtb r4, r3, ror #24");
+
+    COMPARE(sxtab(r3, r4, r5), "e6a43075       sxtab r3, r4, r5");
+
+    COMPARE(sxth(r5, r0), "e6bf5070       sxth r5, r0");
+    COMPARE(sxth(r5, r9, 8), "e6bf5479       sxth r5, r9, ror #8");
+    COMPARE(sxth(r5, r9, 16, hi), "86bf5879       sxthhi r5, r9, ror #16");
+    COMPARE(sxth(r8, r9, 24, cc), "36bf8c79       sxthcc r8, r9, ror #24");
+
+    COMPARE(sxtah(r3, r4, r5, 16), "e6b43875       sxtah r3, r4, r5, ror #16");
+
+    COMPARE(uxtb(r9, r10), "e6ef907a       uxtb r9, r10");
+    COMPARE(uxtb(r3, r4, 8), "e6ef3474       uxtb r3, r4, ror #8");
+
+    COMPARE(uxtab(r3, r4, r5, 8), "e6e43475       uxtab r3, r4, r5, ror #8");
+
+    COMPARE(uxtb16(r3, r4, 8), "e6cf3474       uxtb16 r3, r4, ror #8");
+
+    COMPARE(uxth(r9, r10), "e6ff907a       uxth r9, r10");
+    COMPARE(uxth(r3, r4, 8), "e6ff3474       uxth r3, r4, ror #8");
+
+    COMPARE(uxtah(r3, r4, r5, 24), "e6f43c75       uxtah r3, r4, r5, ror #24");
   }
 
   COMPARE(smmla(r0, r1, r2, r3), "e7503211       smmla r0, r1, r2, r3");
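
Each COMPARE line assembles one instruction and string-compares its
disassembly (hex machine word plus mnemonic text) against the expectation,
so "e6af987a       sxtb r9, r10, ror #16" pins down both the encoding and
the printed form. A slightly simplified sketch of the macro from
test-disasm-arm.cc:

    // Assemble `asm_` at the current pc, then disassemble the bytes just
    // emitted and compare them against `compare_string`.
    #define COMPARE(asm_, compare_string)                        \
      {                                                          \
        int pc_offset = assm.pc_offset();                        \
        byte* progcounter = &buffer[pc_offset];                  \
        assm.asm_;                                               \
        if (!DisassembleAndCompare(progcounter, compare_string)) \
          failure = true;                                        \
      }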
index 755c9d5..a2eaa15 100644 (file)
@@ -51,7 +51,7 @@ TEST(DisasmIa320) {
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   HandleScope scope(isolate);
-  v8::internal::byte buffer[2048];
+  v8::internal::byte buffer[4096];
   Assembler assm(isolate, buffer, sizeof buffer);
   DummyStaticFunction(NULL);  // just bloody use it (DELETE; debugging)
 
@@ -401,6 +401,14 @@ TEST(DisasmIa320) {
     __ xorps(xmm0, Operand(ebx, ecx, times_4, 10000));
 
     // Arithmetic operation
+    __ addss(xmm1, xmm0);
+    __ addss(xmm1, Operand(ebx, ecx, times_4, 10000));
+    __ mulss(xmm1, xmm0);
+    __ mulss(xmm1, Operand(ebx, ecx, times_4, 10000));
+    __ subss(xmm1, xmm0);
+    __ subss(xmm1, Operand(ebx, ecx, times_4, 10000));
+    __ divss(xmm1, xmm0);
+    __ divss(xmm1, Operand(ebx, ecx, times_4, 10000));
     __ addps(xmm1, xmm0);
     __ addps(xmm1, Operand(ebx, ecx, times_4, 10000));
     __ subps(xmm1, xmm0);
@@ -409,6 +417,9 @@ TEST(DisasmIa320) {
     __ mulps(xmm1, Operand(ebx, ecx, times_4, 10000));
     __ divps(xmm1, xmm0);
     __ divps(xmm1, Operand(ebx, ecx, times_4, 10000));
+
+    __ ucomiss(xmm0, xmm1);
+    __ ucomiss(xmm0, Operand(ebx, ecx, times_4, 10000));
   }
   {
     __ cvttss2si(edx, Operand(ebx, ecx, times_4, 10000));
@@ -471,6 +482,83 @@ TEST(DisasmIa320) {
     }
   }
 
+  // AVX instruction
+  {
+    if (CpuFeatures::IsSupported(AVX)) {
+      CpuFeatureScope scope(&assm, AVX);
+      __ vaddsd(xmm0, xmm1, xmm2);
+      __ vaddsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vmulsd(xmm0, xmm1, xmm2);
+      __ vmulsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vsubsd(xmm0, xmm1, xmm2);
+      __ vsubsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vdivsd(xmm0, xmm1, xmm2);
+      __ vdivsd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+    }
+  }
+
+  // FMA3 instruction
+  {
+    if (CpuFeatures::IsSupported(FMA3)) {
+      CpuFeatureScope scope(&assm, FMA3);
+      __ vfmadd132sd(xmm0, xmm1, xmm2);
+      __ vfmadd132sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfmadd213sd(xmm0, xmm1, xmm2);
+      __ vfmadd213sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfmadd231sd(xmm0, xmm1, xmm2);
+      __ vfmadd231sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+
+      __ vfmsub132sd(xmm0, xmm1, xmm2);
+      __ vfmsub132sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfmsub213sd(xmm0, xmm1, xmm2);
+      __ vfmsub213sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfmsub231sd(xmm0, xmm1, xmm2);
+      __ vfmsub231sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+
+      __ vfnmadd132sd(xmm0, xmm1, xmm2);
+      __ vfnmadd132sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfnmadd213sd(xmm0, xmm1, xmm2);
+      __ vfnmadd213sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfnmadd231sd(xmm0, xmm1, xmm2);
+      __ vfnmadd231sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+
+      __ vfnmsub132sd(xmm0, xmm1, xmm2);
+      __ vfnmsub132sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfnmsub213sd(xmm0, xmm1, xmm2);
+      __ vfnmsub213sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfnmsub231sd(xmm0, xmm1, xmm2);
+      __ vfnmsub231sd(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+
+      __ vfmadd132ss(xmm0, xmm1, xmm2);
+      __ vfmadd132ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfmadd213ss(xmm0, xmm1, xmm2);
+      __ vfmadd213ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfmadd231ss(xmm0, xmm1, xmm2);
+      __ vfmadd231ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+
+      __ vfmsub132ss(xmm0, xmm1, xmm2);
+      __ vfmsub132ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfmsub213ss(xmm0, xmm1, xmm2);
+      __ vfmsub213ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfmsub231ss(xmm0, xmm1, xmm2);
+      __ vfmsub231ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+
+      __ vfnmadd132ss(xmm0, xmm1, xmm2);
+      __ vfnmadd132ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfnmadd213ss(xmm0, xmm1, xmm2);
+      __ vfnmadd213ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfnmadd231ss(xmm0, xmm1, xmm2);
+      __ vfnmadd231ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+
+      __ vfnmsub132ss(xmm0, xmm1, xmm2);
+      __ vfnmsub132ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfnmsub213ss(xmm0, xmm1, xmm2);
+      __ vfnmsub213ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+      __ vfnmsub231ss(xmm0, xmm1, xmm2);
+      __ vfnmsub231ss(xmm0, xmm1, Operand(ebx, ecx, times_4, 10000));
+    }
+  }
+
   // xchg.
   {
     __ xchg(eax, eax);
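
For readers decoding the FMA3 block above: the 132/213/231 suffixes say
which operands feed the multiply and which is the addend; operand 1 is
always the destination. This is standard x86 FMA semantics, not something
introduced by this patch:

    // vfmadd132sd a, b, c   =>  a = a * c + b
    // vfmadd213sd a, b, c   =>  a = b * a + c
    // vfmadd231sd a, b, c   =>  a = b * c + a
    // vfmsub* subtracts the addend; vfnmadd*/vfnmsub* negate the product.
    // Memory forms replace only the last source, e.g.:
    //   vfmadd132sd xmm0, xmm1, Operand(ebx, ecx, times_4, 10000)
    //     =>  xmm0 = xmm0 * mem + xmm1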
index fac9226..6cd58ec 100644 (file)
@@ -51,7 +51,7 @@ TEST(DisasmX64) {
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
   HandleScope scope(isolate);
-  v8::internal::byte buffer[2048];
+  v8::internal::byte buffer[4096];
   Assembler assm(isolate, buffer, sizeof buffer);
   DummyStaticFunction(NULL);  // just bloody use it (DELETE; debugging)
 
@@ -394,6 +394,14 @@ TEST(DisasmX64) {
     __ xorps(xmm0, Operand(rbx, rcx, times_4, 10000));
 
     // Arithmetic operation
+    __ addss(xmm1, xmm0);
+    __ addss(xmm1, Operand(rbx, rcx, times_4, 10000));
+    __ mulss(xmm1, xmm0);
+    __ mulss(xmm1, Operand(rbx, rcx, times_4, 10000));
+    __ subss(xmm1, xmm0);
+    __ subss(xmm1, Operand(rbx, rcx, times_4, 10000));
+    __ divss(xmm1, xmm0);
+    __ divss(xmm1, Operand(rbx, rcx, times_4, 10000));
     __ addps(xmm1, xmm0);
     __ addps(xmm1, Operand(rbx, rcx, times_4, 10000));
     __ subps(xmm1, xmm0);
@@ -402,6 +410,9 @@ TEST(DisasmX64) {
     __ mulps(xmm1, Operand(rbx, rcx, times_4, 10000));
     __ divps(xmm1, xmm0);
     __ divps(xmm1, Operand(rbx, rcx, times_4, 10000));
+
+    __ ucomiss(xmm0, xmm1);
+    __ ucomiss(xmm0, Operand(rbx, rcx, times_4, 10000));
   }
   // SSE 2 instructions
   {
@@ -464,6 +475,89 @@ TEST(DisasmX64) {
     }
   }
 
+  // AVX instruction
+  {
+    if (CpuFeatures::IsSupported(AVX)) {
+      CpuFeatureScope scope(&assm, AVX);
+      __ vaddsd(xmm0, xmm1, xmm2);
+      __ vaddsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vmulsd(xmm0, xmm1, xmm2);
+      __ vmulsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vsubsd(xmm0, xmm1, xmm2);
+      __ vsubsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vdivsd(xmm0, xmm1, xmm2);
+      __ vdivsd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+    }
+  }
+
+  // FMA3 instruction
+  {
+    if (CpuFeatures::IsSupported(FMA3)) {
+      CpuFeatureScope scope(&assm, FMA3);
+      __ vfmadd132sd(xmm0, xmm1, xmm2);
+      __ vfmadd132sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfmadd213sd(xmm0, xmm1, xmm2);
+      __ vfmadd213sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfmadd231sd(xmm0, xmm1, xmm2);
+      __ vfmadd231sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+
+      __ vfmadd132sd(xmm9, xmm10, xmm11);
+      __ vfmadd132sd(xmm9, xmm10, Operand(r9, r11, times_4, 10000));
+      __ vfmadd213sd(xmm9, xmm10, xmm11);
+      __ vfmadd213sd(xmm9, xmm10, Operand(r9, r11, times_4, 10000));
+      __ vfmadd231sd(xmm9, xmm10, xmm11);
+      __ vfmadd231sd(xmm9, xmm10, Operand(r9, r11, times_4, 10000));
+
+      __ vfmsub132sd(xmm0, xmm1, xmm2);
+      __ vfmsub132sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfmsub213sd(xmm0, xmm1, xmm2);
+      __ vfmsub213sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfmsub231sd(xmm0, xmm1, xmm2);
+      __ vfmsub231sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+
+      __ vfnmadd132sd(xmm0, xmm1, xmm2);
+      __ vfnmadd132sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfnmadd213sd(xmm0, xmm1, xmm2);
+      __ vfnmadd213sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfnmadd231sd(xmm0, xmm1, xmm2);
+      __ vfnmadd231sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+
+      __ vfnmsub132sd(xmm0, xmm1, xmm2);
+      __ vfnmsub132sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfnmsub213sd(xmm0, xmm1, xmm2);
+      __ vfnmsub213sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfnmsub231sd(xmm0, xmm1, xmm2);
+      __ vfnmsub231sd(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+
+      __ vfmadd132ss(xmm0, xmm1, xmm2);
+      __ vfmadd132ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfmadd213ss(xmm0, xmm1, xmm2);
+      __ vfmadd213ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfmadd231ss(xmm0, xmm1, xmm2);
+      __ vfmadd231ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+
+      __ vfmsub132ss(xmm0, xmm1, xmm2);
+      __ vfmsub132ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfmsub213ss(xmm0, xmm1, xmm2);
+      __ vfmsub213ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfmsub231ss(xmm0, xmm1, xmm2);
+      __ vfmsub231ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+
+      __ vfnmadd132ss(xmm0, xmm1, xmm2);
+      __ vfnmadd132ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfnmadd213ss(xmm0, xmm1, xmm2);
+      __ vfnmadd213ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfnmadd231ss(xmm0, xmm1, xmm2);
+      __ vfnmadd231ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+
+      __ vfnmsub132ss(xmm0, xmm1, xmm2);
+      __ vfnmsub132ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfnmsub213ss(xmm0, xmm1, xmm2);
+      __ vfnmsub213ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+      __ vfnmsub231ss(xmm0, xmm1, xmm2);
+      __ vfnmsub231ss(xmm0, xmm1, Operand(rbx, rcx, times_4, 10000));
+    }
+  }
   // xchg.
   {
     __ xchgq(rax, rax);
index 28a15f2..fa2f195 100644 (file)
@@ -24,7 +24,8 @@ TEST(VectorStructure) {
   Factory* factory = isolate->factory();
 
   // Empty vectors are the empty fixed array.
-  Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(0, 0);
+  FeedbackVectorSpec empty;
+  Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(empty);
   CHECK(Handle<FixedArray>::cast(vector)
             .is_identical_to(factory->empty_fixed_array()));
   // Which can nonetheless be queried.
@@ -33,15 +34,24 @@ TEST(VectorStructure) {
   CHECK_EQ(0, vector->Slots());
   CHECK_EQ(0, vector->ICSlots());
 
-  vector = factory->NewTypeFeedbackVector(1, 0);
+  FeedbackVectorSpec one_slot(1, 0);
+  vector = factory->NewTypeFeedbackVector(one_slot);
   CHECK_EQ(1, vector->Slots());
   CHECK_EQ(0, vector->ICSlots());
 
-  vector = factory->NewTypeFeedbackVector(0, 1);
+  FeedbackVectorSpec one_icslot(0, 1);
+  if (FLAG_vector_ics) {
+    one_icslot.SetKind(0, Code::CALL_IC);
+  }
+  vector = factory->NewTypeFeedbackVector(one_icslot);
   CHECK_EQ(0, vector->Slots());
   CHECK_EQ(1, vector->ICSlots());
 
-  vector = factory->NewTypeFeedbackVector(3, 5);
+  FeedbackVectorSpec spec(3, 5);
+  if (FLAG_vector_ics) {
+    for (int i = 0; i < 5; i++) spec.SetKind(i, Code::CALL_IC);
+  }
+  vector = factory->NewTypeFeedbackVector(spec);
   CHECK_EQ(3, vector->Slots());
   CHECK_EQ(5, vector->ICSlots());
 
@@ -53,6 +63,7 @@ TEST(VectorStructure) {
   }
 
   int index = vector->GetIndex(FeedbackVectorSlot(0));
+
   CHECK_EQ(TypeFeedbackVector::kReservedIndexCount + metadata_length, index);
   CHECK(FeedbackVectorSlot(0) == vector->ToSlot(index));
 
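These hunks migrate the factory call from raw (slots, ic_slots) counts to a
FeedbackVectorSpec that also carries per-IC-slot kind metadata. The new
allocation pattern, in the shape the tests here use (counts illustrative):

    FeedbackVectorSpec spec(3, 2);        // 3 plain slots, 2 IC slots
    if (FLAG_vector_ics) {
      spec.SetKind(0, Code::LOAD_IC);     // kind metadata per IC slot
      spec.SetKind(1, Code::CALL_IC);
    }
    Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(spec);
    CHECK_EQ(3, vector->Slots());
    CHECK_EQ(2, vector->ICSlots());
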
@@ -78,11 +89,7 @@ TEST(VectorICMetadata) {
   Isolate* isolate = CcTest::i_isolate();
   Factory* factory = isolate->factory();
 
-  Handle<TypeFeedbackVector> vector =
-      factory->NewTypeFeedbackVector(10, 3 * 10);
-  CHECK_EQ(10, vector->Slots());
-  CHECK_EQ(3 * 10, vector->ICSlots());
-
+  FeedbackVectorSpec spec(10, 3 * 10);
   // Set metadata.
   for (int i = 0; i < 30; i++) {
     Code::Kind kind;
@@ -93,16 +100,20 @@ TEST(VectorICMetadata) {
     } else {
       kind = Code::KEYED_LOAD_IC;
     }
-    vector->SetKind(FeedbackVectorICSlot(i), kind);
+    spec.SetKind(i, kind);
   }
 
+  Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(spec);
+  CHECK_EQ(10, vector->Slots());
+  CHECK_EQ(3 * 10, vector->ICSlots());
+
   // Meanwhile set some feedback values and type feedback values to
   // verify the data structure remains intact.
   vector->change_ic_with_type_info_count(100);
   vector->change_ic_generic_count(3333);
   vector->Set(FeedbackVectorSlot(0), *vector);
 
-  // Verify the metadata remains the same.
+  // Verify the metadata is correctly set up from the spec.
   for (int i = 0; i < 30; i++) {
     Code::Kind kind = vector->GetKind(FeedbackVectorICSlot(i));
     if (i % 3 == 0) {
@@ -125,7 +136,8 @@ TEST(VectorSlotClearing) {
   // We only test clearing FeedbackVectorSlots, not FeedbackVectorICSlots.
   // The reason is that FeedbackVectorICSlots need a full code environment
   // to fully test (See VectorICProfilerStatistics test below).
-  Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(5, 0);
+  FeedbackVectorSpec spec(5, 0);
+  Handle<TypeFeedbackVector> vector = factory->NewTypeFeedbackVector(spec);
 
   // Fill with information
   vector->Set(FeedbackVectorSlot(0), Smi::FromInt(1));
@@ -188,7 +200,7 @@ TEST(VectorICProfilerStatistics) {
   CHECK_EQ(1, feedback_vector->ic_with_type_info_count());
   CHECK_EQ(0, feedback_vector->ic_generic_count());
 
-  int ic_slot = FLAG_vector_ics ? 1 : 0;
+  int ic_slot = 0;
   CHECK(
       feedback_vector->Get(FeedbackVectorICSlot(ic_slot))->IsAllocationSite());
   heap->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -217,7 +229,7 @@ TEST(VectorCallICStates) {
   // There should be one IC.
   Handle<TypeFeedbackVector> feedback_vector =
       Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
-  FeedbackVectorICSlot slot(FLAG_vector_ics ? 1 : 0);
+  FeedbackVectorICSlot slot(0);
   CallICNexus nexus(feedback_vector, slot);
   CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
   // CallIC doesn't return map feedback.
@@ -239,4 +251,58 @@ TEST(VectorCallICStates) {
   heap->CollectAllGarbage(i::Heap::kNoGCFlags);
   CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
 }
+
+
+TEST(VectorLoadICStates) {
+  if (i::FLAG_always_opt || !i::FLAG_vector_ics) return;
+  CcTest::InitializeVM();
+  LocalContext context;
+  v8::HandleScope scope(context->GetIsolate());
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+
+  // Make sure function f has a load that uses a type feedback slot.
+  CompileRun(
+      "var o = { foo: 3 };"
+      "function f(a) { return a.foo; } f(o);");
+  Handle<JSFunction> f = v8::Utils::OpenHandle(
+      *v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));
+  // There should be one IC.
+  Handle<TypeFeedbackVector> feedback_vector =
+      Handle<TypeFeedbackVector>(f->shared()->feedback_vector(), isolate);
+  FeedbackVectorICSlot slot(0);
+  LoadICNexus nexus(feedback_vector, slot);
+  CHECK_EQ(PREMONOMORPHIC, nexus.StateFromFeedback());
+
+  CompileRun("f(o)");
+  CHECK_EQ(MONOMORPHIC, nexus.StateFromFeedback());
+  // Verify that the monomorphic map is the one we expect.
+  Handle<JSObject> o = v8::Utils::OpenHandle(
+      *v8::Handle<v8::Object>::Cast(CcTest::global()->Get(v8_str("o"))));
+  CHECK_EQ(o->map(), nexus.FindFirstMap());
+
+  // Now go polymorphic.
+  CompileRun("f({ blarg: 3, foo: 2 })");
+  CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
+
+  CompileRun(
+      "delete o.foo;"
+      "f(o)");
+  CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
+
+  CompileRun("f({ blarg: 3, torino: 10, foo: 2 })");
+  CHECK_EQ(POLYMORPHIC, nexus.StateFromFeedback());
+  MapHandleList maps;
+  nexus.FindAllMaps(&maps);
+  CHECK_EQ(4, maps.length());
+
+  // Finally driven megamorphic.
+  CompileRun("f({ blarg: 3, gran: 3, torino: 10, foo: 2 })");
+  CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
+  CHECK_EQ(NULL, nexus.FindFirstMap());
+
+  // After a collection, state should not be reset to PREMONOMORPHIC.
+  heap->CollectAllGarbage(i::Heap::kNoGCFlags);
+  CHECK_EQ(MEGAMORPHIC, nexus.StateFromFeedback());
+}
 }
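
VectorLoadICStates above walks the feedback lattice one receiver map at a
time. Roughly (the InlineCacheState enum in the V8 sources has a few more
states than shown here):

    // UNINITIALIZED -> PREMONOMORPHIC -> MONOMORPHIC (1 map)
    //   -> POLYMORPHIC (2..4 maps) -> MEGAMORPHIC (no maps cached)
    // Once megamorphic, the nexus stops tracking maps, which is why the
    // test expects nexus.FindFirstMap() to return NULL at the end.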
index 8f9b484..94a5be4 100644 (file)
@@ -890,9 +890,10 @@ class OneByteResource : public v8::String::ExternalOneByteStringResource {
 }  // namespace
 
 TEST(HeapSnapshotJSONSerialization) {
+  v8::Isolate* isolate = CcTest::isolate();
   LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-  v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+  v8::HandleScope scope(isolate);
+  v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
 
 #define STRING_LITERAL_FOR_TEST \
   "\"String \\n\\r\\u0008\\u0081\\u0101\\u0801\\u8001\""
@@ -923,7 +924,7 @@ TEST(HeapSnapshotJSONSerialization) {
 
   // Verify that snapshot object has required fields.
   v8::Local<v8::Object> parsed_snapshot =
-      env->Global()->Get(v8_str("parsed"))->ToObject();
+      env->Global()->Get(v8_str("parsed"))->ToObject(isolate);
   CHECK(parsed_snapshot->Has(v8_str("snapshot")));
   CHECK(parsed_snapshot->Has(v8_str("nodes")));
   CHECK(parsed_snapshot->Has(v8_str("edges")));
@@ -979,17 +980,18 @@ TEST(HeapSnapshotJSONSerialization) {
       "  \"s\", property_type)");
   CHECK(!string_obj_pos_val.IsEmpty());
   int string_obj_pos =
-      static_cast<int>(string_obj_pos_val->ToNumber()->Value());
+      static_cast<int>(string_obj_pos_val->ToNumber(isolate)->Value());
   v8::Local<v8::Object> nodes_array =
-      parsed_snapshot->Get(v8_str("nodes"))->ToObject();
+      parsed_snapshot->Get(v8_str("nodes"))->ToObject(isolate);
   int string_index = static_cast<int>(
-      nodes_array->Get(string_obj_pos + 1)->ToNumber()->Value());
+      nodes_array->Get(string_obj_pos + 1)->ToNumber(isolate)->Value());
   CHECK_GT(string_index, 0);
   v8::Local<v8::Object> strings_array =
-      parsed_snapshot->Get(v8_str("strings"))->ToObject();
-  v8::Local<v8::String> string = strings_array->Get(string_index)->ToString();
+      parsed_snapshot->Get(v8_str("strings"))->ToObject(isolate);
+  v8::Local<v8::String> string =
+      strings_array->Get(string_index)->ToString(isolate);
   v8::Local<v8::String> ref_string =
-      CompileRun(STRING_LITERAL_FOR_TEST)->ToString();
+      CompileRun(STRING_LITERAL_FOR_TEST)->ToString(isolate);
 #undef STRING_LITERAL_FOR_TEST
   CHECK_EQ(*v8::String::Utf8Value(ref_string),
            *v8::String::Utf8Value(string));
@@ -1719,9 +1721,6 @@ TEST(GlobalObjectFields) {
   const v8::HeapGraphNode* native_context =
       GetProperty(global, v8::HeapGraphEdge::kInternal, "native_context");
   CHECK_NE(NULL, native_context);
-  const v8::HeapGraphNode* global_context =
-      GetProperty(global, v8::HeapGraphEdge::kInternal, "global_context");
-  CHECK_NE(NULL, global_context);
   const v8::HeapGraphNode* global_proxy =
       GetProperty(global, v8::HeapGraphEdge::kInternal, "global_proxy");
   CHECK_NE(NULL, global_proxy);
@@ -1917,6 +1916,47 @@ TEST(FastCaseAccessors) {
 }
 
 
+TEST(FastCaseRedefinedAccessors) {
+  LocalContext env;
+  v8::HandleScope scope(env->GetIsolate());
+  v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+
+  CompileRun(
+      "var obj1 = {};\n"
+      "Object.defineProperty(obj1, 'prop', { "
+      "  get: function() { return 42; },\n"
+      "  set: function(value) { return this.prop_ = value; },\n"
+      "  configurable: true,\n"
+      "  enumerable: true,\n"
+      "});\n"
+      "Object.defineProperty(obj1, 'prop', { "
+      "  get: function() { return 153; },\n"
+      "  set: function(value) { return this.prop_ = value; },\n"
+      "  configurable: true,\n"
+      "  enumerable: true,\n"
+      "});\n");
+  v8::Local<v8::Object> js_global =
+      env->Global()->GetPrototype().As<v8::Object>();
+  i::Handle<i::JSObject> js_obj1 =
+      v8::Utils::OpenHandle(*js_global->Get(v8_str("obj1")).As<v8::Object>());
+  USE(js_obj1);
+
+  const v8::HeapSnapshot* snapshot =
+      heap_profiler->TakeHeapSnapshot(v8_str("fastCaseAccessors"));
+  CHECK(ValidateSnapshot(snapshot));
+  const v8::HeapGraphNode* global = GetGlobalObject(snapshot);
+  CHECK_NE(NULL, global);
+  const v8::HeapGraphNode* obj1 =
+      GetProperty(global, v8::HeapGraphEdge::kProperty, "obj1");
+  CHECK_NE(NULL, obj1);
+  const v8::HeapGraphNode* func;
+  func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "get prop");
+  CHECK_NE(NULL, func);
+  func = GetProperty(obj1, v8::HeapGraphEdge::kProperty, "set prop");
+  CHECK_NE(NULL, func);
+}
+
+
 TEST(SlowCaseAccessors) {
   LocalContext env;
   v8::HandleScope scope(env->GetIsolate());
@@ -1952,9 +1992,10 @@ TEST(SlowCaseAccessors) {
 
 
 TEST(HiddenPropertiesFastCase) {
+  v8::Isolate* isolate = CcTest::isolate();
   LocalContext env;
-  v8::HandleScope scope(env->GetIsolate());
-  v8::HeapProfiler* heap_profiler = env->GetIsolate()->GetHeapProfiler();
+  v8::HandleScope scope(isolate);
+  v8::HeapProfiler* heap_profiler = isolate->GetHeapProfiler();
 
   CompileRun(
       "function C(x) { this.a = this; this.b = x; }\n"
@@ -1973,7 +2014,7 @@ TEST(HiddenPropertiesFastCase) {
   v8::Handle<v8::Value> cHandle =
       env->Global()->Get(v8::String::NewFromUtf8(env->GetIsolate(), "c"));
   CHECK(!cHandle.IsEmpty() && cHandle->IsObject());
-  cHandle->ToObject()->SetHiddenValue(v8_str("key"), v8_str("val"));
+  cHandle->ToObject(isolate)->SetHiddenValue(v8_str("key"), v8_str("val"));
 
   snapshot = heap_profiler->TakeHeapSnapshot(
       v8_str("HiddenPropertiesFastCase2"));
index 543a89d..f8a7df2 100644 (file)
@@ -1579,7 +1579,7 @@ TEST(TestInternalWeakLists) {
   }
 
   // Force compilation cache cleanup.
-  CcTest::heap()->NotifyContextDisposed();
+  CcTest::heap()->NotifyContextDisposed(true);
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   // Dispose the native contexts one by one.
@@ -1694,6 +1694,64 @@ TEST(TestInternalWeakListsTraverseWithGC) {
 }
 
 
+TEST(TestSizeOfRegExpCode) {
+  if (!FLAG_regexp_optimization) return;
+
+  v8::V8::Initialize();
+
+  Isolate* isolate = CcTest::i_isolate();
+  HandleScope scope(isolate);
+
+  LocalContext context;
+
+  // Adjust the source below and this check to match
+  // RegExpImpl::kRegExpTooLargeToOptimize.
+  DCHECK_EQ(i::RegExpImpl::kRegExpTooLargeToOptimize, 10 * KB);
+
+  // Compile a regexp whose generated code is much larger with regexp optimizations.
+  CompileRun(
+      "var reg_exp_source = '(?:a|bc|def|ghij|klmno|pqrstu)';"
+      "var half_size_reg_exp;"
+      "while (reg_exp_source.length < 10 * 1024) {"
+      "  half_size_reg_exp = reg_exp_source;"
+      "  reg_exp_source = reg_exp_source + reg_exp_source;"
+      "}"
+      // Flatten string.
+      "reg_exp_source.match(/f/);");
+
+  // Get initial heap size after several full GCs, which will stabilize
+  // the heap size and leave sweeping completely finished.
+  CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+  CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+  CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+  CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+  CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+  MarkCompactCollector* collector = CcTest::heap()->mark_compact_collector();
+  if (collector->sweeping_in_progress()) {
+    collector->EnsureSweepingCompleted();
+  }
+  int initial_size = static_cast<int>(CcTest::heap()->SizeOfObjects());
+
+  CompileRun("'foo'.match(reg_exp_source);");
+  CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+  int size_with_regexp = static_cast<int>(CcTest::heap()->SizeOfObjects());
+
+  CompileRun("'foo'.match(half_size_reg_exp);");
+  CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+  int size_with_optimized_regexp =
+      static_cast<int>(CcTest::heap()->SizeOfObjects());
+
+  int size_of_regexp_code = size_with_regexp - initial_size;
+
+  CHECK_LE(size_of_regexp_code, 1 * MB);
+
+  // Small regexp is half the size, but compiles to more than twice the code
+  // due to the optimization steps.
+  CHECK_GE(size_with_optimized_regexp,
+           size_with_regexp + size_of_regexp_code * 2);
+}
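
Spelling out the final check: with I = initial_size, R = size_with_regexp
and O = size_with_optimized_regexp, the CHECK_GE demands

    // O >= R + 2 * (R - I),  i.e.  O - R >= 2 * (R - I)

so the half-size regexp, which fits under kRegExpTooLargeToOptimize and is
therefore optimized, must add at least twice the code footprint of the
oversized regexp that was compiled without optimization.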
+
+
 TEST(TestSizeOfObjects) {
   v8::V8::Initialize();
 
@@ -2338,7 +2396,8 @@ TEST(OptimizedAllocationAlwaysInNewSpace) {
       "f(1); f(2); f(3);"
       "%OptimizeFunctionOnNextCall(f);"
       "f(4);");
-  CHECK_EQ(4, res->ToObject()->GetRealNamedProperty(v8_str("x"))->Int32Value());
+  CHECK_EQ(
+      4, res.As<v8::Object>()->GetRealNamedProperty(v8_str("x"))->Int32Value());
 
   Handle<JSObject> o =
       v8::Utils::OpenHandle(*v8::Handle<v8::Object>::Cast(res));
@@ -2476,12 +2535,21 @@ TEST(OptimizedPretenuringMixedInObjectProperties) {
   FieldIndex idx1 = FieldIndex::ForPropertyIndex(o->map(), 0);
   FieldIndex idx2 = FieldIndex::ForPropertyIndex(o->map(), 1);
   CHECK(CcTest::heap()->InOldPointerSpace(o->RawFastPropertyAt(idx1)));
-  CHECK(CcTest::heap()->InOldDataSpace(o->RawFastPropertyAt(idx2)));
+  if (!o->IsUnboxedDoubleField(idx2)) {
+    CHECK(CcTest::heap()->InOldDataSpace(o->RawFastPropertyAt(idx2)));
+  } else {
+    CHECK_EQ(1.1, o->RawFastDoublePropertyAt(idx2));
+  }
 
   JSObject* inner_object =
       reinterpret_cast<JSObject*>(o->RawFastPropertyAt(idx1));
   CHECK(CcTest::heap()->InOldPointerSpace(inner_object));
-  CHECK(CcTest::heap()->InOldDataSpace(inner_object->RawFastPropertyAt(idx1)));
+  if (!inner_object->IsUnboxedDoubleField(idx1)) {
+    CHECK(
+        CcTest::heap()->InOldDataSpace(inner_object->RawFastPropertyAt(idx1)));
+  } else {
+    CHECK_EQ(2.2, inner_object->RawFastDoublePropertyAt(idx1));
+  }
   CHECK(CcTest::heap()->InOldPointerSpace(
       inner_object->RawFastPropertyAt(idx2)));
 }
@@ -3285,18 +3353,18 @@ TEST(IncrementalMarkingClearsTypeFeedbackInfo) {
 
   int expected_slots = 2;
   CHECK_EQ(expected_slots, feedback_vector->ICSlots());
-  for (int i = 0; i < expected_slots; i++) {
-    CHECK(feedback_vector->Get(FeedbackVectorICSlot(i))->IsJSFunction());
-  }
+  int slot1 = 0;
+  int slot2 = 1;
+  CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot1))->IsJSFunction());
+  CHECK(feedback_vector->Get(FeedbackVectorICSlot(slot2))->IsJSFunction());
 
   SimulateIncrementalMarking(CcTest::heap());
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
-  CHECK_EQ(expected_slots, feedback_vector->ICSlots());
-  for (int i = 0; i < expected_slots; i++) {
-    CHECK_EQ(feedback_vector->Get(FeedbackVectorICSlot(i)),
-             *TypeFeedbackVector::UninitializedSentinel(CcTest::i_isolate()));
-  }
+  CHECK_EQ(feedback_vector->Get(FeedbackVectorICSlot(slot1)),
+           *TypeFeedbackVector::UninitializedSentinel(CcTest::i_isolate()));
+  CHECK_EQ(feedback_vector->Get(FeedbackVectorICSlot(slot2)),
+           *TypeFeedbackVector::UninitializedSentinel(CcTest::i_isolate()));
 }
 
 
@@ -3315,6 +3383,25 @@ static Code* FindFirstIC(Code* code, Code::Kind kind) {
 }
 
 
+static void CheckVectorIC(Handle<JSFunction> f, int ic_slot_index,
+                          InlineCacheState desired_state) {
+  Handle<TypeFeedbackVector> vector =
+      Handle<TypeFeedbackVector>(f->shared()->feedback_vector());
+  FeedbackVectorICSlot slot(ic_slot_index);
+  LoadICNexus nexus(vector, slot);
+  CHECK(nexus.StateFromFeedback() == desired_state);
+}
+
+
+static void CheckVectorICCleared(Handle<JSFunction> f, int ic_slot_index) {
+  Handle<TypeFeedbackVector> vector =
+      Handle<TypeFeedbackVector>(f->shared()->feedback_vector());
+  FeedbackVectorICSlot slot(ic_slot_index);
+  LoadICNexus nexus(vector, slot);
+  CHECK(IC::IsCleared(&nexus));
+}
+
+
 TEST(IncrementalMarkingPreservesMonomorphicIC) {
   if (i::FLAG_always_opt) return;
   CcTest::InitializeVM();
@@ -3330,13 +3417,23 @@ TEST(IncrementalMarkingPreservesMonomorphicIC) {
               CcTest::global()->Get(v8_str("f"))));
 
   Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
-  CHECK(ic_before->ic_state() == MONOMORPHIC);
+  if (FLAG_vector_ics) {
+    CheckVectorIC(f, 0, MONOMORPHIC);
+    CHECK(ic_before->ic_state() == DEFAULT);
+  } else {
+    CHECK(ic_before->ic_state() == MONOMORPHIC);
+  }
 
   SimulateIncrementalMarking(CcTest::heap());
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
-  CHECK(ic_after->ic_state() == MONOMORPHIC);
+  if (FLAG_vector_ics) {
+    CheckVectorIC(f, 0, MONOMORPHIC);
+    CHECK(ic_after->ic_state() == DEFAULT);
+  } else {
+    CHECK(ic_after->ic_state() == MONOMORPHIC);
+  }
 }
 
 
@@ -3362,7 +3459,12 @@ TEST(IncrementalMarkingClearsMonomorphicIC) {
               CcTest::global()->Get(v8_str("f"))));
 
   Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
-  CHECK(ic_before->ic_state() == MONOMORPHIC);
+  if (FLAG_vector_ics) {
+    CheckVectorIC(f, 0, MONOMORPHIC);
+    CHECK(ic_before->ic_state() == DEFAULT);
+  } else {
+    CHECK(ic_before->ic_state() == MONOMORPHIC);
+  }
 
   // Fire context dispose notification.
   CcTest::isolate()->ContextDisposedNotification();
@@ -3370,7 +3472,60 @@ TEST(IncrementalMarkingClearsMonomorphicIC) {
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
-  CHECK(IC::IsCleared(ic_after));
+  if (FLAG_vector_ics) {
+    CheckVectorICCleared(f, 0);
+    CHECK(ic_after->ic_state() == DEFAULT);
+  } else {
+    CHECK(IC::IsCleared(ic_after));
+  }
+}
+
+
+TEST(IncrementalMarkingPreservesPolymorphicIC) {
+  if (i::FLAG_always_opt) return;
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  v8::Local<v8::Value> obj1, obj2;
+
+  {
+    LocalContext env;
+    CompileRun("function fun() { this.x = 1; }; var obj = new fun();");
+    obj1 = env->Global()->Get(v8_str("obj"));
+  }
+
+  {
+    LocalContext env;
+    CompileRun("function fun() { this.x = 2; }; var obj = new fun();");
+    obj2 = env->Global()->Get(v8_str("obj"));
+  }
+
+  // Prepare function f that contains a polymorphic IC for objects
+  // originating from two different native contexts.
+  CcTest::global()->Set(v8_str("obj1"), obj1);
+  CcTest::global()->Set(v8_str("obj2"), obj2);
+  CompileRun("function f(o) { return o.x; } f(obj1); f(obj1); f(obj2);");
+  Handle<JSFunction> f = v8::Utils::OpenHandle(
+      *v8::Handle<v8::Function>::Cast(CcTest::global()->Get(v8_str("f"))));
+
+  Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
+  if (FLAG_vector_ics) {
+    CheckVectorIC(f, 0, POLYMORPHIC);
+    CHECK(ic_before->ic_state() == DEFAULT);
+  } else {
+    CHECK(ic_before->ic_state() == POLYMORPHIC);
+  }
+
+  // Simulate incremental marking and collect garbage.
+  SimulateIncrementalMarking(CcTest::heap());
+  CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
+
+  Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
+  if (FLAG_vector_ics) {
+    CheckVectorIC(f, 0, POLYMORPHIC);
+    CHECK(ic_after->ic_state() == DEFAULT);
+  } else {
+    CHECK(ic_after->ic_state() == POLYMORPHIC);
+  }
 }
 
 
@@ -3403,7 +3558,12 @@ TEST(IncrementalMarkingClearsPolymorphicIC) {
               CcTest::global()->Get(v8_str("f"))));
 
   Code* ic_before = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
-  CHECK(ic_before->ic_state() == POLYMORPHIC);
+  if (FLAG_vector_ics) {
+    CheckVectorIC(f, 0, POLYMORPHIC);
+    CHECK(ic_before->ic_state() == DEFAULT);
+  } else {
+    CHECK(ic_before->ic_state() == POLYMORPHIC);
+  }
 
   // Fire context dispose notification.
   CcTest::isolate()->ContextDisposedNotification();
@@ -3411,7 +3571,12 @@ TEST(IncrementalMarkingClearsPolymorphicIC) {
   CcTest::heap()->CollectAllGarbage(Heap::kNoGCFlags);
 
   Code* ic_after = FindFirstIC(f->shared()->code(), Code::LOAD_IC);
-  CHECK(IC::IsCleared(ic_after));
+  if (FLAG_vector_ics) {
+    CheckVectorICCleared(f, 0);
+    CHECK(ic_after->ic_state() == DEFAULT);
+  } else {
+    CHECK(IC::IsCleared(ic_after));
+  }
 }
 
 
@@ -4155,8 +4320,9 @@ TEST(NoWeakHashTableLeakWithIncrementalMarking) {
                "bar%d();"
                "bar%d();"
                "bar%d();"
-               "%%OptimizeFunctionOnNextCall(bar%d);"
-               "bar%d();", i, i, i, i, i, i, i, i);
+               "%%OptimizeFunctionOnNextCall(bar%d);"
+               "bar%d();",
+               i, i, i, i, i, i, i, i);
       CompileRun(source.start());
     }
     heap->CollectAllGarbage(i::Heap::kNoGCFlags);
@@ -4288,7 +4454,7 @@ void CheckWeakness(const char* source) {
   v8::Persistent<v8::Object> garbage;
   {
     v8::HandleScope scope(isolate);
-    garbage.Reset(isolate, CompileRun(source)->ToObject());
+    garbage.Reset(isolate, CompileRun(source)->ToObject(isolate));
   }
   weak_ic_cleared = false;
   garbage.SetWeak(static_cast<void*>(&garbage), &ClearWeakIC);
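
CheckWeakness parks the script's return value (a prototype reachable only
through the IC) in a weak persistent and then asserts whether GC collected
it. A sketch of the weak callback this hunk wires up, close to the real one
in test-heap.cc:

    static bool weak_ic_cleared = false;

    static void ClearWeakIC(
        const v8::WeakCallbackData<v8::Object, void>& data) {
      weak_ic_cleared = true;  // the IC's weak reference was dropped
      v8::Persistent<v8::Object>* p =
          reinterpret_cast<v8::Persistent<v8::Object>*>(data.GetParameter());
      CHECK(p->IsNearDeath());  // invoked while the object is dying
      p->Reset();
    }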
@@ -4315,6 +4481,25 @@ TEST(WeakMapInMonomorphicLoadIC) {
 }
 
 
+TEST(WeakMapInPolymorphicLoadIC) {
+  CheckWeakness(
+      "function loadIC(obj) {"
+      "  return obj.name;"
+      "}"
+      " (function() {"
+      "   var proto = {'name' : 'weak'};"
+      "   var obj = Object.create(proto);"
+      "   loadIC(obj);"
+      "   loadIC(obj);"
+      "   loadIC(obj);"
+      "   var poly = Object.create(proto);"
+      "   poly.x = true;"
+      "   loadIC(poly);"
+      "   return proto;"
+      " })();");
+}
+
+
 TEST(WeakMapInMonomorphicKeyedLoadIC) {
   CheckWeakness("function keyedLoadIC(obj, field) {"
                 "  return obj[field];"
@@ -4330,6 +4515,25 @@ TEST(WeakMapInMonomorphicKeyedLoadIC) {
 }
 
 
+TEST(WeakMapInPolymorphicKeyedLoadIC) {
+  CheckWeakness(
+      "function keyedLoadIC(obj, field) {"
+      "  return obj[field];"
+      "}"
+      " (function() {"
+      "   var proto = {'name' : 'weak'};"
+      "   var obj = Object.create(proto);"
+      "   keyedLoadIC(obj, 'name');"
+      "   keyedLoadIC(obj, 'name');"
+      "   keyedLoadIC(obj, 'name');"
+      "   var poly = Object.create(proto);"
+      "   poly.x = true;"
+      "   keyedLoadIC(poly, 'name');"
+      "   return proto;"
+      " })();");
+}
+
+
 TEST(WeakMapInMonomorphicStoreIC) {
   CheckWeakness("function storeIC(obj, value) {"
                 "  obj.name = value;"
@@ -4345,6 +4549,25 @@ TEST(WeakMapInMonomorphicStoreIC) {
 }
 
 
+TEST(WeakMapInPolymorphicStoreIC) {
+  CheckWeakness(
+      "function storeIC(obj, value) {"
+      "  obj.name = value;"
+      "}"
+      " (function() {"
+      "   var proto = {'name' : 'weak'};"
+      "   var obj = Object.create(proto);"
+      "   storeIC(obj, 'x');"
+      "   storeIC(obj, 'x');"
+      "   storeIC(obj, 'x');"
+      "   var poly = Object.create(proto);"
+      "   poly.x = true;"
+      "   storeIC(poly, 'x');"
+      "   return proto;"
+      " })();");
+}
+
+
 TEST(WeakMapInMonomorphicKeyedStoreIC) {
   CheckWeakness("function keyedStoreIC(obj, field, value) {"
                 "  obj[field] = value;"
@@ -4360,6 +4583,25 @@ TEST(WeakMapInMonomorphicKeyedStoreIC) {
 }
 
 
+TEST(WeakMapInPolymorphicKeyedStoreIC) {
+  CheckWeakness(
+      "function keyedStoreIC(obj, field, value) {"
+      "  obj[field] = value;"
+      "}"
+      " (function() {"
+      "   var proto = {'name' : 'weak'};"
+      "   var obj = Object.create(proto);"
+      "   keyedStoreIC(obj, 'x');"
+      "   keyedStoreIC(obj, 'x');"
+      "   keyedStoreIC(obj, 'x');"
+      "   var poly = Object.create(proto);"
+      "   poly.x = true;"
+      "   keyedStoreIC(poly, 'x');"
+      "   return proto;"
+      " })();");
+}
+
+
 TEST(WeakMapInMonomorphicCompareNilIC) {
   CheckWeakness("function compareNilIC(obj) {"
                 "  return obj == null;"
@@ -4375,6 +4617,94 @@ TEST(WeakMapInMonomorphicCompareNilIC) {
 }
 
 
+Handle<JSFunction> GetFunctionByName(Isolate* isolate, const char* name) {
+  Handle<String> str = isolate->factory()->InternalizeUtf8String(name);
+  Handle<Object> obj =
+      Object::GetProperty(isolate->global_object(), str).ToHandleChecked();
+  return Handle<JSFunction>::cast(obj);
+}
+
+
+void CheckIC(Code* code, Code::Kind kind, InlineCacheState state) {
+  Code* ic = FindFirstIC(code, kind);
+  CHECK(ic->is_inline_cache_stub());
+  CHECK(ic->ic_state() == state);
+}
+
+
+TEST(MonomorphicStaysMonomorphicAfterGC) {
+  if (FLAG_always_opt) return;
+  // TODO(mvstanton): vector ics need weak support!
+  if (FLAG_vector_ics) return;
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  v8::HandleScope scope(CcTest::isolate());
+  CompileRun(
+      "function loadIC(obj) {"
+      "  return obj.name;"
+      "}"
+      "function testIC() {"
+      "  var proto = {'name' : 'weak'};"
+      "  var obj = Object.create(proto);"
+      "  loadIC(obj);"
+      "  loadIC(obj);"
+      "  loadIC(obj);"
+      "  return proto;"
+      "};");
+  Handle<JSFunction> loadIC = GetFunctionByName(isolate, "loadIC");
+  {
+    v8::HandleScope scope(CcTest::isolate());
+    CompileRun("(testIC())");
+  }
+  heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  CheckIC(loadIC->code(), Code::LOAD_IC, MONOMORPHIC);
+  {
+    v8::HandleScope scope(CcTest::isolate());
+    CompileRun("(testIC())");
+  }
+  CheckIC(loadIC->code(), Code::LOAD_IC, MONOMORPHIC);
+}
+
+
+TEST(PolymorphicStaysPolymorphicAfterGC) {
+  if (FLAG_always_opt) return;
+  // TODO(mvstanton): vector ics need weak support!
+  if (FLAG_vector_ics) return;
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  v8::HandleScope scope(CcTest::isolate());
+  CompileRun(
+      "function loadIC(obj) {"
+      "  return obj.name;"
+      "}"
+      "function testIC() {"
+      "  var proto = {'name' : 'weak'};"
+      "  var obj = Object.create(proto);"
+      "  loadIC(obj);"
+      "  loadIC(obj);"
+      "  loadIC(obj);"
+      "  var poly = Object.create(proto);"
+      "  poly.x = true;"
+      "  loadIC(poly);"
+      "  return proto;"
+      "};");
+  Handle<JSFunction> loadIC = GetFunctionByName(isolate, "loadIC");
+  {
+    v8::HandleScope scope(CcTest::isolate());
+    CompileRun("(testIC())");
+  }
+  heap->CollectAllGarbage(Heap::kAbortIncrementalMarkingMask);
+  CheckIC(loadIC->code(), Code::LOAD_IC, POLYMORPHIC);
+  {
+    v8::HandleScope scope(CcTest::isolate());
+    CompileRun("(testIC())");
+  }
+  CheckIC(loadIC->code(), Code::LOAD_IC, POLYMORPHIC);
+}
+
+
 TEST(WeakCell) {
   CcTest::InitializeVM();
   Isolate* isolate = CcTest::i_isolate();
@@ -4520,7 +4850,7 @@ TEST(Regress357137) {
   CcTest::InitializeVM();
   v8::Isolate* isolate = CcTest::isolate();
   v8::HandleScope hscope(isolate);
-  v8::Handle<v8::ObjectTemplate> global =v8::ObjectTemplate::New(isolate);
+  v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(isolate);
   global->Set(v8::String::NewFromUtf8(isolate, "interrupt"),
               v8::FunctionTemplate::New(isolate, RequestInterrupt));
   v8::Local<v8::Context> context = v8::Context::New(isolate, NULL, global);
@@ -4533,7 +4863,7 @@ TEST(Regress357137) {
       "eval('function f() {' + locals + 'return function() { return v0; }; }');"
       "interrupt();"  // This triggers a fake stack overflow in f.
       "f()()");
-  CHECK_EQ(42.0, result->ToNumber()->Value());
+  CHECK_EQ(42.0, result->ToNumber(isolate)->Value());
 }
 
 
@@ -4742,6 +5072,23 @@ TEST(Regress3631) {
 }
 
 
+TEST(Regress442710) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Heap* heap = isolate->heap();
+  Factory* factory = isolate->factory();
+
+  HandleScope sc(isolate);
+  Handle<GlobalObject> global(CcTest::i_isolate()->context()->global_object());
+  Handle<JSArray> array = factory->NewJSArray(2);
+
+  Handle<String> name = factory->InternalizeUtf8String("testArray");
+  JSReceiver::SetProperty(global, name, array, SLOPPY).Check();
+  CompileRun("testArray[0] = 1; testArray[1] = 2; testArray.shift();");
+  heap->CollectGarbage(OLD_POINTER_SPACE);
+}
+
+
 #ifdef DEBUG
 TEST(PathTracer) {
   CcTest::InitializeVM();
index 482f89f..eee3e13 100644 (file)
@@ -42,6 +42,7 @@
 #include "src/natives.h"
 #include "src/utils.h"
 #include "src/v8threads.h"
+#include "src/version.h"
 #include "src/vm-state-inl.h"
 #include "test/cctest/cctest.h"
 
@@ -477,10 +478,9 @@ TEST(EquivalenceOfLoggingAndTraversal) {
         isolate, log.start(), v8::String::kNormalString, log.length());
     initialize_logger.env()->Global()->Set(v8_str("_log"), log_str);
 
-    i::Vector<const unsigned char> source = TestSources::GetScriptsSource();
+    i::Vector<const char> source = TestSources::GetScriptsSource();
     v8::Handle<v8::String> source_str = v8::String::NewFromUtf8(
-        isolate, reinterpret_cast<const char*>(source.start()),
-        v8::String::kNormalString, source.length());
+        isolate, source.start(), v8::String::kNormalString, source.length());
     v8::TryCatch try_catch;
     v8::Handle<v8::Script> script = CompileWithOrigin(source_str, "");
     if (script.IsEmpty()) {
@@ -496,7 +496,7 @@ TEST(EquivalenceOfLoggingAndTraversal) {
     }
     // The result will either be a "true" literal or a problem description.
     if (!result->IsTrue()) {
-      v8::Local<v8::String> s = result->ToString();
+      v8::Local<v8::String> s = result->ToString(isolate);
       i::ScopedVector<char> data(s->Utf8Length() + 1);
       CHECK_NE(NULL, data.start());
       s->WriteUtf8(data.start());
@@ -508,3 +508,23 @@ TEST(EquivalenceOfLoggingAndTraversal) {
   }
   isolate->Dispose();
 }
+
+
+TEST(LogVersion) {
+  v8::Isolate* isolate;
+  {
+    ScopedLoggerInitializer initialize_logger;
+    isolate = initialize_logger.isolate();
+    bool exists = false;
+    i::Vector<const char> log(
+        i::ReadFile(initialize_logger.StopLoggingGetTempFile(), &exists, true));
+    CHECK(exists);
+    i::EmbeddedVector<char, 100> ref_data;
+    i::SNPrintF(ref_data, "v8-version,%d,%d,%d,%d,%d", i::Version::GetMajor(),
+                i::Version::GetMinor(), i::Version::GetBuild(),
+                i::Version::GetPatch(), i::Version::IsCandidate());
+    CHECK_NE(NULL, StrNStr(log.start(), ref_data.start(), log.length()));
+    log.Dispose();
+  }
+  isolate->Dispose();
+}
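
The new LogVersion test greps the log for the version line the logger now
emits. For the release this upgrade pulls in (3.31.74.1, assuming a
non-candidate build) the line being searched for would read:

    v8-version,3,31,74,1,0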
index c7d6531..64d995d 100644 (file)
@@ -446,7 +446,7 @@ static intptr_t MemoryInUse() {
     bool write_permission = (buffer[position++] == 'w');
     CHECK(buffer[position] == '-' || buffer[position] == 'x');
     bool execute_permission = (buffer[position++] == 'x');
-    CHECK(buffer[position] == '-' || buffer[position] == 'p');
+    CHECK(buffer[position] == 's' || buffer[position] == 'p');
     bool private_mapping = (buffer[position++] == 'p');
     CHECK_EQ(buffer[position++], ' ');
     uintptr_t offset = ReadLong(buffer, &position, 16);
index 8851d88..2766a4f 100644 (file)
@@ -762,3 +762,76 @@ TEST(DontLeakContextOnNotifierPerformChange) {
   CcTest::isolate()->ContextDisposedNotification();
   CheckSurvivingGlobalObjectsCount(1);
 }
+
+
+static void ObserverCallback(const FunctionCallbackInfo<Value>& args) {
+  *static_cast<int*>(Handle<External>::Cast(args.Data())->Value()) =
+      Handle<Array>::Cast(args[0])->Length();
+}
+
+
+TEST(ObjectObserveCallsCppFunction) {
+  Isolate* isolate = CcTest::isolate();
+  HandleScope scope(isolate);
+  LocalContext context(isolate);
+  int numRecordsSent = 0;
+  Handle<Function> observer =
+      Function::New(CcTest::isolate(), ObserverCallback,
+                    External::New(isolate, &numRecordsSent));
+  context->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "observer"),
+                         observer);
+  CompileRun(
+      "var obj = {};"
+      "Object.observe(obj, observer);"
+      "obj.foo = 1;"
+      "obj.bar = 2;");
+  CHECK_EQ(2, numRecordsSent);
+}
+
+
+TEST(ObjectObserveCallsFunctionTemplateInstance) {
+  Isolate* isolate = CcTest::isolate();
+  HandleScope scope(isolate);
+  LocalContext context(isolate);
+  int numRecordsSent = 0;
+  Handle<FunctionTemplate> tmpl = FunctionTemplate::New(
+      isolate, ObserverCallback, External::New(isolate, &numRecordsSent));
+  context->Global()->Set(String::NewFromUtf8(CcTest::isolate(), "observer"),
+                         tmpl->GetFunction());
+  CompileRun(
+      "var obj = {};"
+      "Object.observe(obj, observer);"
+      "obj.foo = 1;"
+      "obj.bar = 2;");
+  CHECK_EQ(2, numRecordsSent);
+}
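
Both observe tests use the same trick to get a result out of a JS callback:
a raw int* boxed in a v8::External travels as the callback's data, and
ObserverCallback above unboxes it and stores the change-record count. The
pattern in isolation (hedged sketch; `counter` is a hypothetical name):

    int counter = 0;  // C++-side result slot
    v8::Handle<v8::External> box = v8::External::New(isolate, &counter);
    v8::Handle<v8::Function> fn =
        v8::Function::New(isolate, ObserverCallback, box);
    // Inside the callback, args.Data() is `box`, so
    //   static_cast<int*>(v8::Handle<v8::External>::Cast(args.Data())->Value())
    // recovers &counter; args[0] is the array of change records.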
+
+
+static void AccessorGetter(Local<String> property,
+                           const PropertyCallbackInfo<Value>& info) {
+  info.GetReturnValue().Set(Integer::New(info.GetIsolate(), 42));
+}
+
+
+static void AccessorSetter(Local<String> property, Local<Value> value,
+                           const PropertyCallbackInfo<void>& info) {
+  info.GetReturnValue().SetUndefined();
+}
+
+
+TEST(APIAccessorsShouldNotNotify) {
+  Isolate* isolate = CcTest::isolate();
+  HandleScope handle_scope(isolate);
+  LocalContext context(isolate);
+  Handle<Object> object = Object::New(isolate);
+  object->SetAccessor(String::NewFromUtf8(isolate, "accessor"), &AccessorGetter,
+                      &AccessorSetter);
+  context->Global()->Set(String::NewFromUtf8(isolate, "obj"), object);
+  CompileRun(
+      "var records = null;"
+      "Object.observe(obj, function(r) { records = r });"
+      "obj.accessor = 43;");
+  CHECK(CompileRun("records")->IsNull());
+  CompileRun("Object.defineProperty(obj, 'accessor', { value: 44 });");
+  CHECK(CompileRun("records")->IsNull());
+}
index 1909b3e..ef6b5d3 100644 (file)
@@ -319,8 +319,8 @@ TEST(StandAlonePreParser) {
 
     i::PreParser preparser(&scanner, &log, stack_limit);
     preparser.set_allow_lazy(true);
-    preparser.set_allow_natives_syntax(true);
-    preparser.set_allow_arrow_functions(true);
+    preparser.set_allow_natives(true);
+    preparser.set_allow_harmony_arrow_functions(true);
     i::PreParser::PreParseResult result = preparser.PreParseProgram();
     CHECK_EQ(i::PreParser::kPreParseSuccess, result);
     CHECK(!log.HasError());
@@ -454,14 +454,14 @@ TEST(Regress928) {
   i::PreParser::PreParseResult result = preparser.PreParseProgram();
   CHECK_EQ(i::PreParser::kPreParseSuccess, result);
   i::ScriptData* sd = log.GetScriptData();
-  i::ParseData pd(sd);
-  pd.Initialize();
+  i::ParseData* pd = i::ParseData::FromCachedData(sd);
+  pd->Initialize();
 
   int first_function =
       static_cast<int>(strstr(program, "function") - program);
   int first_lbrace = first_function + i::StrLength("function () ");
   CHECK_EQ('{', program[first_lbrace]);
-  i::FunctionEntry entry1 = pd.GetFunctionEntry(first_lbrace);
+  i::FunctionEntry entry1 = pd->GetFunctionEntry(first_lbrace);
   CHECK(!entry1.is_valid());
 
   int second_function =
@@ -469,10 +469,11 @@ TEST(Regress928) {
   int second_lbrace =
       second_function + i::StrLength("function () ");
   CHECK_EQ('{', program[second_lbrace]);
-  i::FunctionEntry entry2 = pd.GetFunctionEntry(second_lbrace);
+  i::FunctionEntry entry2 = pd->GetFunctionEntry(second_lbrace);
   CHECK(entry2.is_valid());
   CHECK_EQ('}', program[entry2.end_pos() - 1]);
   delete sd;
+  delete pd;
 }
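
The switch away from a stack-allocated ParseData makes ownership explicit:
ParseData::FromCachedData returns a heap-allocated object the caller must
delete, and (hedged, inferred from the surrounding usage) may return NULL
when the cached data fails its sanity check. The resulting pattern:

    i::ScriptData* sd = log.GetScriptData();
    i::ParseData* pd = i::ParseData::FromCachedData(sd);  // caller owns pd
    CHECK(pd != NULL);  // NULL would mean the cached data was rejected
    pd->Initialize();
    // ... query function entries ...
    delete sd;
    delete pd;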
 
 
@@ -498,7 +499,7 @@ TEST(PreParseOverflow) {
 
   i::PreParser preparser(&scanner, &log, stack_limit);
   preparser.set_allow_lazy(true);
-  preparser.set_allow_arrow_functions(true);
+  preparser.set_allow_harmony_arrow_functions(true);
   i::PreParser::PreParseResult result = preparser.PreParseProgram();
   CHECK_EQ(i::PreParser::kPreParseStackOverflow, result);
 }
@@ -919,7 +920,7 @@ static int Utf8LengthHelper(const char* s) {
 }
 
 
-TEST(ScopeUsesThisAndArguments) {
+TEST(ScopeUsesArgumentsSuperThis) {
   static const struct {
     const char* prefix;
     const char* suffix;
@@ -928,62 +929,71 @@ TEST(ScopeUsesThisAndArguments) {
     { "var f = () => {", "}" },
   };
 
+  enum Expected {
+    NONE = 0,
+    ARGUMENTS = 1,
+    SUPER_PROPERTY = 2,
+    SUPER_CONSTRUCTOR_CALL = 4,
+    THIS = 8,
+    INNER_ARGUMENTS = 16,
+    INNER_SUPER_PROPERTY = 32,
+    INNER_SUPER_CONSTRUCTOR_CALL = 64,
+    INNER_THIS = 128
+  };
+
   static const struct {
     const char* body;
-    bool uses_this;
-    bool uses_arguments;
-    bool inner_uses_this;
-    bool inner_uses_arguments;
+    int expected;
   } source_data[] = {
-    { "",
-      false, false, false, false },
-    { "return this",
-      true, false, false, false },
-    { "return arguments",
-      false, true, false, false },
-    { "return arguments[0]",
-      false, true, false, false },
-    { "return this + arguments[0]",
-      true, true, false, false },
-    { "return x => this + x",
-      false, false, true, false },
-    { "this.foo = 42;",
-      true, false, false, false },
-    { "this.foo();",
-      true, false, false, false },
-    { "if (foo()) { this.f() }",
-      true, false, false, false },
-    { "if (arguments.length) { this.f() }",
-      true, true, false, false },
-    { "while (true) { this.f() }",
-      true, false, false, false },
-    { "if (true) { while (true) this.foo(arguments) }",
-      true, true, false, false },
-    // Multiple nesting levels must work as well.
-    { "while (true) { while (true) { while (true) return this } }",
-      true, false, false, false },
-    { "if (1) { return () => { while (true) new this() } }",
-      false, false, true, false },
-    // Note that propagation of the inner_uses_this() value does not
-    // cross boundaries of normal functions onto parent scopes.
-    { "return function (x) { return this + x }",
-      false, false, false, false },
-    { "var x = function () { this.foo = 42 };",
-      false, false, false, false },
-    { "if (1) { return function () { while (true) new this() } }",
-      false, false, false, false },
-    { "return function (x) { return () => this }",
-      false, false, false, false },
-    // Flags must be correctly set when using block scoping.
-    { "\"use strict\"; while (true) { let x; this, arguments; }",
-      false, false, true, true },
-    { "\"use strict\"; if (foo()) { let x; this.f() }",
-      false, false, true, false },
-    "\"use strict\"; if (1) {"
-      "  let x; return function () { return this + arguments }"
-      "}",
-      false, false, false, false },
-  };
+        {"", NONE},
+        {"return this", THIS},
+        {"return arguments", ARGUMENTS},
+        {"return super()", SUPER_CONSTRUCTOR_CALL},
+        {"return super.x", SUPER_PROPERTY},
+        {"return arguments[0]", ARGUMENTS},
+        {"return this + arguments[0]", ARGUMENTS | THIS},
+        {"return this + arguments[0] + super.x",
+         ARGUMENTS | SUPER_PROPERTY | THIS},
+        {"return x => this + x", INNER_THIS},
+        {"return x => super() + x", INNER_SUPER_CONSTRUCTOR_CALL},
+        {"this.foo = 42;", THIS},
+        {"this.foo();", THIS},
+        {"if (foo()) { this.f() }", THIS},
+        {"if (foo()) { super.f() }", SUPER_PROPERTY},
+        {"if (arguments.length) { this.f() }", ARGUMENTS | THIS},
+        {"while (true) { this.f() }", THIS},
+        {"while (true) { super.f() }", SUPER_PROPERTY},
+        {"if (true) { while (true) this.foo(arguments) }", ARGUMENTS | THIS},
+        // Multiple nesting levels must work as well.
+        {"while (true) { while (true) { while (true) return this } }", THIS},
+        {"while (true) { while (true) { while (true) return super() } }",
+         SUPER_CONSTRUCTOR_CALL},
+        {"if (1) { return () => { while (true) new this() } }", INNER_THIS},
+        {"if (1) { return () => { while (true) new super() } }", NONE},
+        {"if (1) { return () => { while (true) new new super() } }", NONE},
+        // Note that propagation of the inner_uses_this() value does not
+        // cross boundaries of normal functions onto parent scopes.
+        {"return function (x) { return this + x }", NONE},
+        {"return function (x) { return super() + x }", NONE},
+        {"var x = function () { this.foo = 42 };", NONE},
+        {"var x = function () { super.foo = 42 };", NONE},
+        {"if (1) { return function () { while (true) new this() } }", NONE},
+        {"if (1) { return function () { while (true) new super() } }", NONE},
+        {"return function (x) { return () => this }", NONE},
+        {"return function (x) { return () => super() }", NONE},
+        // Flags must be correctly set when using block scoping.
+        {"\"use strict\"; while (true) { let x; this, arguments; }",
+         INNER_ARGUMENTS | INNER_THIS},
+        {"\"use strict\"; while (true) { let x; this, super(), arguments; }",
+         INNER_ARGUMENTS | INNER_SUPER_CONSTRUCTOR_CALL | INNER_THIS},
+        {"\"use strict\"; if (foo()) { let x; this.f() }", INNER_THIS},
+        {"\"use strict\"; if (foo()) { let x; super.f() }",
+         INNER_SUPER_PROPERTY},
+        {"\"use strict\"; if (1) {"
+         "  let x; return function () { return this + super() + arguments }"
+         "}",
+         NONE},
+    };
 
   i::Isolate* isolate = CcTest::i_isolate();
   i::Factory* factory = isolate->factory();
@@ -1012,7 +1022,8 @@ TEST(ScopeUsesThisAndArguments) {
                                          isolate->heap()->HashSeed(),
                                          isolate->unicode_cache()};
       i::Parser parser(&info, &parse_info);
-      parser.set_allow_arrow_functions(true);
+      parser.set_allow_harmony_arrow_functions(true);
+      parser.set_allow_harmony_classes(true);
       parser.set_allow_harmony_scoping(true);
       info.MarkAsGlobal();
       parser.Parse();
@@ -1020,16 +1031,26 @@ TEST(ScopeUsesThisAndArguments) {
       CHECK(i::Scope::Analyze(&info));
       CHECK(info.function() != NULL);
 
-      i::Scope* global_scope = info.function()->scope();
-      CHECK(global_scope->is_global_scope());
-      CHECK_EQ(1, global_scope->inner_scopes()->length());
-
-      i::Scope* scope = global_scope->inner_scopes()->at(0);
-      CHECK_EQ(source_data[i].uses_this, scope->uses_this());
-      CHECK_EQ(source_data[i].uses_arguments, scope->uses_arguments());
-      CHECK_EQ(source_data[i].inner_uses_this, scope->inner_uses_this());
-      CHECK_EQ(source_data[i].inner_uses_arguments,
+      i::Scope* script_scope = info.function()->scope();
+      CHECK(script_scope->is_script_scope());
+      CHECK_EQ(1, script_scope->inner_scopes()->length());
+
+      i::Scope* scope = script_scope->inner_scopes()->at(0);
+      CHECK_EQ((source_data[i].expected & ARGUMENTS) != 0,
+               scope->uses_arguments());
+      CHECK_EQ((source_data[i].expected & SUPER_PROPERTY) != 0,
+               scope->uses_super_property());
+      CHECK_EQ((source_data[i].expected & SUPER_CONSTRUCTOR_CALL) != 0,
+               scope->uses_super_constructor_call());
+      CHECK_EQ((source_data[i].expected & THIS) != 0, scope->uses_this());
+      CHECK_EQ((source_data[i].expected & INNER_ARGUMENTS) != 0,
                scope->inner_uses_arguments());
+      CHECK_EQ((source_data[i].expected & INNER_SUPER_PROPERTY) != 0,
+               scope->inner_uses_super_property());
+      CHECK_EQ((source_data[i].expected & INNER_SUPER_CONSTRUCTOR_CALL) != 0,
+               scope->inner_uses_super_constructor_call());
+      CHECK_EQ((source_data[i].expected & INNER_THIS) != 0,
+               scope->inner_uses_this());
     }
   }
 }
@@ -1254,7 +1275,7 @@ TEST(ScopePositions) {
     i::Parser parser(&info, &parse_info);
     parser.set_allow_lazy(true);
     parser.set_allow_harmony_scoping(true);
-    parser.set_allow_arrow_functions(true);
+    parser.set_allow_harmony_arrow_functions(true);
     info.MarkAsGlobal();
     info.SetStrictMode(source_data[i].strict_mode);
     parser.Parse();
@@ -1262,7 +1283,7 @@ TEST(ScopePositions) {
 
     // Check scope types and positions.
     i::Scope* scope = info.function()->scope();
-    CHECK(scope->is_global_scope());
+    CHECK(scope->is_script_scope());
     CHECK_EQ(scope->start_position(), 0);
     CHECK_EQ(scope->end_position(), kProgramSize);
     CHECK_EQ(scope->inner_scopes()->length(), 1);
@@ -1328,13 +1349,16 @@ i::Handle<i::String> FormatMessage(i::Vector<unsigned> data) {
 
 enum ParserFlag {
   kAllowLazy,
-  kAllowNativesSyntax,
+  kAllowNatives,
   kAllowHarmonyScoping,
-  kAllowModules,
+  kAllowHarmonyModules,
   kAllowHarmonyNumericLiterals,
-  kAllowArrowFunctions,
-  kAllowClasses,
-  kAllowHarmonyObjectLiterals
+  kAllowHarmonyArrowFunctions,
+  kAllowHarmonyClasses,
+  kAllowHarmonyObjectLiterals,
+  kAllowHarmonyTemplates,
+  kAllowHarmonySloppy,
+  kAllowHarmonyUnicode
 };
 
 
@@ -1348,15 +1372,19 @@ template <typename Traits>
 void SetParserFlags(i::ParserBase<Traits>* parser,
                     i::EnumSet<ParserFlag> flags) {
   parser->set_allow_lazy(flags.Contains(kAllowLazy));
-  parser->set_allow_natives_syntax(flags.Contains(kAllowNativesSyntax));
+  parser->set_allow_natives(flags.Contains(kAllowNatives));
   parser->set_allow_harmony_scoping(flags.Contains(kAllowHarmonyScoping));
-  parser->set_allow_modules(flags.Contains(kAllowModules));
+  parser->set_allow_harmony_modules(flags.Contains(kAllowHarmonyModules));
   parser->set_allow_harmony_numeric_literals(
       flags.Contains(kAllowHarmonyNumericLiterals));
   parser->set_allow_harmony_object_literals(
       flags.Contains(kAllowHarmonyObjectLiterals));
-  parser->set_allow_arrow_functions(flags.Contains(kAllowArrowFunctions));
-  parser->set_allow_classes(flags.Contains(kAllowClasses));
+  parser->set_allow_harmony_arrow_functions(
+      flags.Contains(kAllowHarmonyArrowFunctions));
+  parser->set_allow_harmony_classes(flags.Contains(kAllowHarmonyClasses));
+  parser->set_allow_harmony_templates(flags.Contains(kAllowHarmonyTemplates));
+  parser->set_allow_harmony_sloppy(flags.Contains(kAllowHarmonySloppy));
+  parser->set_allow_harmony_unicode(flags.Contains(kAllowHarmonyUnicode));
 }
 
 
@@ -1367,6 +1395,8 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
   i::Factory* factory = isolate->factory();
 
   uintptr_t stack_limit = isolate->stack_guard()->real_climit();
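+  // Distinct sentinels: if either pass fails to record its count, the
+  // mismatch check at the end of this function fails loudly.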
+  int preparser_materialized_literals = -1;
+  int parser_materialized_literals = -2;
 
   // Preparse the data.
   i::CompleteParserRecorder log;
@@ -1376,7 +1406,8 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
     i::PreParser preparser(&scanner, &log, stack_limit);
     SetParserFlags(&preparser, flags);
     scanner.Initialize(&stream);
-    i::PreParser::PreParseResult result = preparser.PreParseProgram();
+    i::PreParser::PreParseResult result = preparser.PreParseProgram(
+        &preparser_materialized_literals);
     CHECK_EQ(i::PreParser::kPreParseSuccess, result);
   }
 
@@ -1395,6 +1426,9 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
     info.MarkAsGlobal();
     parser.Parse();
     function = info.function();
+    if (function) {
+      parser_materialized_literals = function->materialized_literal_count();
+    }
   }
 
   // Check that preparsing fails iff parsing fails.
@@ -1460,6 +1494,15 @@ void TestParserSyncWithFlags(i::Handle<i::String> source,
         "However, parser and preparser succeeded",
         source->ToCString().get());
     CHECK(false);
+  } else if (preparser_materialized_literals != parser_materialized_literals) {
+    v8::base::OS::Print(
+        "Preparser materialized literals (%d) differ from Parser materialized "
+        "literals (%d) on:\n"
+        "\t%s\n"
+        "However, parser and preparser succeeded",
+        preparser_materialized_literals, parser_materialized_literals,
+        source->ToCString().get());
+    CHECK(false);
   }
 }
 
@@ -1469,7 +1512,9 @@ void TestParserSync(const char* source,
                     size_t varying_flags_length,
                     ParserSyncTestResult result = kSuccessOrError,
                     const ParserFlag* always_true_flags = NULL,
-                    size_t always_true_flags_length = 0) {
+                    size_t always_true_flags_length = 0,
+                    const ParserFlag* always_false_flags = NULL,
+                    size_t always_false_flags_length = 0) {
   i::Handle<i::String> str =
       CcTest::i_isolate()->factory()->NewStringFromAsciiChecked(source);
   for (int bits = 0; bits < (1 << varying_flags_length); bits++) {
@@ -1482,6 +1527,10 @@ void TestParserSync(const char* source,
          ++flag_index) {
       flags.Add(always_true_flags[flag_index]);
     }
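+    // Removing after the Add loop above means a flag listed as both
+    // always-true and always-false ends up false.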
+    for (size_t flag_index = 0; flag_index < always_false_flags_length;
+         ++flag_index) {
+      flags.Remove(always_false_flags[flag_index]);
+    }
     TestParserSyncWithFlags(str, flags, result);
   }
 }
@@ -1516,7 +1565,7 @@ TEST(ParserSync) {
     "if (false) {} else ;",
     "if (false) {} else {}",
     "if (false) {} else 12",
-    "if (false) ;"
+    "if (false) ;",
     "if (false) {}",
     "if (false) 12",
     "do {} while (false)",
@@ -1536,8 +1585,8 @@ TEST(ParserSync) {
     "with ({}) ;",
     "with ({}) {}",
     "with ({}) 12",
-    "switch ({}) { default: }"
-    "label3: "
+    "switch ({}) { default: }",
+    "label3: ",
     "throw",
     "throw  12",
     "throw\n12",
@@ -1564,16 +1613,6 @@ TEST(ParserSync) {
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
       i::GetCurrentStackPosition() - 128 * 1024);
 
-  static const ParserFlag flags1[] = {
-    kAllowArrowFunctions,
-    kAllowClasses,
-    kAllowHarmonyNumericLiterals,
-    kAllowHarmonyObjectLiterals,
-    kAllowHarmonyScoping,
-    kAllowLazy,
-    kAllowModules,
-  };
-
   for (int i = 0; context_data[i][0] != NULL; ++i) {
     for (int j = 0; statement_data[j] != NULL; ++j) {
       for (int k = 0; termination_data[k] != NULL; ++k) {
@@ -1593,7 +1632,7 @@ TEST(ParserSync) {
             termination_data[k],
             context_data[i][1]);
         CHECK(length == kProgramSize);
-        TestParserSync(program.start(), flags1, arraysize(flags1));
+        TestParserSync(program.start(), NULL, 0);
       }
     }
   }
@@ -1605,7 +1644,7 @@ TEST(ParserSync) {
   TestParserSync("0o1234", flags2, arraysize(flags2));
   TestParserSync("0b1011", flags2, arraysize(flags2));
 
-  static const ParserFlag flags3[] = { kAllowNativesSyntax };
+  static const ParserFlag flags3[] = { kAllowNatives };
   TestParserSync("%DebugPrint(123)", flags3, arraysize(flags3));
 }
 
@@ -1639,7 +1678,9 @@ void RunParserSyncTest(const char* context_data[][2],
                        const ParserFlag* flags = NULL,
                        int flags_len = 0,
                        const ParserFlag* always_true_flags = NULL,
-                       int always_true_flags_len = 0) {
+                       int always_true_len = 0,
+                       const ParserFlag* always_false_flags = NULL,
+                       int always_false_len = 0) {
   v8::HandleScope handles(CcTest::isolate());
   v8::Handle<v8::Context> context = v8::Context::New(CcTest::isolate());
   v8::Context::Scope context_scope(context);
@@ -1647,36 +1688,32 @@ void RunParserSyncTest(const char* context_data[][2],
   CcTest::i_isolate()->stack_guard()->SetStackLimit(
       i::GetCurrentStackPosition() - 128 * 1024);
 
+  // Experimental feature flags should not go here; pass the flags as
+  // always_true_flags if the test needs them.
   static const ParserFlag default_flags[] = {
-    kAllowArrowFunctions,
-    kAllowClasses,
-    kAllowHarmonyNumericLiterals,
-    kAllowHarmonyObjectLiterals,
-    kAllowHarmonyScoping,
     kAllowLazy,
-    kAllowModules,
-    kAllowNativesSyntax,
+    kAllowNatives,
   };
   ParserFlag* generated_flags = NULL;
   if (flags == NULL) {
     flags = default_flags;
     flags_len = arraysize(default_flags);
-    if (always_true_flags != NULL) {
-      // Remove always_true_flags from default_flags.
-      CHECK(always_true_flags_len < flags_len);
-      generated_flags = new ParserFlag[flags_len - always_true_flags_len];
+    if (always_true_flags != NULL || always_false_flags != NULL) {
+      // Remove always_true/false_flags from default_flags (if present).
+      CHECK((always_true_flags != NULL) == (always_true_len > 0));
+      CHECK((always_false_flags != NULL) == (always_false_len > 0));
+      generated_flags = new ParserFlag[flags_len + always_true_len];
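+      // Generous upper bound; only the flags surviving the filter below are
+      // actually stored.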
       int flag_index = 0;
       for (int i = 0; i < flags_len; ++i) {
         bool use_flag = true;
-        for (int j = 0; j < always_true_flags_len; ++j) {
-          if (flags[i] == always_true_flags[j]) {
-            use_flag = false;
-            break;
-          }
+        for (int j = 0; use_flag && j < always_true_len; ++j) {
+          if (flags[i] == always_true_flags[j]) use_flag = false;
+        }
+        for (int j = 0; use_flag && j < always_false_len; ++j) {
+          if (flags[i] == always_false_flags[j]) use_flag = false;
         }
         if (use_flag) generated_flags[flag_index++] = flags[i];
       }
-      CHECK(flag_index == flags_len - always_true_flags_len);
       flags_len = flag_index;
       flags = generated_flags;
     }
@@ -1701,7 +1738,9 @@ void RunParserSyncTest(const char* context_data[][2],
                      flags_len,
                      result,
                      always_true_flags,
-                     always_true_flags_len);
+                     always_true_len,
+                     always_false_flags,
+                     always_false_len);
     }
   }
   delete[] generated_flags;
@@ -1808,7 +1847,7 @@ TEST(NoErrorsEvalAndArgumentsStrict) {
     NULL
   };
 
-  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions};
   RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
 }
@@ -1826,6 +1865,13 @@ TEST(NoErrorsEvalAndArgumentsStrict) {
   V(yield)
 
 
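+// Subset of FUTURE_STRICT_RESERVED_WORDS used by the error test below.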
+#define LIMITED_FUTURE_STRICT_RESERVED_WORDS(V) \
+  V(implements)                                 \
+  V(let)                                        \
+  V(static)                                     \
+  V(yield)
+
+
 #define FUTURE_STRICT_RESERVED_STATEMENTS(NAME) \
   "var " #NAME ";",                             \
   "var foo, " #NAME ";",                        \
@@ -1847,28 +1893,24 @@ TEST(ErrorsFutureStrictReservedWords) {
   // it's ok to use future strict reserved words as identifiers. With the strict
   // mode, it isn't.
   const char* context_data[][2] = {
-    { "\"use strict\";", "" },
     { "function test_func() {\"use strict\"; ", "}"},
     { "() => { \"use strict\"; ", "}" },
     { NULL, NULL }
   };
 
   const char* statement_data[] {
-    FUTURE_STRICT_RESERVED_WORDS(FUTURE_STRICT_RESERVED_STATEMENTS)
+    LIMITED_FUTURE_STRICT_RESERVED_WORDS(FUTURE_STRICT_RESERVED_STATEMENTS)
     NULL
   };
 
-  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
-  RunParserSyncTest(context_data, statement_data, kError, NULL, 0, always_flags,
-                    arraysize(always_flags));
-
-  static const ParserFlag classes_flags[] = {
-      kAllowArrowFunctions, kAllowClasses, kAllowHarmonyScoping};
-  RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
-                    classes_flags, arraysize(classes_flags));
+  RunParserSyncTest(context_data, statement_data, kError);
 }
 
 
+#undef LIMITED_FUTURE_STRICT_RESERVED_WORDS
+
+
 TEST(NoErrorsFutureStrictReservedWords) {
   const char* context_data[][2] = {
     { "", "" },
@@ -1882,12 +1924,12 @@ TEST(NoErrorsFutureStrictReservedWords) {
     NULL
   };
 
-  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions};
   RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
 
   static const ParserFlag classes_flags[] = {
-      kAllowArrowFunctions, kAllowClasses, kAllowHarmonyScoping};
+      kAllowHarmonyArrowFunctions, kAllowHarmonyClasses, kAllowHarmonyScoping};
   RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
                     classes_flags, arraysize(classes_flags));
 }
@@ -2300,7 +2342,7 @@ TEST(NoErrorsIllegalWordsAsLabels) {
     NULL
   };
 
-  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions};
   RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
 }
@@ -2321,7 +2363,7 @@ TEST(NoErrorsFutureStrictReservedAsLabelsSloppy) {
   };
 #undef LABELLED_WHILE
 
-  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions};
   RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
 }
@@ -2441,17 +2483,18 @@ TEST(DontRegressPreParserDataSizes) {
     i::ScriptData* sd = NULL;
     info.SetCachedData(&sd, v8::ScriptCompiler::kProduceParserCache);
     i::Parser::Parse(&info, true);
-    i::ParseData pd(sd);
+    i::ParseData* pd = i::ParseData::FromCachedData(sd);
 
-    if (pd.FunctionCount() != test_cases[i].functions) {
+    if (pd->FunctionCount() != test_cases[i].functions) {
       v8::base::OS::Print(
           "Expected preparse data for program:\n"
           "\t%s\n"
           "to contain %d functions, however, received %d functions.\n",
-          program, test_cases[i].functions, pd.FunctionCount());
+          program, test_cases[i].functions, pd->FunctionCount());
       CHECK(false);
     }
     delete sd;
+    delete pd;
   }
 }
 
@@ -2572,9 +2615,9 @@ TEST(Intrinsics) {
     NULL
   };
 
-  // This test requires kAllowNativesSyntax to succeed.
+  // This test requires kAllowNatives to succeed.
   static const ParserFlag always_true_flags[] = {
-    kAllowNativesSyntax
+    kAllowNatives
   };
 
   RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
@@ -3060,11 +3103,11 @@ TEST(SerializationOfMaybeAssignmentFlag) {
   const i::AstRawString* name = avf.GetOneByteString("result");
   i::Handle<i::String> str = name->string();
   CHECK(str->IsInternalizedString());
-  i::Scope* global_scope =
-      new (&zone) i::Scope(NULL, i::GLOBAL_SCOPE, &avf, &zone);
-  global_scope->Initialize();
-  i::Scope* s = i::Scope::DeserializeScopeChain(context, global_scope, &zone);
-  DCHECK(s != global_scope);
+  i::Scope* script_scope =
+      new (&zone) i::Scope(NULL, i::SCRIPT_SCOPE, &avf, &zone);
+  script_scope->Initialize();
+  i::Scope* s = i::Scope::DeserializeScopeChain(context, script_scope, &zone);
+  DCHECK(s != script_scope);
   DCHECK(name != NULL);
 
   // Get result from h's function context (that is f's context)
@@ -3107,11 +3150,11 @@ TEST(IfArgumentsArrayAccessedThenParametersMaybeAssigned) {
   i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
   avf.Internalize(isolate);
 
-  i::Scope* global_scope =
-      new (&zone) i::Scope(NULL, i::GLOBAL_SCOPE, &avf, &zone);
-  global_scope->Initialize();
-  i::Scope* s = i::Scope::DeserializeScopeChain(context, global_scope, &zone);
-  DCHECK(s != global_scope);
+  i::Scope* script_scope =
+      new (&zone) i::Scope(NULL, i::SCRIPT_SCOPE, &avf, &zone);
+  script_scope->Initialize();
+  i::Scope* s = i::Scope::DeserializeScopeChain(context, script_scope, &zone);
+  DCHECK(s != script_scope);
   const i::AstRawString* name_x = avf.GetOneByteString("x");
 
   // Get result from f's function context (that is g's outer context)
@@ -3154,11 +3197,11 @@ TEST(ExportsMaybeAssigned) {
   i::AstValueFactory avf(&zone, isolate->heap()->HashSeed());
   avf.Internalize(isolate);
 
-  i::Scope* global_scope =
-      new (&zone) i::Scope(NULL, i::GLOBAL_SCOPE, &avf, &zone);
-  global_scope->Initialize();
-  i::Scope* s = i::Scope::DeserializeScopeChain(context, global_scope, &zone);
-  DCHECK(s != global_scope);
+  i::Scope* script_scope =
+      new (&zone) i::Scope(NULL, i::SCRIPT_SCOPE, &avf, &zone);
+  script_scope->Initialize();
+  i::Scope* s = i::Scope::DeserializeScopeChain(context, script_scope, &zone);
+  DCHECK(s != script_scope);
   const i::AstRawString* name_x = avf.GetOneByteString("x");
   const i::AstRawString* name_f = avf.GetOneByteString("f");
   const i::AstRawString* name_y = avf.GetOneByteString("y");
@@ -3459,7 +3502,7 @@ TEST(ErrorsArrowFunctions) {
 
   // The test is quite slow, so run it with a reduced set of flags.
   static const ParserFlag flags[] = {kAllowLazy, kAllowHarmonyScoping};
-  static const ParserFlag always_flags[] = { kAllowArrowFunctions };
+  static const ParserFlag always_flags[] = { kAllowHarmonyArrowFunctions };
   RunParserSyncTest(context_data, statement_data, kError, flags,
                     arraysize(flags), always_flags, arraysize(always_flags));
 }
@@ -3513,7 +3556,7 @@ TEST(NoErrorsArrowFunctions) {
     NULL
   };
 
-  static const ParserFlag always_flags[] = {kAllowArrowFunctions};
+  static const ParserFlag always_flags[] = {kAllowHarmonyArrowFunctions};
   RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
 }
@@ -3538,7 +3581,7 @@ TEST(NoErrorsSuper) {
     "z.super",  // Ok, property lookup.
     NULL};
 
-  static const ParserFlag always_flags[] = {kAllowClasses};
+  static const ParserFlag always_flags[] = {kAllowHarmonyClasses};
   RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
 }
@@ -3557,7 +3600,7 @@ TEST(ErrorsSuper) {
     "f(super)",
     NULL};
 
-  static const ParserFlag always_flags[] = {kAllowClasses};
+  static const ParserFlag always_flags[] = {kAllowHarmonyClasses};
   RunParserSyncTest(context_data, statement_data, kError, NULL, 0,
                     always_flags, arraysize(always_flags));
 }
@@ -3740,7 +3783,8 @@ TEST(ClassExpressionNoErrors) {
     "class name extends class base {} {}",
     NULL};
 
-  static const ParserFlag always_flags[] = {kAllowClasses};
+  static const ParserFlag always_flags[] = {
+      kAllowHarmonyClasses, kAllowHarmonySloppy};
   RunParserSyncTest(context_data, class_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
 }
@@ -3759,7 +3803,8 @@ TEST(ClassDeclarationNoErrors) {
     "class name extends class base {} {}",
     NULL};
 
-  static const ParserFlag always_flags[] = {kAllowClasses};
+  static const ParserFlag always_flags[] = {
+      kAllowHarmonyClasses, kAllowHarmonySloppy};
   RunParserSyncTest(context_data, statement_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
 }
@@ -3803,8 +3848,9 @@ TEST(ClassBodyNoErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -3861,8 +3907,9 @@ TEST(ClassPropertyNameNoErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, name_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -3892,8 +3939,9 @@ TEST(ClassExpressionErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_data, kError, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -3929,8 +3977,9 @@ TEST(ClassDeclarationErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyNumericLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyNumericLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_data, kError, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -3959,8 +4008,9 @@ TEST(ClassNameErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_name, kError, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -3992,8 +4042,9 @@ TEST(ClassGetterParamNameErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_name, kError, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -4020,8 +4071,9 @@ TEST(ClassStaticPrototypeErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -4047,8 +4099,9 @@ TEST(ClassSpecialConstructorErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -4069,8 +4122,9 @@ TEST(ClassConstructorNoErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -4087,8 +4141,9 @@ TEST(ClassMultipleConstructorErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -4109,8 +4164,9 @@ TEST(ClassMultiplePropertyNamesNoErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_body_data, kSuccess, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -4129,8 +4185,9 @@ TEST(ClassesAreStrictErrors) {
     NULL};
 
   static const ParserFlag always_flags[] = {
-    kAllowClasses,
-    kAllowHarmonyObjectLiterals
+    kAllowHarmonyClasses,
+    kAllowHarmonyObjectLiterals,
+    kAllowHarmonySloppy
   };
   RunParserSyncTest(context_data, class_body_data, kError, NULL, 0,
                     always_flags, arraysize(always_flags));
@@ -4302,7 +4359,226 @@ TEST(InvalidUnicodeEscapes) {
     "var foob\\u123r = 0;",
     "var \\u123roo = 0;",
     "\"foob\\u123rr\"",
-    "/regex/g\\u123r",
+    // No escapes allowed in regexp flags
+    "/regex/\\u0069g",
+    "/regex/\\u006g",
+    // Braces gone wrong
+    "var foob\\u{c481r = 0;",
+    "var foob\\uc481}r = 0;",
+    "var \\u{0052oo = 0;",
+    "var \\u0052}oo = 0;",
+    "\"foob\\u{c481r\"",
+    "var foob\\u{}ar = 0;",
+    // Value too high for a unicode escape
+    "\"\\u{110000}\"",
+    // Not a unicode escape
+    "var foob\\v1234r = 0;",
+    "var foob\\U1234r = 0;",
+    "var foob\\v{1234}r = 0;",
+    "var foob\\U{1234}r = 0;",
+    NULL};
+  static const ParserFlag always_flags[] = {kAllowHarmonyUnicode};
+  RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+                    arraysize(always_flags));
+}
+
+
+TEST(UnicodeEscapes) {
+  const char* context_data[][2] = {{"", ""},
+                                   {"'use strict';", ""},
+                                   {NULL, NULL}};
+  const char* data[] = {
+    // Identifier starting with escape
+    "var \\u0052oo = 0;",
+    "var \\u{0052}oo = 0;",
+    "var \\u{52}oo = 0;",
+    "var \\u{00000000052}oo = 0;",
+    // Identifier with an escape but not starting with an escape
+    "var foob\\uc481r = 0;",
+    "var foob\\u{c481}r = 0;",
+    // String with an escape
+    "\"foob\\uc481r\"",
+    "\"foob\\u{c481}r\"",
+    // A valid unicode character that is representable as a surrogate pair
+    // but not as a single 4-hex-digit escape.
+    "\"foo\\u{10e6d}\"",
+    // Max value for the unicode escape
+    "\"\\u{10ffff}\"",
+    NULL};
+  static const ParserFlag always_flags[] = {kAllowHarmonyUnicode};
+  RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+                    arraysize(always_flags));
+}
+
+
+TEST(ScanTemplateLiterals) {
+  const char* context_data[][2] = {{"'use strict';", ""},
+                                   {"function foo(){ 'use strict';"
+                                    "  var a, b, c; return ", "}"},
+                                   {NULL, NULL}};
+
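+  // Substitutions may contain comments and CR, LF, or CRLF line breaks; the
+  // scanner has to resume the enclosing template literal after each one.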
+  const char* data[] = {
+      "``",
+      "`no-subst-template`",
+      "`template-head${a}`",
+      "`${a}`",
+      "`${a}template-tail`",
+      "`template-head${a}template-tail`",
+      "`${a}${b}${c}`",
+      "`a${a}b${b}c${c}`",
+      "`${a}a${b}b${c}c`",
+      "`foo\n\nbar\r\nbaz`",
+      "`foo\n\n${  bar  }\r\nbaz`",
+      "`foo${a /* comment */}`",
+      "`foo${a // comment\n}`",
+      "`foo${a \n}`",
+      "`foo${a \r\n}`",
+      "`foo${a \r}`",
+      "`foo${/* comment */ a}`",
+      "`foo${// comment\na}`",
+      "`foo${\n a}`",
+      "`foo${\r\n a}`",
+      "`foo${\r a}`",
+      "`foo${'a' in a}`",
+      NULL};
+  static const ParserFlag always_flags[] = {kAllowHarmonyTemplates};
+  RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+                    arraysize(always_flags));
+}
+
+
+TEST(ScanTaggedTemplateLiterals) {
+  const char* context_data[][2] = {{"'use strict';", ""},
+                                   {"function foo(){ 'use strict';"
+                                    "  function tag() {}"
+                                    "  var a, b, c; return ", "}"},
+                                   {NULL, NULL}};
+
+  const char* data[] = {
+      "tag ``",
+      "tag `no-subst-template`",
+      "tag`template-head${a}`",
+      "tag `${a}`",
+      "tag `${a}template-tail`",
+      "tag   `template-head${a}template-tail`",
+      "tag\n`${a}${b}${c}`",
+      "tag\r\n`a${a}b${b}c${c}`",
+      "tag    `${a}a${b}b${c}c`",
+      "tag\t`foo\n\nbar\r\nbaz`",
+      "tag\r`foo\n\n${  bar  }\r\nbaz`",
+      "tag`foo${a /* comment */}`",
+      "tag`foo${a // comment\n}`",
+      "tag`foo${a \n}`",
+      "tag`foo${a \r\n}`",
+      "tag`foo${a \r}`",
+      "tag`foo${/* comment */ a}`",
+      "tag`foo${// comment\na}`",
+      "tag`foo${\n a}`",
+      "tag`foo${\r\n a}`",
+      "tag`foo${\r a}`",
+      "tag`foo${'a' in a}`",
+      NULL};
+  static const ParserFlag always_flags[] = {kAllowHarmonyTemplates};
+  RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+                    arraysize(always_flags));
+}
+
+
+TEST(TemplateMaterializedLiterals) {
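+  // The parser and preparser must report identical materialized literal
+  // counts for templates; TestParserSyncWithFlags compares the two.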
+  const char* context_data[][2] = {
+    {
+      "'use strict';\n"
+      "function tag() {}\n"
+      "var a, b, c;\n"
+      "(", ")"
+    },
+    {NULL, NULL}
+  };
+
+  const char* data[] = {
+    "tag``",
+    "tag`a`",
+    "tag`a${1}b`",
+    "tag`a${1}b${2}c`",
+    "``",
+    "`a`",
+    "`a${1}b`",
+    "`a${1}b${2}c`",
+    NULL
+  };
+
+  static const ParserFlag always_flags[] = {kAllowHarmonyTemplates};
+  RunParserSyncTest(context_data, data, kSuccess, NULL, 0, always_flags,
+                    arraysize(always_flags));
+}
+
+
+TEST(ScanUnterminatedTemplateLiterals) {
+  const char* context_data[][2] = {{"'use strict';", ""},
+                                   {"function foo(){ 'use strict';"
+                                    "  var a, b, c; return ", "}"},
+                                   {NULL, NULL}};
+
+  const char* data[] = {
+      "`no-subst-template",
+      "`template-head${a}",
+      "`${a}template-tail",
+      "`template-head${a}template-tail",
+      "`${a}${b}${c}",
+      "`a${a}b${b}c${c}",
+      "`${a}a${b}b${c}c",
+      "`foo\n\nbar\r\nbaz",
+      "`foo\n\n${  bar  }\r\nbaz",
+      "`foo${a /* comment } */`",
+      "`foo${a /* comment } `*/",
+      "`foo${a // comment}`",
+      "`foo${a \n`",
+      "`foo${a \r\n`",
+      "`foo${a \r`",
+      "`foo${/* comment */ a`",
+      "`foo${// commenta}`",
+      "`foo${\n a`",
+      "`foo${\r\n a`",
+      "`foo${\r a`",
+      "`foo${fn(}`",
+      "`foo${1 if}`",
+      NULL};
+  static const ParserFlag always_flags[] = {kAllowHarmonyTemplates};
+  RunParserSyncTest(context_data, data, kError, NULL, 0, always_flags,
+                    arraysize(always_flags));
+}
+
+
+TEST(LexicalScopingSloppyMode) {
+  const char* context_data[][2] = {
+      {"", ""},
+      {"function f() {", "}"},
+      {"{", "}"},
+      {NULL, NULL}};
+  const char* bad_data[] = {
+    "let x = 1;",
+    "for(let x = 1;;){}",
+    "for(let x of []){}",
+    "for(let x in []){}",
+    "class C {}",
+    "class C extends D {}",
+    "(class {})",
+    "(class extends D {})",
+    "(class C {})",
+    "(class C extends D {})",
+    NULL};
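+  // With kAllowHarmonySloppy pinned off, lexical declarations and classes
+  // must remain errors outside strict mode even though scoping and classes
+  // are enabled.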
+  static const ParserFlag always_true_flags[] = {
+      kAllowHarmonyScoping, kAllowHarmonyClasses};
+  static const ParserFlag always_false_flags[] = {kAllowHarmonySloppy};
+  RunParserSyncTest(context_data, bad_data, kError, NULL, 0,
+                    always_true_flags, arraysize(always_true_flags),
+                    always_false_flags, arraysize(always_false_flags));
+
+  const char* good_data[] = {
+    "let = 1;",
+    "for(let = 1;;){}",
     NULL};
-  RunParserSyncTest(context_data, data, kError);
+  RunParserSyncTest(context_data, good_data, kSuccess, NULL, 0,
+                    always_true_flags, arraysize(always_true_flags),
+                    always_false_flags, arraysize(always_false_flags));
 }
index 6a0e24a..acd904f 100644 (file)
@@ -114,7 +114,7 @@ TEST(ExternalReferenceDecoder) {
 }
 
 
-void WritePayload(const List<byte>& payload, const char* file_name) {
+void WritePayload(const Vector<const byte>& payload, const char* file_name) {
   FILE* file = v8::base::OS::FOpen(file_name, "wb");
   if (file == NULL) {
     PrintF("Unable to write to snapshot file \"%s\"\n", file_name);
@@ -129,50 +129,12 @@ void WritePayload(const List<byte>& payload, const char* file_name) {
 }
 
 
-void WriteSpaceUsed(Serializer* ser, const char* file_name) {
-  int file_name_length = StrLength(file_name) + 10;
-  Vector<char> name = Vector<char>::New(file_name_length + 1);
-  SNPrintF(name, "%s.size", file_name);
-  FILE* fp = v8::base::OS::FOpen(name.start(), "w");
-  name.Dispose();
-
-  Vector<const uint32_t> chunks = ser->FinalAllocationChunks(NEW_SPACE);
-  CHECK_EQ(1, chunks.length());
-  fprintf(fp, "new %d\n", chunks[0]);
-  chunks = ser->FinalAllocationChunks(OLD_POINTER_SPACE);
-  CHECK_EQ(1, chunks.length());
-  fprintf(fp, "pointer %d\n", chunks[0]);
-  chunks = ser->FinalAllocationChunks(OLD_DATA_SPACE);
-  CHECK_EQ(1, chunks.length());
-  fprintf(fp, "data %d\n", chunks[0]);
-  chunks = ser->FinalAllocationChunks(CODE_SPACE);
-  CHECK_EQ(1, chunks.length());
-  fprintf(fp, "code %d\n", chunks[0]);
-  chunks = ser->FinalAllocationChunks(MAP_SPACE);
-  CHECK_EQ(1, chunks.length());
-  fprintf(fp, "map %d\n", chunks[0]);
-  chunks = ser->FinalAllocationChunks(CELL_SPACE);
-  CHECK_EQ(1, chunks.length());
-  fprintf(fp, "cell %d\n", chunks[0]);
-  chunks = ser->FinalAllocationChunks(PROPERTY_CELL_SPACE);
-  CHECK_EQ(1, chunks.length());
-  fprintf(fp, "property cell %d\n", chunks[0]);
-  chunks = ser->FinalAllocationChunks(LO_SPACE);
-  CHECK_EQ(1, chunks.length());
-  fprintf(fp, "lo %d\n", chunks[0]);
-  fclose(fp);
-}
-
-
 static bool WriteToFile(Isolate* isolate, const char* snapshot_file) {
   SnapshotByteSink sink;
   StartupSerializer ser(isolate, &sink);
   ser.Serialize();
-  ser.FinalizeAllocation();
-
-  WritePayload(sink.data(), snapshot_file);
-  WriteSpaceUsed(&ser, snapshot_file);
-
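+  // SnapshotData bundles the payload with the serializer's space
+  // reservations, replacing the "<file>.size" bookkeeping removed above.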
+  SnapshotData snapshot_data(sink, ser);
+  WritePayload(snapshot_data.RawData(), snapshot_file);
   return true;
 }
 
@@ -220,53 +182,14 @@ UNINITIALIZED_TEST(SerializeTwice) {
 //----------------------------------------------------------------------------
 // Tests that the heap can be deserialized.
 
-
-static void ReserveSpaceForSnapshot(Deserializer* deserializer,
-                                    const char* file_name) {
-  int file_name_length = StrLength(file_name) + 10;
-  Vector<char> name = Vector<char>::New(file_name_length + 1);
-  SNPrintF(name, "%s.size", file_name);
-  FILE* fp = v8::base::OS::FOpen(name.start(), "r");
-  name.Dispose();
-  int new_size, pointer_size, data_size, code_size, map_size, cell_size,
-      property_cell_size, lo_size;
-#if V8_CC_MSVC
-  // Avoid warning about unsafe fscanf from MSVC.
-  // Please note that this is only fine if %c and %s are not being used.
-#define fscanf fscanf_s
-#endif
-  CHECK_EQ(1, fscanf(fp, "new %d\n", &new_size));
-  CHECK_EQ(1, fscanf(fp, "pointer %d\n", &pointer_size));
-  CHECK_EQ(1, fscanf(fp, "data %d\n", &data_size));
-  CHECK_EQ(1, fscanf(fp, "code %d\n", &code_size));
-  CHECK_EQ(1, fscanf(fp, "map %d\n", &map_size));
-  CHECK_EQ(1, fscanf(fp, "cell %d\n", &cell_size));
-  CHECK_EQ(1, fscanf(fp, "property cell %d\n", &property_cell_size));
-  CHECK_EQ(1, fscanf(fp, "lo %d\n", &lo_size));
-#if V8_CC_MSVC
-#undef fscanf
-#endif
-  fclose(fp);
-  deserializer->AddReservation(NEW_SPACE, new_size);
-  deserializer->AddReservation(OLD_POINTER_SPACE, pointer_size);
-  deserializer->AddReservation(OLD_DATA_SPACE, data_size);
-  deserializer->AddReservation(CODE_SPACE, code_size);
-  deserializer->AddReservation(MAP_SPACE, map_size);
-  deserializer->AddReservation(CELL_SPACE, cell_size);
-  deserializer->AddReservation(PROPERTY_CELL_SPACE, property_cell_size);
-  deserializer->AddReservation(LO_SPACE, lo_size);
-}
-
-
 v8::Isolate* InitializeFromFile(const char* snapshot_file) {
   int len;
   byte* str = ReadBytes(snapshot_file, &len);
   if (!str) return NULL;
   v8::Isolate* v8_isolate = NULL;
   {
-    SnapshotByteSource source(str, len);
-    Deserializer deserializer(&source);
-    ReserveSpaceForSnapshot(&deserializer, snapshot_file);
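+    // Reservations now travel inside the snapshot blob, so there is no
+    // separate ReserveSpaceForSnapshot step.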
+    SnapshotData snapshot_data(Vector<const byte>(str, len));
+    Deserializer deserializer(&snapshot_data);
     Isolate* isolate = Isolate::NewForTesting();
     v8_isolate = reinterpret_cast<v8::Isolate*>(isolate);
     v8::Isolate::Scope isolate_scope(v8_isolate);
@@ -436,14 +359,11 @@ UNINITIALIZED_TEST(PartialSerialization) {
 
       startup_serializer.SerializeWeakReferences();
 
-      partial_serializer.FinalizeAllocation();
-      startup_serializer.FinalizeAllocation();
-
-      WritePayload(partial_sink.data(), FLAG_testing_serialization_file);
-      WritePayload(startup_sink.data(), startup_name.start());
+      SnapshotData startup_snapshot(startup_sink, startup_serializer);
+      SnapshotData partial_snapshot(partial_sink, partial_serializer);
 
-      WriteSpaceUsed(&partial_serializer, FLAG_testing_serialization_file);
-      WriteSpaceUsed(&startup_serializer, startup_name.start());
+      WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
+      WritePayload(startup_snapshot.RawData(), startup_name.start());
 
       startup_name.Dispose();
     }
@@ -473,9 +393,8 @@ UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
       Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
       Object* root;
       {
-        SnapshotByteSource source(snapshot, snapshot_size);
-        Deserializer deserializer(&source);
-        ReserveSpaceForSnapshot(&deserializer, file_name);
+        SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+        Deserializer deserializer(&snapshot_data);
         deserializer.DeserializePartial(isolate, &root);
         CHECK(root->IsString());
       }
@@ -485,9 +404,8 @@ UNINITIALIZED_DEPENDENT_TEST(PartialDeserialization, PartialSerialization) {
 
       Object* root2;
       {
-        SnapshotByteSource source(snapshot, snapshot_size);
-        Deserializer deserializer(&source);
-        ReserveSpaceForSnapshot(&deserializer, file_name);
+        SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+        Deserializer deserializer(&snapshot_data);
         deserializer.DeserializePartial(isolate, &root2);
         CHECK(root2->IsString());
         CHECK(*root_handle == root2);
@@ -552,14 +470,11 @@ UNINITIALIZED_TEST(ContextSerialization) {
       partial_serializer.Serialize(&raw_context);
       startup_serializer.SerializeWeakReferences();
 
-      partial_serializer.FinalizeAllocation();
-      startup_serializer.FinalizeAllocation();
-
-      WritePayload(partial_sink.data(), FLAG_testing_serialization_file);
-      WritePayload(startup_sink.data(), startup_name.start());
+      SnapshotData startup_snapshot(startup_sink, startup_serializer);
+      SnapshotData partial_snapshot(partial_sink, partial_serializer);
 
-      WriteSpaceUsed(&partial_serializer, FLAG_testing_serialization_file);
-      WriteSpaceUsed(&startup_serializer, startup_name.start());
+      WritePayload(partial_snapshot.RawData(), FLAG_testing_serialization_file);
+      WritePayload(startup_snapshot.RawData(), startup_name.start());
 
       startup_name.Dispose();
     }
@@ -588,9 +503,8 @@ UNINITIALIZED_DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
       Isolate* isolate = reinterpret_cast<Isolate*>(v8_isolate);
       Object* root;
       {
-        SnapshotByteSource source(snapshot, snapshot_size);
-        Deserializer deserializer(&source);
-        ReserveSpaceForSnapshot(&deserializer, file_name);
+        SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+        Deserializer deserializer(&snapshot_data);
         deserializer.DeserializePartial(isolate, &root);
         CHECK(root->IsContext());
       }
@@ -600,9 +514,8 @@ UNINITIALIZED_DEPENDENT_TEST(ContextDeserialization, ContextSerialization) {
 
       Object* root2;
       {
-        SnapshotByteSource source(snapshot, snapshot_size);
-        Deserializer deserializer(&source);
-        ReserveSpaceForSnapshot(&deserializer, file_name);
+        SnapshotData snapshot_data(Vector<const byte>(snapshot, snapshot_size));
+        Deserializer deserializer(&snapshot_data);
         deserializer.DeserializePartial(isolate, &root2);
         CHECK(root2->IsContext());
         CHECK(*root_handle != root2);
@@ -946,12 +859,15 @@ TEST(SerializeToplevelThreeBigStrings) {
   CHECK_EQ(600000 + 700000, CompileRun("(a + b).length")->Int32Value());
   CHECK_EQ(500000 + 600000, CompileRun("(b + c).length")->Int32Value());
   Heap* heap = isolate->heap();
-  CHECK(heap->InSpace(*v8::Utils::OpenHandle(*CompileRun("a")->ToString()),
-                      OLD_DATA_SPACE));
-  CHECK(heap->InSpace(*v8::Utils::OpenHandle(*CompileRun("b")->ToString()),
-                      OLD_DATA_SPACE));
-  CHECK(heap->InSpace(*v8::Utils::OpenHandle(*CompileRun("c")->ToString()),
-                      OLD_DATA_SPACE));
+  CHECK(heap->InSpace(
+      *v8::Utils::OpenHandle(*CompileRun("a")->ToString(CcTest::isolate())),
+      OLD_DATA_SPACE));
+  CHECK(heap->InSpace(
+      *v8::Utils::OpenHandle(*CompileRun("b")->ToString(CcTest::isolate())),
+      OLD_DATA_SPACE));
+  CHECK(heap->InSpace(
+      *v8::Utils::OpenHandle(*CompileRun("c")->ToString(CcTest::isolate())),
+      OLD_DATA_SPACE));
 
   delete cache;
   source_a.Dispose();
@@ -1208,7 +1124,7 @@ TEST(SerializeToplevelIsolates) {
         buffer, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
 
     v8::Local<v8::Value> result = script->BindToCurrentContext()->Run();
-    CHECK(result->ToString()->Equals(v8_str("abcdef")));
+    CHECK(result->ToString(isolate1)->Equals(v8_str("abcdef")));
   }
   isolate1->Dispose();
 
@@ -1231,15 +1147,16 @@ TEST(SerializeToplevelIsolates) {
       script = v8::ScriptCompiler::CompileUnbound(
           isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache);
     }
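+    // The cached code produced by isolate1 must be accepted here rather than
+    // silently rejected and recompiled.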
+    CHECK(!cache->rejected);
     v8::Local<v8::Value> result = script->BindToCurrentContext()->Run();
-    CHECK(result->ToString()->Equals(v8_str("abcdef")));
+    CHECK(result->ToString(isolate2)->Equals(v8_str("abcdef")));
   }
   DCHECK(toplevel_test_code_event_found);
   isolate2->Dispose();
 }
 
 
-TEST(Bug3628) {
+TEST(SerializeWithHarmonyScoping) {
   FLAG_serialize_toplevel = true;
   FLAG_harmony_scoping = true;
 
@@ -1273,7 +1190,7 @@ TEST(Bug3628) {
         buffer, data->length, v8::ScriptCompiler::CachedData::BufferOwned);
 
     v8::Local<v8::Value> result = script->BindToCurrentContext()->Run();
-    CHECK(result->ToString()->Equals(v8_str("XY")));
+    CHECK(result->ToString(isolate1)->Equals(v8_str("XY")));
   }
   isolate1->Dispose();
 
@@ -1298,7 +1215,7 @@ TEST(Bug3628) {
           isolate2, &source, v8::ScriptCompiler::kConsumeCodeCache);
     }
     v8::Local<v8::Value> result = script->BindToCurrentContext()->Run();
-    CHECK(result->ToString()->Equals(v8_str("XY")));
+    CHECK(result->ToString(isolate2)->Equals(v8_str("XY")));
   }
   isolate2->Dispose();
 }
index 9ad4423..a84b867 100644 (file)
@@ -216,13 +216,14 @@ TEST(Regress3540) {
   if (!code_range->SetUp(
           code_range_size +
           RoundUp(v8::base::OS::CommitPageSize() * kReservedCodeRangePages,
-                  MemoryChunk::kAlignment))) {
+                  MemoryChunk::kAlignment) +
+          v8::internal::MemoryAllocator::CodePageAreaSize())) {
     return;
   }
   Address address;
   size_t size;
-  address = code_range->AllocateRawMemory(code_range_size - MB,
-                                          code_range_size - MB, &size);
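+  // Request 2 MB below the full range so the allocation leaves room for the
+  // reserved code pages added to the SetUp size above.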
+  address = code_range->AllocateRawMemory(code_range_size - 2 * MB,
+                                          code_range_size - 2 * MB, &size);
   CHECK(address != NULL);
   Address null_address;
   size_t null_size;
index e01c767..d1f23f7 100644 (file)
@@ -1020,11 +1020,13 @@ TEST(JSONStringifySliceMadeExternal) {
   // into a two-byte external string.  Check that JSON.stringify works.
   v8::HandleScope handle_scope(CcTest::isolate());
   v8::Handle<v8::String> underlying =
-      CompileRun("var underlying = 'abcdefghijklmnopqrstuvwxyz';"
-                 "underlying")->ToString();
-  v8::Handle<v8::String> slice =
-      CompileRun("var slice = underlying.slice(1);"
-                 "slice")->ToString();
+      CompileRun(
+          "var underlying = 'abcdefghijklmnopqrstuvwxyz';"
+          "underlying")->ToString(CcTest::isolate());
+  v8::Handle<v8::String> slice = CompileRun(
+                                     "var slice = '';"
+                                     "slice = underlying.slice(1);"
+                                     "slice")->ToString(CcTest::isolate());
   CHECK(v8::Utils::OpenHandle(*slice)->IsSlicedString());
   CHECK(v8::Utils::OpenHandle(*underlying)->IsSeqOneByteString());
 
@@ -1082,7 +1084,7 @@ TEST(CachedHashOverflow) {
     CHECK_EQ(results[i]->IsNumber(), result->IsNumber());
     if (result->IsNumber()) {
       CHECK_EQ(Object::ToSmi(isolate, results[i]).ToHandleChecked()->value(),
-               result->ToInt32()->Value());
+               result->ToInt32(CcTest::isolate())->Value());
     }
   }
 }
@@ -1182,7 +1184,7 @@ TEST(SliceFromSlice) {
   v8::Local<v8::Value> result;
   Handle<String> string;
   const char* init = "var str = 'abcdefghijklmnopqrstuvwxyz';";
-  const char* slice = "var slice = str.slice(1,-1); slice";
+  const char* slice = "var slice = ''; slice = str.slice(1,-1); slice";
   const char* slice_from_slice = "slice.slice(1,-1);";
 
   CompileRun(init);
@@ -1313,7 +1315,7 @@ TEST(CountBreakIterator) {
       "  return iterator.next();"
       "})();");
   CHECK(result->IsNumber());
-  int uses = result->ToInt32()->Value() == 0 ? 0 : 1;
+  int uses = result->ToInt32(CcTest::isolate())->Value() == 0 ? 0 : 1;
   CHECK_EQ(uses, use_counts[v8::Isolate::kBreakIterator]);
   // Make sure GC cleans up the break iterator, so we don't get a memory leak
   // reported by ASAN.
index 94e230c..6bcdb35 100644 (file)
@@ -30,6 +30,24 @@ static void ConnectTransition(Handle<Map> parent,
 }
 
 
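+// A PropertyDetails type is encoded either through the legacy TypeField or
+// through the KindField/LocationField pair; the two encodings must agree
+// exactly.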
+static void CheckPropertyDetailsFieldsConsistency(PropertyType type,
+                                                  PropertyKind kind,
+                                                  PropertyLocation location) {
+  int type_value = PropertyDetails::TypeField::encode(type);
+  int kind_location_value = PropertyDetails::KindField::encode(kind) |
+                            PropertyDetails::LocationField::encode(location);
+  CHECK_EQ(type_value, kind_location_value);
+}
+
+
+TEST(PropertyDetailsFieldsConsistency) {
+  CheckPropertyDetailsFieldsConsistency(FIELD, DATA, IN_OBJECT);
+  CheckPropertyDetailsFieldsConsistency(CONSTANT, DATA, IN_DESCRIPTOR);
+  CheckPropertyDetailsFieldsConsistency(ACCESSOR_FIELD, ACCESSOR, IN_OBJECT);
+  CheckPropertyDetailsFieldsConsistency(CALLBACKS, ACCESSOR, IN_DESCRIPTOR);
+}
+
+
 TEST(TransitionArray_SimpleFieldTransitions) {
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
@@ -59,7 +77,7 @@ TEST(TransitionArray_SimpleFieldTransitions) {
       transitions->Insert(map0, name1, map1, SIMPLE_PROPERTY_TRANSITION);
   ConnectTransition(map0, transitions, map1);
   CHECK(transitions->IsSimpleTransition());
-  transition = transitions->Search(FIELD, *name1, attributes);
+  transition = transitions->Search(DATA, *name1, attributes);
   CHECK_EQ(TransitionArray::kSimpleTransitionIndex, transition);
   CHECK_EQ(*name1, transitions->GetKey(transition));
   CHECK_EQ(*map1, transitions->GetTarget(transition));
@@ -69,11 +87,11 @@ TEST(TransitionArray_SimpleFieldTransitions) {
   ConnectTransition(map0, transitions, map2);
   CHECK(transitions->IsFullTransitionArray());
 
-  transition = transitions->Search(FIELD, *name1, attributes);
+  transition = transitions->Search(DATA, *name1, attributes);
   CHECK_EQ(*name1, transitions->GetKey(transition));
   CHECK_EQ(*map1, transitions->GetTarget(transition));
 
-  transition = transitions->Search(FIELD, *name2, attributes);
+  transition = transitions->Search(DATA, *name2, attributes);
   CHECK_EQ(*name2, transitions->GetKey(transition));
   CHECK_EQ(*map2, transitions->GetTarget(transition));
 
@@ -109,7 +127,7 @@ TEST(TransitionArray_FullFieldTransitions) {
   transitions = transitions->Insert(map0, name1, map1, PROPERTY_TRANSITION);
   ConnectTransition(map0, transitions, map1);
   CHECK(transitions->IsFullTransitionArray());
-  transition = transitions->Search(FIELD, *name1, attributes);
+  transition = transitions->Search(DATA, *name1, attributes);
   CHECK_EQ(*name1, transitions->GetKey(transition));
   CHECK_EQ(*map1, transitions->GetTarget(transition));
 
@@ -117,11 +135,11 @@ TEST(TransitionArray_FullFieldTransitions) {
   ConnectTransition(map0, transitions, map2);
   CHECK(transitions->IsFullTransitionArray());
 
-  transition = transitions->Search(FIELD, *name1, attributes);
+  transition = transitions->Search(DATA, *name1, attributes);
   CHECK_EQ(*name1, transitions->GetKey(transition));
   CHECK_EQ(*map1, transitions->GetTarget(transition));
 
-  transition = transitions->Search(FIELD, *name2, attributes);
+  transition = transitions->Search(DATA, *name2, attributes);
   CHECK_EQ(*name2, transitions->GetKey(transition));
   CHECK_EQ(*map2, transitions->GetTarget(transition));
 
@@ -161,7 +179,7 @@ TEST(TransitionArray_DifferentFieldNames) {
   }
 
   for (int i = 0; i < PROPS_COUNT; i++) {
-    int transition = transitions->Search(FIELD, *names[i], attributes);
+    int transition = transitions->Search(DATA, *names[i], attributes);
     CHECK_EQ(*names[i], transitions->GetKey(transition));
     CHECK_EQ(*maps[i], transitions->GetTarget(transition));
   }
@@ -204,7 +222,7 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributesSimple) {
   for (int i = 0; i < ATTRS_COUNT; i++) {
     PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
 
-    int transition = transitions->Search(FIELD, *name, attributes);
+    int transition = transitions->Search(DATA, *name, attributes);
     CHECK_EQ(*name, transitions->GetKey(transition));
     CHECK_EQ(*attr_maps[i], transitions->GetTarget(transition));
   }
@@ -267,14 +285,14 @@ TEST(TransitionArray_SameFieldNamesDifferentAttributes) {
   for (int i = 0; i < ATTRS_COUNT; i++) {
     PropertyAttributes attributes = static_cast<PropertyAttributes>(i);
 
-    int transition = transitions->Search(FIELD, *name, attributes);
+    int transition = transitions->Search(DATA, *name, attributes);
     CHECK_EQ(*name, transitions->GetKey(transition));
     CHECK_EQ(*attr_maps[i], transitions->GetTarget(transition));
   }
 
   // Ensure that the info about the other fields is still valid.
   for (int i = 0; i < PROPS_COUNT; i++) {
-    int transition = transitions->Search(FIELD, *names[i], NONE);
+    int transition = transitions->Search(DATA, *names[i], NONE);
     CHECK_EQ(*names[i], transitions->GetKey(transition));
     CHECK_EQ(*maps[i], transitions->GetTarget(transition));
   }
index e564c6c..ebef527 100644 (file)
@@ -294,39 +294,55 @@ struct Tests : Rep {
     CHECK(T.Constant(fac->NewNumber(0))->Is(T.UnsignedSmall));
     CHECK(T.Constant(fac->NewNumber(1))->Is(T.UnsignedSmall));
     CHECK(T.Constant(fac->NewNumber(0x3fffffff))->Is(T.UnsignedSmall));
-    CHECK(T.Constant(fac->NewNumber(-1))->Is(T.OtherSignedSmall));
-    CHECK(T.Constant(fac->NewNumber(-0x3fffffff))->Is(T.OtherSignedSmall));
-    CHECK(T.Constant(fac->NewNumber(-0x40000000))->Is(T.OtherSignedSmall));
+    CHECK(T.Constant(fac->NewNumber(-1))->Is(T.NegativeSignedSmall));
+    CHECK(T.Constant(fac->NewNumber(-0x3fffffff))->Is(T.NegativeSignedSmall));
+    CHECK(T.Constant(fac->NewNumber(-0x40000000))->Is(T.NegativeSignedSmall));
     if (SmiValuesAre31Bits()) {
-      CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.OtherUnsigned31));
-      CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.OtherUnsigned31));
-      CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.OtherSigned32));
-      CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.OtherSigned32));
-      CHECK(T.Constant(fac->NewNumber(-0x7fffffff-1))->Is(T.OtherSigned32));
+      CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.NonNegativeSigned32));
+      CHECK(!T.Constant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
+      CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.NonNegativeSigned32));
+      CHECK(!T.Constant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
+      CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.NegativeSigned32));
+      CHECK(
+          !T.Constant(fac->NewNumber(-0x40000001))->Is(T.NegativeSignedSmall));
+      CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.NegativeSigned32));
+      CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 1))
+                 ->Is(T.NegativeSignedSmall));
     } else {
       CHECK(SmiValuesAre32Bits());
       CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.UnsignedSmall));
       CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.UnsignedSmall));
-      CHECK(!T.Constant(fac->NewNumber(0x40000000))->Is(T.OtherUnsigned31));
-      CHECK(!T.Constant(fac->NewNumber(0x7fffffff))->Is(T.OtherUnsigned31));
-      CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.OtherSignedSmall));
-      CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.OtherSignedSmall));
-      CHECK(T.Constant(fac->NewNumber(-0x7fffffff-1))->Is(T.OtherSignedSmall));
-      CHECK(!T.Constant(fac->NewNumber(-0x40000001))->Is(T.OtherSigned32));
-      CHECK(!T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.OtherSigned32));
-      CHECK(!T.Constant(fac->NewNumber(-0x7fffffff-1))->Is(T.OtherSigned32));
-    }
-    CHECK(T.Constant(fac->NewNumber(0x80000000u))->Is(T.OtherUnsigned32));
-    CHECK(T.Constant(fac->NewNumber(0xffffffffu))->Is(T.OtherUnsigned32));
-    CHECK(T.Constant(fac->NewNumber(0xffffffffu+1.0))->Is(T.OtherNumber));
-    CHECK(T.Constant(fac->NewNumber(-0x7fffffff-2.0))->Is(T.OtherNumber));
-    CHECK(T.Constant(fac->NewNumber(0.1))->Is(T.OtherNumber));
-    CHECK(T.Constant(fac->NewNumber(-10.1))->Is(T.OtherNumber));
-    CHECK(T.Constant(fac->NewNumber(10e60))->Is(T.OtherNumber));
+      CHECK(T.Constant(fac->NewNumber(0x40000000))->Is(T.NonNegativeSigned32));
+      CHECK(T.Constant(fac->NewNumber(0x7fffffff))->Is(T.NonNegativeSigned32));
+      CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.NegativeSignedSmall));
+      CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.NegativeSignedSmall));
+      CHECK(T.Constant(fac->NewNumber(-0x7fffffff - 1))
+                ->Is(T.NegativeSignedSmall));
+      CHECK(T.Constant(fac->NewNumber(-0x40000001))->Is(T.NegativeSigned32));
+      CHECK(T.Constant(fac->NewNumber(-0x7fffffff))->Is(T.NegativeSigned32));
+      CHECK(
+          T.Constant(fac->NewNumber(-0x7fffffff - 1))->Is(T.NegativeSigned32));
+    }
+    CHECK(T.Constant(fac->NewNumber(0x80000000u))->Is(T.Unsigned32));
+    CHECK(!T.Constant(fac->NewNumber(0x80000000u))->Is(T.NonNegativeSigned32));
+    CHECK(T.Constant(fac->NewNumber(0xffffffffu))->Is(T.Unsigned32));
+    CHECK(!T.Constant(fac->NewNumber(0xffffffffu))->Is(T.NonNegativeSigned32));
+    CHECK(T.Constant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.PlainNumber));
+    CHECK(!T.Constant(fac->NewNumber(0xffffffffu + 1.0))->Is(T.Integral32));
+    CHECK(T.Constant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.PlainNumber));
+    CHECK(!T.Constant(fac->NewNumber(-0x7fffffff - 2.0))->Is(T.Integral32));
+    CHECK(T.Constant(fac->NewNumber(0.1))->Is(T.PlainNumber));
+    CHECK(!T.Constant(fac->NewNumber(0.1))->Is(T.Integral32));
+    CHECK(T.Constant(fac->NewNumber(-10.1))->Is(T.PlainNumber));
+    CHECK(!T.Constant(fac->NewNumber(-10.1))->Is(T.Integral32));
+    CHECK(T.Constant(fac->NewNumber(10e60))->Is(T.PlainNumber));
+    CHECK(!T.Constant(fac->NewNumber(10e60))->Is(T.Integral32));
     CHECK(T.Constant(fac->NewNumber(-1.0*0.0))->Is(T.MinusZero));
     CHECK(T.Constant(fac->NewNumber(v8::base::OS::nan_value()))->Is(T.NaN));
-    CHECK(T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.OtherNumber));
-    CHECK(T.Constant(fac->NewNumber(-V8_INFINITY))->Is(T.OtherNumber));
+    CHECK(T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.PlainNumber));
+    CHECK(!T.Constant(fac->NewNumber(V8_INFINITY))->Is(T.Integral32));
+    CHECK(T.Constant(fac->NewNumber(-V8_INFINITY))->Is(T.PlainNumber));
+    CHECK(!T.Constant(fac->NewNumber(-V8_INFINITY))->Is(T.Integral32));
   }
 
   void Range() {
@@ -904,7 +920,7 @@ struct Tests : Rep {
       if (type->IsRange()) {
         TypeHandle lub = Rep::BitsetType::New(
             Rep::BitsetType::Lub(type), T.region());
-        CHECK(lub->Is(T.Union(T.Integral32, T.OtherNumber)));
+        CHECK(lub->Is(T.PlainNumber));
       }
     }
 
@@ -934,10 +950,8 @@ struct Tests : Rep {
 
     CheckSub(T.Object, T.Receiver);
     CheckSub(T.Array, T.Object);
-    CheckSub(T.Function, T.Object);
     CheckSub(T.Proxy, T.Receiver);
     CheckUnordered(T.Object, T.Proxy);
-    CheckUnordered(T.Array, T.Function);
 
 
     // Subtyping between concrete structural types
@@ -973,7 +987,7 @@ struct Tests : Rep {
     CheckSub(T.NumberArray, T.Object);
     CheckUnordered(T.StringArray, T.AnyArray);
 
-    CheckSub(T.MethodFunction, T.Function);
+    CheckSub(T.MethodFunction, T.Object);
     CheckSub(T.NumberFunction1, T.Object);
     CheckUnordered(T.SignedFunction1, T.NumberFunction1);
     CheckUnordered(T.NumberFunction1, T.NumberFunction2);
@@ -1259,10 +1273,8 @@ struct Tests : Rep {
     CheckDisjoint(T.InternalizedString, T.Symbol);
     CheckOverlap(T.Object, T.Receiver);
     CheckOverlap(T.Array, T.Object);
-    CheckOverlap(T.Function, T.Object);
     CheckOverlap(T.Proxy, T.Receiver);
     CheckDisjoint(T.Object, T.Proxy);
-    CheckDisjoint(T.Array, T.Function);
 
     // Structural types
     CheckOverlap(T.ObjectClass, T.Object);
@@ -1286,7 +1298,7 @@ struct Tests : Rep {
     CheckOverlap(T.NumberArray, T.Array);
     CheckDisjoint(T.NumberArray, T.AnyArray);
     CheckDisjoint(T.NumberArray, T.StringArray);
-    CheckOverlap(T.MethodFunction, T.Function);
+    CheckOverlap(T.MethodFunction, T.Object);
     CheckDisjoint(T.SignedFunction1, T.NumberFunction1);
     CheckDisjoint(T.SignedFunction1, T.NumberFunction2);
     CheckDisjoint(T.NumberFunction1, T.NumberFunction2);
@@ -1456,11 +1468,11 @@ struct Tests : Rep {
     CheckDisjoint(T.Union(T.NumberArray, T.String), T.Number);
 
     // Bitset-function
-    CHECK(this->IsBitset(T.Union(T.MethodFunction, T.Function)));
+    CHECK(this->IsBitset(T.Union(T.MethodFunction, T.Object)));
     CHECK(this->IsUnion(T.Union(T.NumberFunction1, T.Number)));
 
-    CheckEqual(T.Union(T.MethodFunction, T.Function), T.Function);
-    CheckUnordered(T.Union(T.NumberFunction1, T.String), T.Function);
+    CheckEqual(T.Union(T.MethodFunction, T.Object), T.Object);
+    CheckUnordered(T.Union(T.NumberFunction1, T.String), T.Object);
     CheckOverlap(T.Union(T.NumberFunction2, T.String), T.Object);
     CheckDisjoint(T.Union(T.NumberFunction1, T.String), T.Number);
 
@@ -1528,7 +1540,7 @@ struct Tests : Rep {
     CheckEqual(
         T.Union(T.NumberFunction1, T.NumberFunction2),
         T.Union(T.NumberFunction2, T.NumberFunction1));
-    CheckSub(T.Union(T.SignedFunction1, T.MethodFunction), T.Function);
+    CheckSub(T.Union(T.SignedFunction1, T.MethodFunction), T.Object);
 
     // Union-union
     CheckEqual(
@@ -1689,11 +1701,11 @@ struct Tests : Rep {
 
     // Bitset-array
     CheckEqual(T.Intersect(T.NumberArray, T.Object), T.NumberArray);
-    CheckEqual(T.Intersect(T.AnyArray, T.Function), T.None);
+    CheckEqual(T.Intersect(T.AnyArray, T.Proxy), T.None);
 
     // Bitset-function
     CheckEqual(T.Intersect(T.MethodFunction, T.Object), T.MethodFunction);
-    CheckEqual(T.Intersect(T.NumberFunction1, T.Array), T.None);
+    CheckEqual(T.Intersect(T.NumberFunction1, T.Proxy), T.None);
 
     // Bitset-union
     CheckEqual(
@@ -1831,6 +1843,32 @@ struct Tests : Rep {
     */
   }
 
+  void GetRange() {
+    // GetRange(Range(a, b)) == Range(a, b).
+    for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+      TypeHandle type1 = *it1;
+      if (type1->IsRange()) {
+        typename Type::RangeType* range = type1->GetRange();
+        CHECK(type1->Min() == range->Min()->Number());
+        CHECK(type1->Max() == range->Max()->Number());
+      }
+    }
+
+    // GetRange(Union(Constant(x), Range(min, max))) == Range(min, max).
+    for (TypeIterator it1 = T.types.begin(); it1 != T.types.end(); ++it1) {
+      for (TypeIterator it2 = T.types.begin(); it2 != T.types.end(); ++it2) {
+        TypeHandle type1 = *it1;
+        TypeHandle type2 = *it2;
+        if (type1->IsConstant() && type2->IsRange()) {
+          TypeHandle u = T.Union(type1, type2);
+
+          CHECK(type2->Min() == u->GetRange()->Min()->Number());
+          CHECK(type2->Max() == u->GetRange()->Max()->Number());
+        }
+      }
+    }
+  }
+
   template<class Type2, class TypeHandle2, class Region2, class Rep2>
   void Convert() {
     Types<Type2, TypeHandle2, Region2> T2(
@@ -2030,6 +2068,13 @@ TEST(Distributivity) {
 }
 
 
+TEST(GetRange) {
+  CcTest::InitializeVM();
+  ZoneTests().GetRange();
+  HeapTests().GetRange();
+}
+
+
 TEST(Convert) {
   CcTest::InitializeVM();
   ZoneTests().Convert<HeapType, Handle<HeapType>, Isolate, HeapRep>();
diff --git a/deps/v8/test/cctest/test-unboxed-doubles.cc b/deps/v8/test/cctest/test-unboxed-doubles.cc
new file mode 100644
index 0000000..a12bf47
--- /dev/null
@@ -0,0 +1,1162 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <stdlib.h>
+#include <limits>
+#include <utility>
+
+#include "src/v8.h"
+
+#include "src/compilation-cache.h"
+#include "src/execution.h"
+#include "src/factory.h"
+#include "src/global-handles.h"
+#include "src/ic/ic.h"
+#include "src/macro-assembler.h"
+#include "test/cctest/cctest.h"
+
+using namespace v8::base;
+using namespace v8::internal;
+
+#if (V8_DOUBLE_FIELDS_UNBOXING)
+
+
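+// Reads a double field back regardless of its storage: straight from the
+// unboxed in-object slot, or from the boxed mutable HeapNumber otherwise.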
+static double GetDoubleFieldValue(JSObject* obj, FieldIndex field_index) {
+  if (obj->IsUnboxedDoubleField(field_index)) {
+    return obj->RawFastDoublePropertyAt(field_index);
+  } else {
+    Object* value = obj->RawFastPropertyAt(field_index);
+    DCHECK(value->IsMutableHeapNumber());
+    return HeapNumber::cast(value)->value();
+  }
+}
+
+const int kNumberOfBits = 32;
+
+
+enum TestPropertyKind {
+  PROP_CONSTANT,
+  PROP_SMI,
+  PROP_DOUBLE,
+  PROP_TAGGED,
+  PROP_KIND_NUMBER
+};
+
+static Representation representations[PROP_KIND_NUMBER] = {
+    Representation::None(), Representation::Smi(), Representation::Double(),
+    Representation::Tagged()};
+
+
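+// Builds a DescriptorArray with one descriptor per entry of |props|:
+// PROP_CONSTANT becomes a constant-function descriptor, every other kind a
+// field descriptor with the matching representation.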
+static Handle<DescriptorArray> CreateDescriptorArray(Isolate* isolate,
+                                                     TestPropertyKind* props,
+                                                     int kPropsCount) {
+  Factory* factory = isolate->factory();
+
+  Handle<String> func_name = factory->InternalizeUtf8String("func");
+  Handle<JSFunction> func = factory->NewFunction(func_name);
+
+  Handle<DescriptorArray> descriptors =
+      DescriptorArray::Allocate(isolate, 0, kPropsCount);
+
+  int next_field_offset = 0;
+  for (int i = 0; i < kPropsCount; i++) {
+    EmbeddedVector<char, 64> buffer;
+    SNPrintF(buffer, "prop%d", i);
+    Handle<String> name = factory->InternalizeUtf8String(buffer.start());
+
+    TestPropertyKind kind = props[i];
+
+    if (kind == PROP_CONSTANT) {
+      ConstantDescriptor d(name, func, NONE);
+      descriptors->Append(&d);
+
+    } else {
+      FieldDescriptor f(name, next_field_offset, NONE, representations[kind]);
+      next_field_offset += f.GetDetails().field_width_in_words();
+      descriptors->Append(&f);
+    }
+  }
+  return descriptors;
+}
+
+
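+// The fast layout descriptor packs one bit per field into a Smi; any index
+// outside its capacity must be reported as tagged.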
+TEST(LayoutDescriptorBasicFast) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  LayoutDescriptor* layout_desc = LayoutDescriptor::FastPointerLayout();
+
+  CHECK(!layout_desc->IsSlowLayout());
+  CHECK(layout_desc->IsFastPointerLayout());
+  CHECK_EQ(kSmiValueSize, layout_desc->capacity());
+
+  for (int i = 0; i < kSmiValueSize + 13; i++) {
+    CHECK_EQ(true, layout_desc->IsTagged(i));
+  }
+  CHECK_EQ(true, layout_desc->IsTagged(-1));
+  CHECK_EQ(true, layout_desc->IsTagged(-12347));
+  CHECK_EQ(true, layout_desc->IsTagged(15635));
+  CHECK(layout_desc->IsFastPointerLayout());
+
+  for (int i = 0; i < kSmiValueSize; i++) {
+    layout_desc = layout_desc->SetTaggedForTesting(i, false);
+    CHECK_EQ(false, layout_desc->IsTagged(i));
+    layout_desc = layout_desc->SetTaggedForTesting(i, true);
+    CHECK_EQ(true, layout_desc->IsTagged(i));
+  }
+  CHECK(layout_desc->IsFastPointerLayout());
+
+  int sequence_length;
+  CHECK_EQ(true, layout_desc->IsTagged(0, std::numeric_limits<int>::max(),
+                                       &sequence_length));
+  CHECK_EQ(std::numeric_limits<int>::max(), sequence_length);
+
+  CHECK_EQ(true, layout_desc->IsTagged(0, 7, &sequence_length));
+  CHECK_EQ(7, sequence_length);
+}
+
+
+TEST(LayoutDescriptorBasicSlow) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  Handle<LayoutDescriptor> layout_descriptor;
+  const int kPropsCount = kSmiValueSize * 3;
+  TestPropertyKind props[kPropsCount];
+  for (int i = 0; i < kPropsCount; i++) {
+    // All properties tagged.
+    props[i] = PROP_TAGGED;
+  }
+
+  {
+    Handle<DescriptorArray> descriptors =
+        CreateDescriptorArray(isolate, props, kPropsCount);
+
+    Handle<Map> map = Map::Create(isolate, kPropsCount);
+
+    layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
+    CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
+    CHECK_EQ(kSmiValueSize, layout_descriptor->capacity());
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  }
+
+  props[0] = PROP_DOUBLE;
+  props[kPropsCount - 1] = PROP_DOUBLE;
+
+  Handle<DescriptorArray> descriptors =
+      CreateDescriptorArray(isolate, props, kPropsCount);
+
+  {
+    int inobject_properties = kPropsCount - 1;
+    Handle<Map> map = Map::Create(isolate, inobject_properties);
+
+    // Should be fast as the only double property is the first one.
+    layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
+    CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
+    CHECK(!layout_descriptor->IsSlowLayout());
+    CHECK(!layout_descriptor->IsFastPointerLayout());
+
+    CHECK_EQ(false, layout_descriptor->IsTagged(0));
+    for (int i = 1; i < kPropsCount; i++) {
+      CHECK_EQ(true, layout_descriptor->IsTagged(i));
+    }
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  }
+
+  {
+    int inobject_properties = kPropsCount;
+    Handle<Map> map = Map::Create(isolate, inobject_properties);
+
+    layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
+    CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
+    CHECK(layout_descriptor->IsSlowLayout());
+    CHECK(!layout_descriptor->IsFastPointerLayout());
+    CHECK(layout_descriptor->capacity() > kSmiValueSize);
+
+    CHECK_EQ(false, layout_descriptor->IsTagged(0));
+    CHECK_EQ(false, layout_descriptor->IsTagged(kPropsCount - 1));
+    for (int i = 1; i < kPropsCount - 1; i++) {
+      CHECK_EQ(true, layout_descriptor->IsTagged(i));
+    }
+
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+
+    // Here we have a truly slow layout descriptor, so play with the bits.
+    CHECK_EQ(true, layout_descriptor->IsTagged(-1));
+    CHECK_EQ(true, layout_descriptor->IsTagged(-12347));
+    CHECK_EQ(true, layout_descriptor->IsTagged(15635));
+
+    LayoutDescriptor* layout_desc = *layout_descriptor;
+    // Play with the bits, then restore a state consistent with the map.
+    for (int i = 1; i < kPropsCount - 1; i++) {
+      layout_desc = layout_desc->SetTaggedForTesting(i, false);
+      CHECK_EQ(false, layout_desc->IsTagged(i));
+      layout_desc = layout_desc->SetTaggedForTesting(i, true);
+      CHECK_EQ(true, layout_desc->IsTagged(i));
+    }
+    CHECK(layout_desc->IsSlowLayout());
+    CHECK(!layout_desc->IsFastPointerLayout());
+
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  }
+}
+
+
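+// Fills a layout descriptor of the given length with the tagged/untagged
+// pattern described by the sentinel-terminated |bit_flip_positions| array,
+// then verifies IsTagged() queries and the reported sequence lengths.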
+static void TestLayoutDescriptorQueries(int layout_descriptor_length,
+                                        int* bit_flip_positions,
+                                        int max_sequence_length) {
+  Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::NewForTesting(
+      CcTest::i_isolate(), layout_descriptor_length);
+  layout_descriptor_length = layout_descriptor->capacity();
+  LayoutDescriptor* layout_desc = *layout_descriptor;
+
+  {
+    // Fill in the layout descriptor.
+    int cur_bit_flip_index = 0;
+    bool tagged = true;
+    for (int i = 0; i < layout_descriptor_length; i++) {
+      if (i == bit_flip_positions[cur_bit_flip_index]) {
+        tagged = !tagged;
+        ++cur_bit_flip_index;
+        CHECK(i < bit_flip_positions[cur_bit_flip_index]);  // check test data
+      }
+      layout_desc = layout_desc->SetTaggedForTesting(i, tagged);
+    }
+  }
+
+  if (layout_desc->IsFastPointerLayout()) {
+    return;
+  }
+
+  {
+    // Check queries.
+    int cur_bit_flip_index = 0;
+    bool tagged = true;
+    for (int i = 0; i < layout_descriptor_length; i++) {
+      if (i == bit_flip_positions[cur_bit_flip_index]) {
+        tagged = !tagged;
+        ++cur_bit_flip_index;
+      }
+      CHECK_EQ(tagged, layout_desc->IsTagged(i));
+
+      int next_bit_flip_position = bit_flip_positions[cur_bit_flip_index];
+      int expected_sequence_length;
+      if (next_bit_flip_position < layout_desc->capacity()) {
+        expected_sequence_length = next_bit_flip_position - i;
+      } else {
+        expected_sequence_length = tagged ? std::numeric_limits<int>::max()
+                                          : (layout_desc->capacity() - i);
+      }
+      expected_sequence_length =
+          Min(expected_sequence_length, max_sequence_length);
+      int sequence_length;
+      CHECK_EQ(tagged,
+               layout_desc->IsTagged(i, max_sequence_length, &sequence_length));
+      CHECK(sequence_length > 0);
+
+      CHECK_EQ(expected_sequence_length, sequence_length);
+    }
+
+    int sequence_length;
+    CHECK_EQ(true,
+             layout_desc->IsTagged(layout_descriptor_length,
+                                   max_sequence_length, &sequence_length));
+    CHECK_EQ(max_sequence_length, sequence_length);
+  }
+}
+
+
+static void TestLayoutDescriptorQueriesFast(int max_sequence_length) {
+  {
+    LayoutDescriptor* layout_desc = LayoutDescriptor::FastPointerLayout();
+    int sequence_length;
+    for (int i = 0; i < kNumberOfBits; i++) {
+      CHECK_EQ(true,
+               layout_desc->IsTagged(i, max_sequence_length, &sequence_length));
+      CHECK(sequence_length > 0);
+      CHECK_EQ(max_sequence_length, sequence_length);
+    }
+  }
+
+  {
+    int bit_flip_positions[] = {1000};
+    TestLayoutDescriptorQueries(kSmiValueSize, bit_flip_positions,
+                                max_sequence_length);
+  }
+
+  {
+    int bit_flip_positions[] = {0, 1000};
+    TestLayoutDescriptorQueries(kSmiValueSize, bit_flip_positions,
+                                max_sequence_length);
+  }
+
+  {
+    int bit_flip_positions[kNumberOfBits + 1];
+    for (int i = 0; i <= kNumberOfBits; i++) {
+      bit_flip_positions[i] = i;
+    }
+    TestLayoutDescriptorQueries(kSmiValueSize, bit_flip_positions,
+                                max_sequence_length);
+  }
+
+  {
+    int bit_flip_positions[] = {3, 7, 8, 10, 15, 21, 30, 1000};
+    TestLayoutDescriptorQueries(kSmiValueSize, bit_flip_positions,
+                                max_sequence_length);
+  }
+
+  {
+    int bit_flip_positions[] = {0,  1,  2,  3,  5,  7,  9,
+                                12, 15, 18, 22, 26, 29, 1000};
+    TestLayoutDescriptorQueries(kSmiValueSize, bit_flip_positions,
+                                max_sequence_length);
+  }
+}
+
+
+TEST(LayoutDescriptorQueriesFastLimited7) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  TestLayoutDescriptorQueriesFast(7);
+}
+
+
+TEST(LayoutDescriptorQueriesFastLimited13) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  TestLayoutDescriptorQueriesFast(13);
+}
+
+
+TEST(LayoutDescriptorQueriesFastUnlimited) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  TestLayoutDescriptorQueriesFast(std::numeric_limits<int>::max());
+}
+
+
+static void TestLayoutDescriptorQueriesSlow(int max_sequence_length) {
+  {
+    int bit_flip_positions[] = {10000};
+    TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
+                                max_sequence_length);
+  }
+
+  {
+    int bit_flip_positions[] = {0, 10000};
+    TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
+                                max_sequence_length);
+  }
+
+  {
+    int bit_flip_positions[kMaxNumberOfDescriptors + 1];
+    for (int i = 0; i < kMaxNumberOfDescriptors; i++) {
+      bit_flip_positions[i] = i;
+    }
+    bit_flip_positions[kMaxNumberOfDescriptors] = 10000;
+    TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
+                                max_sequence_length);
+  }
+
+  {
+    int bit_flip_positions[] = {3,  7,  8,  10, 15,  21,   30,
+                                37, 54, 80, 99, 383, 10000};
+    TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
+                                max_sequence_length);
+  }
+
+  {
+    int bit_flip_positions[] = {0,   10,  20,  30,  50,  70,  90,
+                                120, 150, 180, 220, 260, 290, 10000};
+    TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
+                                max_sequence_length);
+  }
+
+  {
+    int bit_flip_positions[kMaxNumberOfDescriptors + 1];
+    int cur = 0;
+    for (int i = 0; i < kMaxNumberOfDescriptors; i++) {
+      bit_flip_positions[i] = cur;
+      cur = (cur + 1) * 2;
+    }
+    CHECK(cur < 10000);
+    bit_flip_positions[kMaxNumberOfDescriptors] = 10000;
+    TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
+                                max_sequence_length);
+  }
+
+  {
+    int bit_flip_positions[kMaxNumberOfDescriptors + 1];
+    int cur = 3;
+    for (int i = 0; i < kMaxNumberOfDescriptors; i++) {
+      bit_flip_positions[i] = cur;
+      cur = (cur + 1) * 2;
+    }
+    CHECK(cur < 10000);
+    bit_flip_positions[kMaxNumberOfDescriptors] = 10000;
+    TestLayoutDescriptorQueries(kMaxNumberOfDescriptors, bit_flip_positions,
+                                max_sequence_length);
+  }
+}
+
+
+TEST(LayoutDescriptorQueriesSlowLimited7) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  TestLayoutDescriptorQueriesSlow(7);
+}
+
+
+TEST(LayoutDescriptorQueriesSlowLimited13) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  TestLayoutDescriptorQueriesSlow(13);
+}
+
+
+TEST(LayoutDescriptorQueriesSlowLimited42) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  TestLayoutDescriptorQueriesSlow(42);
+}
+
+
+TEST(LayoutDescriptorQueriesSlowUnlimited) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+
+  TestLayoutDescriptorQueriesSlow(std::numeric_limits<int>::max());
+}
+
+
+TEST(LayoutDescriptorCreateNewFast) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  Handle<LayoutDescriptor> layout_descriptor;
+  TestPropertyKind props[] = {
+      PROP_CONSTANT,
+      PROP_TAGGED,  // field #0
+      PROP_CONSTANT,
+      PROP_DOUBLE,  // field #1
+      PROP_CONSTANT,
+      PROP_TAGGED,  // field #2
+      PROP_CONSTANT,
+  };
+  const int kPropsCount = arraysize(props);
+
+  Handle<DescriptorArray> descriptors =
+      CreateDescriptorArray(isolate, props, kPropsCount);
+
+  {
+    Handle<Map> map = Map::Create(isolate, 0);
+    layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
+    CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  }
+
+  {
+    Handle<Map> map = Map::Create(isolate, 1);
+    layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
+    CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  }
+
+  {
+    Handle<Map> map = Map::Create(isolate, 2);
+    layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
+    CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
+    CHECK(!layout_descriptor->IsSlowLayout());
+    CHECK_EQ(true, layout_descriptor->IsTagged(0));
+    CHECK_EQ(false, layout_descriptor->IsTagged(1));
+    CHECK_EQ(true, layout_descriptor->IsTagged(2));
+    CHECK_EQ(true, layout_descriptor->IsTagged(125));
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  }
+}
+
+
+TEST(LayoutDescriptorCreateNewSlow) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  Handle<LayoutDescriptor> layout_descriptor;
+  const int kPropsCount = kSmiValueSize * 3;
+  TestPropertyKind props[kPropsCount];
+  for (int i = 0; i < kPropsCount; i++) {
+    props[i] = static_cast<TestPropertyKind>(i % PROP_KIND_NUMBER);
+  }
+
+  Handle<DescriptorArray> descriptors =
+      CreateDescriptorArray(isolate, props, kPropsCount);
+
+  {
+    Handle<Map> map = Map::Create(isolate, 0);
+    layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
+    CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  }
+
+  {
+    Handle<Map> map = Map::Create(isolate, 1);
+    layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
+    CHECK_EQ(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  }
+
+  {
+    Handle<Map> map = Map::Create(isolate, 2);
+    layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
+    CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
+    CHECK(!layout_descriptor->IsSlowLayout());
+    CHECK_EQ(true, layout_descriptor->IsTagged(0));
+    CHECK_EQ(false, layout_descriptor->IsTagged(1));
+    CHECK_EQ(true, layout_descriptor->IsTagged(2));
+    CHECK_EQ(true, layout_descriptor->IsTagged(125));
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  }
+
+  {
+    int inobject_properties = kPropsCount / 2;
+    Handle<Map> map = Map::Create(isolate, inobject_properties);
+    layout_descriptor = LayoutDescriptor::New(map, descriptors, kPropsCount);
+    CHECK_NE(LayoutDescriptor::FastPointerLayout(), *layout_descriptor);
+    CHECK(layout_descriptor->IsSlowLayout());
+    for (int i = 0; i < inobject_properties; i++) {
+      // PROP_DOUBLE has index 1 among FIELD properties.
+      const bool tagged = (i % (PROP_KIND_NUMBER - 1)) != 1;
+      CHECK_EQ(tagged, layout_descriptor->IsTagged(i));
+    }
+    // Every property after inobject_properties must be tagged.
+    for (int i = inobject_properties; i < kPropsCount; i++) {
+      CHECK_EQ(true, layout_descriptor->IsTagged(i));
+    }
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+    DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+
+    // Now test LayoutDescriptor::cast_gc_safe().
+    Handle<LayoutDescriptor> layout_descriptor_copy =
+        LayoutDescriptor::New(map, descriptors, kPropsCount);
+
+    LayoutDescriptor* layout_desc = *layout_descriptor;
+    CHECK_EQ(layout_desc, LayoutDescriptor::cast(layout_desc));
+    CHECK_EQ(layout_desc, LayoutDescriptor::cast_gc_safe(layout_desc));
+    CHECK(layout_descriptor->IsFixedTypedArrayBase());
+    // Now make it look like a forwarding pointer to layout_descriptor_copy.
+    MapWord map_word = layout_desc->map_word();
+    CHECK(!map_word.IsForwardingAddress());
+    layout_desc->set_map_word(
+        MapWord::FromForwardingAddress(*layout_descriptor_copy));
+    CHECK(layout_desc->map_word().IsForwardingAddress());
+    CHECK_EQ(*layout_descriptor_copy,
+             LayoutDescriptor::cast_gc_safe(layout_desc));
+
+    // Restore it back.
+    layout_desc->set_map_word(map_word);
+    CHECK_EQ(layout_desc, LayoutDescriptor::cast(layout_desc));
+  }
+}
+
+
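+// Grows a map one descriptor at a time via LayoutDescriptor::Append() and
+// checks after every step that exactly the in-object double fields are
+// marked as untagged.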
+static Handle<LayoutDescriptor> TestLayoutDescriptorAppend(
+    Isolate* isolate, int inobject_properties, TestPropertyKind* props,
+    int kPropsCount) {
+  Factory* factory = isolate->factory();
+
+  Handle<String> func_name = factory->InternalizeUtf8String("func");
+  Handle<JSFunction> func = factory->NewFunction(func_name);
+
+  Handle<DescriptorArray> descriptors =
+      DescriptorArray::Allocate(isolate, 0, kPropsCount);
+
+  Handle<Map> map = Map::Create(isolate, inobject_properties);
+  map->InitializeDescriptors(*descriptors,
+                             LayoutDescriptor::FastPointerLayout());
+
+  int next_field_offset = 0;
+  for (int i = 0; i < kPropsCount; i++) {
+    EmbeddedVector<char, 64> buffer;
+    SNPrintF(buffer, "prop%d", i);
+    Handle<String> name = factory->InternalizeUtf8String(buffer.start());
+
+    Handle<LayoutDescriptor> layout_descriptor;
+    TestPropertyKind kind = props[i];
+    if (kind == PROP_CONSTANT) {
+      ConstantDescriptor d(name, func, NONE);
+      layout_descriptor = LayoutDescriptor::Append(map, d.GetDetails());
+      descriptors->Append(&d);
+
+    } else {
+      FieldDescriptor f(name, next_field_offset, NONE, representations[kind]);
+      int field_width_in_words = f.GetDetails().field_width_in_words();
+      next_field_offset += field_width_in_words;
+      layout_descriptor = LayoutDescriptor::Append(map, f.GetDetails());
+      descriptors->Append(&f);
+
+      int field_index = f.GetDetails().field_index();
+      bool is_inobject = field_index < map->inobject_properties();
+      for (int bit = 0; bit < field_width_in_words; bit++) {
+        CHECK_EQ(is_inobject && (kind == PROP_DOUBLE),
+                 !layout_descriptor->IsTagged(field_index + bit));
+      }
+      CHECK(layout_descriptor->IsTagged(next_field_offset));
+    }
+    map->InitializeDescriptors(*descriptors, *layout_descriptor);
+  }
+  Handle<LayoutDescriptor> layout_descriptor(map->layout_descriptor(), isolate);
+  DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  return layout_descriptor;
+}
+
+
+TEST(LayoutDescriptorAppend) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  Handle<LayoutDescriptor> layout_descriptor;
+  const int kPropsCount = kSmiValueSize * 3;
+  TestPropertyKind props[kPropsCount];
+  for (int i = 0; i < kPropsCount; i++) {
+    props[i] = static_cast<TestPropertyKind>(i % PROP_KIND_NUMBER);
+  }
+
+  layout_descriptor =
+      TestLayoutDescriptorAppend(isolate, 0, props, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor =
+      TestLayoutDescriptorAppend(isolate, 13, props, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor =
+      TestLayoutDescriptorAppend(isolate, kSmiValueSize, props, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppend(isolate, kSmiValueSize * 2,
+                                                 props, kPropsCount);
+  CHECK(layout_descriptor->IsSlowLayout());
+
+  layout_descriptor =
+      TestLayoutDescriptorAppend(isolate, kPropsCount, props, kPropsCount);
+  CHECK(layout_descriptor->IsSlowLayout());
+}
+
+
+TEST(LayoutDescriptorAppendAllDoubles) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  Handle<LayoutDescriptor> layout_descriptor;
+  const int kPropsCount = kSmiValueSize * 3;
+  TestPropertyKind props[kPropsCount];
+  for (int i = 0; i < kPropsCount; i++) {
+    props[i] = PROP_DOUBLE;
+  }
+
+  layout_descriptor =
+      TestLayoutDescriptorAppend(isolate, 0, props, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor =
+      TestLayoutDescriptorAppend(isolate, 13, props, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor =
+      TestLayoutDescriptorAppend(isolate, kSmiValueSize, props, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppend(isolate, kSmiValueSize + 1,
+                                                 props, kPropsCount);
+  CHECK(layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppend(isolate, kSmiValueSize * 2,
+                                                 props, kPropsCount);
+  CHECK(layout_descriptor->IsSlowLayout());
+
+  layout_descriptor =
+      TestLayoutDescriptorAppend(isolate, kPropsCount, props, kPropsCount);
+  CHECK(layout_descriptor->IsSlowLayout());
+
+  {
+    // Ensure the layout descriptor switches to slow mode at the right moment.
+    layout_descriptor =
+        TestLayoutDescriptorAppend(isolate, kPropsCount, props, kSmiValueSize);
+    CHECK(!layout_descriptor->IsSlowLayout());
+
+    layout_descriptor = TestLayoutDescriptorAppend(isolate, kPropsCount, props,
+                                                   kSmiValueSize + 1);
+    CHECK(layout_descriptor->IsSlowLayout());
+  }
+}
+
+
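+// Installs descriptors incrementally and verifies that once the layout
+// descriptor switches to slow mode it never switches back and matches the
+// precomputed full layout descriptor.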
+static Handle<LayoutDescriptor> TestLayoutDescriptorAppendIfFastOrUseFull(
+    Isolate* isolate, int inobject_properties,
+    Handle<DescriptorArray> descriptors, int number_of_descriptors) {
+  Handle<Map> map = Map::Create(isolate, inobject_properties);
+
+  Handle<LayoutDescriptor> full_layout_descriptor = LayoutDescriptor::New(
+      map, descriptors, descriptors->number_of_descriptors());
+
+  int nof = 0;
+  bool switched_to_slow_mode = false;
+
+  for (int i = 0; i < number_of_descriptors; i++) {
+    PropertyDetails details = descriptors->GetDetails(i);
+
+    // This method calls LayoutDescriptor::AppendIfFastOrUseFull() internally
+    // and does all the required map-descriptor-related bookkeeping.
+    map = Map::CopyInstallDescriptorsForTesting(map, i, descriptors,
+                                                full_layout_descriptor);
+
+    LayoutDescriptor* layout_desc = map->layout_descriptor();
+
+    if (layout_desc->IsSlowLayout()) {
+      switched_to_slow_mode = true;
+      CHECK_EQ(*full_layout_descriptor, layout_desc);
+    } else {
+      CHECK(!switched_to_slow_mode);
+      if (details.type() == FIELD) {
+        nof++;
+        int field_index = details.field_index();
+        int field_width_in_words = details.field_width_in_words();
+
+        bool is_inobject = field_index < map->inobject_properties();
+        for (int bit = 0; bit < field_width_in_words; bit++) {
+          CHECK_EQ(is_inobject && details.representation().IsDouble(),
+                   !layout_desc->IsTagged(field_index + bit));
+        }
+        CHECK(layout_desc->IsTagged(field_index + field_width_in_words));
+      }
+    }
+    DCHECK(map->layout_descriptor()->IsConsistentWithMap(*map));
+  }
+
+  Handle<LayoutDescriptor> layout_descriptor(map->GetLayoutDescriptor(),
+                                             isolate);
+  DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+  return layout_descriptor;
+}
+
+
+TEST(LayoutDescriptorAppendIfFastOrUseFull) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  Handle<LayoutDescriptor> layout_descriptor;
+  const int kPropsCount = kSmiValueSize * 3;
+  TestPropertyKind props[kPropsCount];
+  for (int i = 0; i < kPropsCount; i++) {
+    props[i] = static_cast<TestPropertyKind>(i % PROP_KIND_NUMBER);
+  }
+  Handle<DescriptorArray> descriptors =
+      CreateDescriptorArray(isolate, props, kPropsCount);
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, 0, descriptors, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, 13, descriptors, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, kSmiValueSize, descriptors, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, kSmiValueSize * 2, descriptors, kPropsCount);
+  CHECK(layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, kPropsCount, descriptors, kPropsCount);
+  CHECK(layout_descriptor->IsSlowLayout());
+}
+
+
+TEST(LayoutDescriptorAppendIfFastOrUseFullAllDoubles) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  Handle<LayoutDescriptor> layout_descriptor;
+  const int kPropsCount = kSmiValueSize * 3;
+  TestPropertyKind props[kPropsCount];
+  for (int i = 0; i < kPropsCount; i++) {
+    props[i] = PROP_DOUBLE;
+  }
+  Handle<DescriptorArray> descriptors =
+      CreateDescriptorArray(isolate, props, kPropsCount);
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, 0, descriptors, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, 13, descriptors, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, kSmiValueSize, descriptors, kPropsCount);
+  CHECK(!layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, kSmiValueSize + 1, descriptors, kPropsCount);
+  CHECK(layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, kSmiValueSize * 2, descriptors, kPropsCount);
+  CHECK(layout_descriptor->IsSlowLayout());
+
+  layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+      isolate, kPropsCount, descriptors, kPropsCount);
+  CHECK(layout_descriptor->IsSlowLayout());
+
+  {
+    // Ensure the layout descriptor switches to slow mode at the right moment.
+    layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+        isolate, kPropsCount, descriptors, kSmiValueSize);
+    CHECK(!layout_descriptor->IsSlowLayout());
+
+    layout_descriptor = TestLayoutDescriptorAppendIfFastOrUseFull(
+        isolate, kPropsCount, descriptors, kSmiValueSize + 1);
+    CHECK(layout_descriptor->IsSlowLayout());
+  }
+}
+
+
+TEST(Regress436816) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Factory* factory = isolate->factory();
+  v8::HandleScope scope(CcTest::isolate());
+
+  const int kPropsCount = kSmiValueSize * 3;
+  TestPropertyKind props[kPropsCount];
+  for (int i = 0; i < kPropsCount; i++) {
+    props[i] = PROP_DOUBLE;
+  }
+  Handle<DescriptorArray> descriptors =
+      CreateDescriptorArray(isolate, props, kPropsCount);
+
+  Handle<Map> map = Map::Create(isolate, kPropsCount);
+  Handle<LayoutDescriptor> layout_descriptor =
+      LayoutDescriptor::New(map, descriptors, kPropsCount);
+  map->InitializeDescriptors(*descriptors, *layout_descriptor);
+
+  Handle<JSObject> object = factory->NewJSObjectFromMap(map, TENURED);
+
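+  // Craft a double whose bit pattern looks like a valid tagged heap pointer.
+  // If the GC visited the unboxed double fields as tagged slots, this bogus
+  // pointer would crash heap verification.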
+  Address fake_address = reinterpret_cast<Address>(~kHeapObjectTagMask);
+  HeapObject* fake_object = HeapObject::FromAddress(fake_address);
+  CHECK(fake_object->IsHeapObject());
+
+  double boom_value = bit_cast<double>(fake_object);
+  for (int i = 0; i < kPropsCount; i++) {
+    FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+    CHECK(map->IsUnboxedDoubleField(index));
+    object->RawFastDoublePropertyAtPut(index, boom_value);
+  }
+  CHECK(object->HasFastProperties());
+  CHECK(!object->map()->HasFastPointerLayout());
+
+  Handle<Map> normalized_map =
+      Map::Normalize(map, KEEP_INOBJECT_PROPERTIES, "testing");
+  JSObject::MigrateToMap(object, normalized_map);
+  CHECK(!object->HasFastProperties());
+  CHECK(object->map()->HasFastPointerLayout());
+
+  // Trigger GCs and heap verification.
+  CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+}
+
+
+TEST(DoScavenge) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Factory* factory = isolate->factory();
+  v8::HandleScope scope(CcTest::isolate());
+
+  CompileRun(
+      "function A() {"
+      "  this.x = 42.5;"
+      "  this.o = {};"
+      "};"
+      "var o = new A();");
+
+  Handle<String> obj_name = factory->InternalizeUtf8String("o");
+
+  Handle<Object> obj_value =
+      Object::GetProperty(isolate->global_object(), obj_name).ToHandleChecked();
+  CHECK(obj_value->IsJSObject());
+  Handle<JSObject> obj = Handle<JSObject>::cast(obj_value);
+
+  {
+    // Ensure the object is properly set up.
+    Map* map = obj->map();
+    DescriptorArray* descriptors = map->instance_descriptors();
+    CHECK(map->NumberOfOwnDescriptors() == 2);
+    CHECK(descriptors->GetDetails(0).representation().IsDouble());
+    CHECK(descriptors->GetDetails(1).representation().IsHeapObject());
+    FieldIndex field_index = FieldIndex::ForDescriptor(map, 0);
+    CHECK(field_index.is_inobject() && field_index.is_double());
+    CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
+    CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
+  }
+  CHECK(isolate->heap()->new_space()->Contains(*obj));
+
+  // Trigger a GC so that the newly allocated object moves to survivor space.
+  CcTest::heap()->CollectGarbage(i::NEW_SPACE);  // in survivor space now
+
+  // Create temp object in the new space.
+  Handle<JSArray> temp = factory->NewJSArray(FAST_ELEMENTS, NOT_TENURED);
+  CHECK(isolate->heap()->new_space()->Contains(*temp));
+
+  // Construct a double value whose bit pattern looks like a pointer to the
+  // new-space object and store it into obj.
+  Address fake_object = reinterpret_cast<Address>(*temp) + kPointerSize;
+  double boom_value = bit_cast<double>(fake_object);
+
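+  // If the scavenger wrongly treated this unboxed field as a tagged slot it
+  // would follow the fake pointer; the final check verifies that the raw
+  // bits survive the GC unchanged.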
+  FieldIndex field_index = FieldIndex::ForDescriptor(obj->map(), 0);
+  Handle<HeapNumber> boom_number = factory->NewHeapNumber(boom_value, MUTABLE);
+  obj->FastPropertyAtPut(field_index, *boom_number);
+
+  // Now the object moves to old gen while its double field holds bits that
+  // look like a pointer into the from semi-space.
+  CcTest::heap()->CollectGarbage(i::NEW_SPACE);  // in old gen now
+
+  CHECK(isolate->heap()->old_pointer_space()->Contains(*obj));
+
+  CHECK_EQ(boom_value, GetDoubleFieldValue(*obj, field_index));
+}
+
+
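+// Cross-checks LayoutDescriptorHelper's offset-based queries against the
+// descriptor details, including object-header offsets and out-of-bounds
+// probes, which must always read as tagged.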
+static void TestLayoutDescriptorHelper(Isolate* isolate,
+                                       int inobject_properties,
+                                       Handle<DescriptorArray> descriptors,
+                                       int number_of_descriptors) {
+  Handle<Map> map = Map::Create(isolate, inobject_properties);
+
+  Handle<LayoutDescriptor> layout_descriptor = LayoutDescriptor::New(
+      map, descriptors, descriptors->number_of_descriptors());
+  map->InitializeDescriptors(*descriptors, *layout_descriptor);
+  DCHECK(layout_descriptor->IsConsistentWithMap(*map));
+
+  LayoutDescriptorHelper helper(*map);
+  bool all_fields_tagged = true;
+
+  int instance_size = map->instance_size();
+
+  int end_offset = instance_size * 2;
+  int first_non_tagged_field_offset = end_offset;
+  for (int i = 0; i < number_of_descriptors; i++) {
+    PropertyDetails details = descriptors->GetDetails(i);
+    if (details.type() != FIELD) continue;
+    FieldIndex index = FieldIndex::ForDescriptor(*map, i);
+    if (!index.is_inobject()) continue;
+    all_fields_tagged &= !details.representation().IsDouble();
+    bool expected_tagged = !index.is_double();
+    if (!expected_tagged) {
+      first_non_tagged_field_offset =
+          Min(first_non_tagged_field_offset, index.offset());
+    }
+
+    int end_of_region_offset;
+    CHECK_EQ(expected_tagged, helper.IsTagged(index.offset()));
+    CHECK_EQ(expected_tagged, helper.IsTagged(index.offset(), instance_size,
+                                              &end_of_region_offset));
+    CHECK(end_of_region_offset > 0);
+    CHECK(end_of_region_offset % kPointerSize == 0);
+    CHECK(end_of_region_offset <= instance_size);
+
+    for (int offset = index.offset(); offset < end_of_region_offset;
+         offset += kPointerSize) {
+      CHECK_EQ(expected_tagged, helper.IsTagged(offset));
+    }
+    if (end_of_region_offset < instance_size) {
+      CHECK_EQ(!expected_tagged, helper.IsTagged(end_of_region_offset));
+    } else {
+      CHECK_EQ(true, helper.IsTagged(end_of_region_offset));
+    }
+  }
+
+  for (int offset = 0; offset < JSObject::kHeaderSize; offset += kPointerSize) {
+    // Header queries
+    CHECK_EQ(true, helper.IsTagged(offset));
+    int end_of_region_offset;
+    CHECK_EQ(true, helper.IsTagged(offset, end_offset, &end_of_region_offset));
+    CHECK_EQ(first_non_tagged_field_offset, end_of_region_offset);
+
+    // Out-of-bounds queries
+    CHECK_EQ(true, helper.IsTagged(offset + instance_size));
+  }
+
+  CHECK_EQ(all_fields_tagged, helper.all_fields_tagged());
+}
+
+
+TEST(LayoutDescriptorHelperMixed) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  Handle<LayoutDescriptor> layout_descriptor;
+  const int kPropsCount = kSmiValueSize * 3;
+  TestPropertyKind props[kPropsCount];
+  for (int i = 0; i < kPropsCount; i++) {
+    props[i] = static_cast<TestPropertyKind>(i % PROP_KIND_NUMBER);
+  }
+  Handle<DescriptorArray> descriptors =
+      CreateDescriptorArray(isolate, props, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, 0, descriptors, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, 13, descriptors, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, kSmiValueSize, descriptors, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, kSmiValueSize * 2, descriptors,
+                             kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, kPropsCount, descriptors, kPropsCount);
+}
+
+
+TEST(LayoutDescriptorHelperAllTagged) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  Handle<LayoutDescriptor> layout_descriptor;
+  const int kPropsCount = kSmiValueSize * 3;
+  TestPropertyKind props[kPropsCount];
+  for (int i = 0; i < kPropsCount; i++) {
+    props[i] = PROP_TAGGED;
+  }
+  Handle<DescriptorArray> descriptors =
+      CreateDescriptorArray(isolate, props, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, 0, descriptors, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, 13, descriptors, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, kSmiValueSize, descriptors, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, kSmiValueSize * 2, descriptors,
+                             kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, kPropsCount, descriptors, kPropsCount);
+}
+
+
+TEST(LayoutDescriptorHelperAllDoubles) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  v8::HandleScope scope(CcTest::isolate());
+
+  Handle<LayoutDescriptor> layout_descriptor;
+  const int kPropsCount = kSmiValueSize * 3;
+  TestPropertyKind props[kPropsCount];
+  for (int i = 0; i < kPropsCount; i++) {
+    props[i] = PROP_DOUBLE;
+  }
+  Handle<DescriptorArray> descriptors =
+      CreateDescriptorArray(isolate, props, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, 0, descriptors, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, 13, descriptors, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, kSmiValueSize, descriptors, kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, kSmiValueSize * 2, descriptors,
+                             kPropsCount);
+
+  TestLayoutDescriptorHelper(isolate, kPropsCount, descriptors, kPropsCount);
+}
+
+
+TEST(StoreBufferScanOnScavenge) {
+  CcTest::InitializeVM();
+  Isolate* isolate = CcTest::i_isolate();
+  Factory* factory = isolate->factory();
+  v8::HandleScope scope(CcTest::isolate());
+
+  CompileRun(
+      "function A() {"
+      "  this.x = 42.5;"
+      "  this.o = {};"
+      "};"
+      "var o = new A();");
+
+  Handle<String> obj_name = factory->InternalizeUtf8String("o");
+
+  Handle<Object> obj_value =
+      Object::GetProperty(isolate->global_object(), obj_name).ToHandleChecked();
+  CHECK(obj_value->IsJSObject());
+  Handle<JSObject> obj = Handle<JSObject>::cast(obj_value);
+
+  {
+    // Ensure the object is properly set up.
+    Map* map = obj->map();
+    DescriptorArray* descriptors = map->instance_descriptors();
+    CHECK(map->NumberOfOwnDescriptors() == 2);
+    CHECK(descriptors->GetDetails(0).representation().IsDouble());
+    CHECK(descriptors->GetDetails(1).representation().IsHeapObject());
+    FieldIndex field_index = FieldIndex::ForDescriptor(map, 0);
+    CHECK(field_index.is_inobject() && field_index.is_double());
+    CHECK_EQ(FLAG_unbox_double_fields, map->IsUnboxedDoubleField(field_index));
+    CHECK_EQ(42.5, GetDoubleFieldValue(*obj, field_index));
+  }
+  CHECK(isolate->heap()->new_space()->Contains(*obj));
+
+  // Trigger GCs so that the newly allocated object moves to old gen.
+  CcTest::heap()->CollectGarbage(i::NEW_SPACE);  // in survivor space now
+  CcTest::heap()->CollectGarbage(i::NEW_SPACE);  // in old gen now
+
+  CHECK(isolate->heap()->old_pointer_space()->Contains(*obj));
+
+  // Create temp object in the new space.
+  Handle<JSArray> temp = factory->NewJSArray(FAST_ELEMENTS, NOT_TENURED);
+  CHECK(isolate->heap()->new_space()->Contains(*temp));
+
+  // Construct a double value whose bit pattern looks like a pointer to the
+  // new-space object and store it into obj.
+  Address fake_object = reinterpret_cast<Address>(*temp) + kPointerSize;
+  double boom_value = bit_cast<double>(fake_object);
+
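+  // With scan-on-scavenge enabled for this page below, the store buffer will
+  // revisit the object; the unboxed double must not be mistaken for a
+  // new-space pointer.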
+  FieldIndex field_index = FieldIndex::ForDescriptor(obj->map(), 0);
+  Handle<HeapNumber> boom_number = factory->NewHeapNumber(boom_value, MUTABLE);
+  obj->FastPropertyAtPut(field_index, *boom_number);
+
+  // Force scan-on-scavenge for obj's page.
+  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
+  chunk->set_scan_on_scavenge(true);
+
+  // Trigger GCs and force evacuation. This should not crash.
+  CcTest::heap()->CollectAllGarbage(i::Heap::kNoGCFlags);
+
+  CHECK_EQ(boom_value, GetDoubleFieldValue(*obj, field_index));
+}
+
+#endif
index bb412a8..6c19cb8 100644
@@ -97,19 +97,20 @@ TEST(Weakness) {
   }
   CHECK(!global_handles->IsWeak(key.location()));
 
-  // Put entry into weak map.
+  // Put two chained entries into weak map.
   {
     HandleScope scope(isolate);
-    PutIntoWeakMap(weakmap,
-                   Handle<JSObject>(JSObject::cast(*key)),
-                   Handle<Smi>(Smi::FromInt(23), isolate));
+    Handle<Map> map = factory->NewMap(JS_OBJECT_TYPE, JSObject::kHeaderSize);
+    Handle<JSObject> object = factory->NewJSObjectFromMap(map);
+    PutIntoWeakMap(weakmap, Handle<JSObject>(JSObject::cast(*key)), object);
+    PutIntoWeakMap(weakmap, object, Handle<Smi>(Smi::FromInt(23), isolate));
   }
-  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(2, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
 
   // Force a full GC.
   heap->CollectAllGarbage(false);
   CHECK_EQ(0, NumberOfWeakCalls);
-  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(2, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
   CHECK_EQ(
       0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 
@@ -128,14 +129,14 @@ TEST(Weakness) {
   // weak references whereas the second one will also clear weak maps.
   heap->CollectAllGarbage(false);
   CHECK_EQ(1, NumberOfWeakCalls);
-  CHECK_EQ(1, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
+  CHECK_EQ(2, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
   CHECK_EQ(
       0, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
   heap->CollectAllGarbage(false);
   CHECK_EQ(1, NumberOfWeakCalls);
   CHECK_EQ(0, ObjectHashTable::cast(weakmap->table())->NumberOfElements());
-  CHECK_EQ(
-      1, ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
+  CHECK_EQ(2,
+           ObjectHashTable::cast(weakmap->table())->NumberOfDeletedElements());
 }
 
 
index 233249a..4eac64c 100644
@@ -40,13 +40,8 @@ class Types {
   Types(Region* region, Isolate* isolate)
       : region_(region), rng_(isolate->random_number_generator()) {
     #define DECLARE_TYPE(name, value) \
-      name = Type::name(region); \
-      if (SmiValuesAre31Bits() || \
-          (!Type::name(region)->Equals(Type::OtherSigned32()) && \
-           !Type::name(region)->Equals(Type::OtherUnsigned31()))) { \
-        /* Hack: Avoid generating those empty bitset types. */ \
-        types.push_back(name); \
-      }
+      name = Type::name(region);      \
+      types.push_back(name);
     PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
     #undef DECLARE_TYPE
 
@@ -133,7 +128,7 @@ class Types {
   Handle<i::Oddball> uninitialized;
 
   #define DECLARE_TYPE(name, value) TypeHandle name;
-  BITSET_TYPE_LIST(DECLARE_TYPE)
+  PROPER_BITSET_TYPE_LIST(DECLARE_TYPE)
   #undef DECLARE_TYPE
 
   TypeHandle ObjectClass;
@@ -238,12 +233,6 @@ class Types {
           int j = rng_->NextInt(n);
           #define PICK_BITSET_TYPE(type, value) \
             if (j-- == 0) { \
-              if (!SmiValuesAre31Bits() && \
-                  (Type::type(region_)->Equals(Type::OtherSigned32()) || \
-                   Type::type(region_)->Equals(Type::OtherUnsigned31()))) { \
-                /* Hack: Avoid generating those empty bitset types. */ \
-                continue; \
-              } \
               TypeHandle tmp = Type::Intersect( \
                   result, Type::type(region_), region_); \
               if (tmp->Is(Type::None()) && i != 0) { \
diff --git a/deps/v8/test/js-perf-test/Classes/Classes.json b/deps/v8/test/js-perf-test/Classes/Classes.json
deleted file mode 100644
index dfed617..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "name": "JSTests/Classes",
-  "path": ["."],
-  "main": "run.js",
-  "flags": ["--harmony-classes"],
-  "run_count": 5,
-  "units": "score",
-  "results_regexp": "^%s\\-Classes\\(Score\\): (.+)$",
-  "total": true,
-  "tests": [
-    {"name": "Super"}
-  ]
-}
diff --git a/deps/v8/test/js-perf-test/Classes/default-constructor.js b/deps/v8/test/js-perf-test/Classes/default-constructor.js
new file mode 100644
index 0000000..49dcaa6
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+'use strict';
+
+var DefaultConstructorBenchmark = new BenchmarkSuite('DefaultConstructor',
+    [100], [
+      new Benchmark('NoSuperClass', false, false, 0, NoSuperClass),
+      new Benchmark('WithSuperClass', false, false, 0, WithSuperClass),
+      new Benchmark('WithSuperClassArguments', false, false, 0,
+                    WithSuperClassArguments),
+    ]);
+
+
+class BaseClass {}
+
+
+class DerivedClass extends BaseClass {}
+
+
+function NoSuperClass() {
+  return new BaseClass();
+}
+
+
+function WithSuperClass() {
+  return new DerivedClass();
+}
+
+
+function WithSuperClassArguments() {
+  return new DerivedClass(0, 1, 2, 3, 4);
+}
index bb3921f..5d48b32 100644
@@ -5,6 +5,7 @@
 
 load('../base.js');
 load('super.js');
+load('default-constructor.js');
 
 
 var success = true;
diff --git a/deps/v8/test/js-perf-test/Collections/Collections.json b/deps/v8/test/js-perf-test/Collections/Collections.json
deleted file mode 100644
index fd29f46..0000000
+++ /dev/null
@@ -1,21 +0,0 @@
-{
-  "name": "JSTests/Collections",
-  "path": ["."],
-  "main": "run.js",
-  "run_count": 5,
-  "units": "score",
-  "results_regexp": "^%s\\-Collections\\(Score\\): (.+)$",
-  "total": true,
-  "tests": [
-    {"name": "Map-Smi"},
-    {"name": "Map-String"},
-    {"name": "Map-Object"},
-    {"name": "Map-Iteration"},
-    {"name": "Set-Smi"},
-    {"name": "Set-String"},
-    {"name": "Set-Object"},
-    {"name": "Set-Iteration"},
-    {"name": "WeakMap"},
-    {"name": "WeakSet"}
-  ]
-}
diff --git a/deps/v8/test/js-perf-test/Iterators/Iterators.json b/deps/v8/test/js-perf-test/Iterators/Iterators.json
deleted file mode 100644
index 679872e..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-{
-  "name": "JSTests/Iterators",
-  "path": ["."],
-  "main": "run.js",
-  "run_count": 5,
-  "units": "score",
-  "results_regexp": "^%s\\-Iterators\\(Score\\): (.+)$",
-  "total": true,
-  "tests": [
-    {"name": "ForOf"}
-  ]
-}
diff --git a/deps/v8/test/js-perf-test/JSTests.json b/deps/v8/test/js-perf-test/JSTests.json
new file mode 100644
index 0000000..0a99ad4
--- /dev/null
@@ -0,0 +1,86 @@
+{
+  "name": "JSTests",
+  "run_count": 5,
+  "run_count_android_arm": 3,
+  "run_count_android_arm64": 3,
+  "units": "score",
+  "total": true,
+  "resources": ["base.js"],
+  "tests": [
+    {
+      "name": "Classes",
+      "path": ["Classes"],
+      "main": "run.js",
+      "resources": ["super.js", "default-constructor.js"],
+      "flags": ["--harmony-classes"],
+      "results_regexp": "^%s\\-Classes\\(Score\\): (.+)$",
+      "tests": [
+        {"name": "Super"},
+        {"name": "DefaultConstructor"}
+      ]
+    },
+    {
+      "name": "Collections",
+      "path": ["Collections"],
+      "main": "run.js",
+      "resources": [
+        "common.js",
+        "map.js",
+        "run.js",
+        "set.js",
+        "weakmap.js",
+        "weakset.js"
+      ],
+      "results_regexp": "^%s\\-Collections\\(Score\\): (.+)$",
+      "tests": [
+        {"name": "Map-Smi"},
+        {"name": "Map-String"},
+        {"name": "Map-Object"},
+        {"name": "Map-Iteration"},
+        {"name": "Set-Smi"},
+        {"name": "Set-String"},
+        {"name": "Set-Object"},
+        {"name": "Set-Iteration"},
+        {"name": "WeakMap"},
+        {"name": "WeakSet"}
+      ]
+    },
+    {
+      "name": "Iterators",
+      "path": ["Iterators"],
+      "main": "run.js",
+      "resources": ["forof.js"],
+      "results_regexp": "^%s\\-Iterators\\(Score\\): (.+)$",
+      "tests": [
+        {"name": "ForOf"}
+      ]
+    },
+    {
+      "name": "Strings",
+      "path": ["Strings"],
+      "main": "run.js",
+      "resources": ["harmony-string.js"],
+      "flags": ["--harmony-strings"],
+      "results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
+      "tests": [
+        {"name": "StringFunctions"}
+      ]
+    },
+    {
+      "name": "Templates",
+      "path": ["Templates"],
+      "main": "run.js",
+      "resources": ["templates.js"],
+      "flags": ["--harmony-templates"],
+      "run_count": 5,
+      "units": "score",
+      "results_regexp": "^%s\\-Templates\\(Score\\): (.+)$",
+      "total": true,
+      "tests": [
+        {"name": "Untagged"},
+        {"name": "LargeUntagged"},
+        {"name": "Tagged"}
+      ]
+    }
+  ]
+}
diff --git a/deps/v8/test/js-perf-test/Strings/Strings.json b/deps/v8/test/js-perf-test/Strings/Strings.json
deleted file mode 100644
index 871e72f..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-{
-  "name": "JSTests/Strings",
-  "path": ["."],
-  "main": "run.js",
-  "flags": ["--harmony-strings"],
-  "run_count": 5,
-  "units": "score",
-  "results_regexp": "^%s\\-Strings\\(Score\\): (.+)$",
-  "total": true,
-  "tests": [
-    {"name": "StringFunctions"}
-  ]
-}
index f430ab4..c2eac4e 100644
@@ -9,8 +9,8 @@ new BenchmarkSuite('StringFunctions', [1000], [
                 StartsWith, WithSetup, WithTearDown),
   new Benchmark('StringEndsWith', false, false, 0,
                 EndsWith, WithSetup, WithTearDown),
-  new Benchmark('StringContains', false, false, 0,
-                Contains, ContainsSetup, WithTearDown),
+  new Benchmark('StringIncludes', false, false, 0,
+                Includes, IncludesSetup, WithTearDown),
   new Benchmark('StringFromCodePoint', false, false, 0,
                 FromCodePoint, FromCodePointSetup, FromCodePointTearDown),
   new Benchmark('StringCodePointAt', false, false, 0,
@@ -60,13 +60,13 @@ function EndsWith() {
   result = str.endsWith(substr);
 }
 
-function ContainsSetup() {
+function IncludesSetup() {
   str = "def".repeat(100) + "abc".repeat(100) + "qqq".repeat(100);
   substr = "abc".repeat(100);
 }
 
-function Contains() {
-  result = str.contains(substr);
+function Includes() {
+  result = str.includes(substr);
 }
 
 var MAX_CODE_POINT = 0xFFFFF;
diff --git a/deps/v8/test/js-perf-test/Templates/run.js b/deps/v8/test/js-perf-test/Templates/run.js
new file mode 100644
index 0000000..73f1edd
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+load('../base.js');
+load('templates.js');
+
+
+var success = true;
+
+function PrintResult(name, result) {
+  print(name + '-Templates(Score): ' + result);
+}
+
+
+function PrintError(name, error) {
+  PrintResult(name, error);
+  success = false;
+}
+
+
+BenchmarkSuite.config.doWarmup = undefined;
+BenchmarkSuite.config.doDeterministic = undefined;
+
+BenchmarkSuite.RunSuites({ NotifyResult: PrintResult,
+                           NotifyError: PrintError });
diff --git a/deps/v8/test/js-perf-test/Templates/templates.js b/deps/v8/test/js-perf-test/Templates/templates.js
new file mode 100644 (file)
index 0000000..4034ce7
--- /dev/null
@@ -0,0 +1,87 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+new BenchmarkSuite('Untagged', [1000], [
+  new Benchmark('Untagged-Simple', false, false, 0,
+                Untagged, UntaggedSetup, UntaggedTearDown),
+]);
+
+new BenchmarkSuite('LargeUntagged', [1000], [
+  new Benchmark('Untagged-Large', false, false, 0,
+                UntaggedLarge, UntaggedLargeSetup, UntaggedLargeTearDown),
+]);
+
+new BenchmarkSuite('Tagged', [1000], [
+  new Benchmark('TaggedRawSimple', false, false, 0,
+                TaggedRaw, TaggedRawSetup, TaggedRawTearDown),
+]);
+
+var result;
+var SUBJECT = 'Bob';
+var TARGET = 'Mary';
+var OBJECT = 'apple';
+
+function UntaggedSetup() {
+  result = undefined;
+}
+
+function Untagged() {
+  result = `${SUBJECT} gives ${TARGET} an ${OBJECT}.`;
+}
+
+function UntaggedTearDown() {
+  var expected = '' + SUBJECT + ' gives ' + TARGET + ' an ' + OBJECT + '.';
+  return result === expected;
+}
+
+
+// ----------------------------------------------------------------------------
+
+function UntaggedLargeSetup() {
+  result = undefined;
+}
+
+function UntaggedLarge() {
+  result = `Lorem ipsum dolor sit amet, consectetur adipiscing elit. Vivamus
+ aliquam, elit euismod vestibulum ${0}lacinia, arcu odio sagittis mauris, id
+ blandit dolor felis pretium nisl. Maecenas porttitor, nunc ut accumsan mollis,
+ arcu metus rutrum arcu, ${1}ut varius dolor lorem nec risus. Integer convallis
+ tristique ante, non pretium ante suscipit at. Sed egestas massa enim, convallis
+ fermentum neque vehicula ac. Donec imperdiet a tortor ac semper. Morbi accumsan
+ quam nec erat viverra iaculis. ${2}Donec a scelerisque cras amet.`;
+}
+
+function UntaggedLargeTearDown() {
+  var expected = "Lorem ipsum dolor sit amet, consectetur adipiscing elit. " +
+      "Vivamus\n aliquam, elit euismod vestibulum " + 0 + "lacinia, arcu odio" +
+      " sagittis mauris, id\n blandit dolor felis pretium nisl. Maecenas " +
+      "porttitor, nunc ut accumsan mollis,\n arcu metus rutrum arcu, " + 1 +
+      "ut varius dolor lorem nec risus. Integer convallis\n tristique ante, " +
+      "non pretium ante suscipit at. Sed egestas massa enim, convallis\n " +
+      "fermentum neque vehicula ac. Donec imperdiet a tortor ac semper. Morbi" +
+      " accumsan\n quam nec erat viverra iaculis. " + 2 + "Donec a " +
+      "scelerisque cras amet.";
+  return result === expected;
+}
+
+
+// ----------------------------------------------------------------------------
+
+
+function TaggedRawSetup() {
+  result = undefined;
+}
+
+function TaggedRaw() {
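+  // String.raw keeps the \u escape sequences as literal text, which is why
+  // the expected value in the teardown below doubles the backslashes.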
+  result = String.raw `${SUBJECT} gives ${TARGET} an ${OBJECT} \ud83c\udf4f.`;
+}
+
+function TaggedRawTearDown() {
+  var expected =
+      '' + SUBJECT + ' gives ' + TARGET + ' an ' + OBJECT + ' \\ud83c\\udf4f.';
+  return result === expected;
+}
+
+
+// ----------------------------------------------------------------------------
index 96d3bd6..fd73491 100644 (file)
@@ -27,6 +27,6 @@
 
 // Flags: --allow-natives-syntax
 var single_function_good = "(function() { return 5; })";
-%CompileString(single_function_good, true);
+%CompileString(single_function_good, true, 0);
 var single_function_bad = "(function() { return 5; })();";
-%CompileString(single_function_bad, true);
+%CompileString(single_function_bad, true, 0);
diff --git a/deps/v8/test/message/super-constructor-extra-statement.js b/deps/v8/test/message/super-constructor-extra-statement.js
new file mode 100644 (file)
index 0000000..e8ffe2d
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes
+'use strict';
+
+class C {
+  constructor() {
+    var x;
+    super(x);
+  }
+}
+
+var c = new C();
diff --git a/deps/v8/test/message/super-constructor-extra-statement.out b/deps/v8/test/message/super-constructor-extra-statement.out
new file mode 100644 (file)
index 0000000..cbe1e07
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:10: TypeError: A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported.
+    var x;
+    
+TypeError: A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported.
+    at *%(basename)s:15:9
diff --git a/deps/v8/test/message/super-constructor.js b/deps/v8/test/message/super-constructor.js
new file mode 100644 (file)
index 0000000..430dc58
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes
+'use strict';
+
+class C {
+  constructor() {
+    super(this.x);
+  }
+}
+
+var c = new C();
diff --git a/deps/v8/test/message/super-constructor.out b/deps/v8/test/message/super-constructor.out
new file mode 100644 (file)
index 0000000..bc3a699
--- /dev/null
@@ -0,0 +1,8 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+*%(basename)s:10: TypeError: A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported.
+    super(this.x);
+     
+TypeError: A 'super' constructor call may only appear as the first statement of a function, and its arguments may not access 'this'. Other forms are not yet supported.
+    at *%(basename)s:14:9
index b472f9c..5d6ab84 100644 (file)
@@ -75,6 +75,7 @@ class MessageTestSuite(testsuite.TestSuite):
   def _IgnoreLine(self, string):
     """Ignore empty lines, valgrind output, Android output."""
     if not string: return True
+    if not string.strip(): return True
     return (string.startswith("==") or string.startswith("**") or
             string.startswith("ANDROID") or
             # These five patterns appear in normal Native Client output.
@@ -4,14 +4,12 @@
 
 // Flags: --allow-natives-syntax
 
-function test(mode) {
+function testAdd(mode) {
   var a = [];
   Object.defineProperty(a, "length", { writable : false});
 
   function check(f) {
-    try {
-      f(a);
-    } catch(e) { }
+    assertThrows(function() { f(a) }, TypeError);
     assertFalse(0 in a);
     assertEquals(0, a.length);
   }
@@ -37,11 +35,77 @@ function test(mode) {
   check(unshift);
   %OptimizeFunctionOnNextCall(unshift);
   check(unshift);
+
+  function splice(a) {
+    a.splice(0, 0, 3);
+  }
+
+  check(splice);
+  check(splice);
+  check(splice);
+  %OptimizeFunctionOnNextCall(splice);
+  check(splice);
 }
 
-test("fast properties");
+testAdd("fast properties");
+
+testAdd("normalized");
+
+function testRemove(a, mode) {
+  Object.defineProperty(a, "length", { writable : false});
+
+  function check(f) {
+    assertThrows(function() { f(a) }, TypeError);
+    assertEquals(3, a.length);
+  }
+
+  if (mode == "fast properties") %ToFastProperties(a);
+
+  function pop(a) {
+    a.pop();
+  }
 
-test("normalized");
+  check(pop);
+  check(pop);
+  check(pop);
+  %OptimizeFunctionOnNextCall(pop);
+  check(pop);
+
+  function shift(a) {
+    a.shift();
+  }
+
+  check(shift);
+  check(shift);
+  check(shift);
+  %OptimizeFunctionOnNextCall(shift);
+  check(shift);
+
+  function splice(a) {
+    a.splice(0, 1);
+  }
+
+  check(splice);
+  check(splice);
+  check(splice);
+  %OptimizeFunctionOnNextCall(splice);
+  check(splice);
+
+  %ClearFunctionTypeFeedback(pop);
+  %ClearFunctionTypeFeedback(shift);
+  %ClearFunctionTypeFeedback(splice);
+}
+
+for (var i = 0; i < 3; i++) {
+  var a = [1, 2, 3];
+  if (i == 1) {
+    a = [1, 2, 3.5];
+  } else if (i == 2) {
+    a = [1, 2, "string"];
+  }
+  testRemove(a, "fast properties");
+  testRemove(a, "normalized");
+}
 
 var b = [];
 Object.defineProperty(b.__proto__, "0", {
@@ -97,11 +161,6 @@ try {
   b.unshift(3, 4, 5);
 } catch(e) { }
 
-// TODO(ulan): According to the ECMA-262 unshift should throw an exception
-// when moving b[0] to b[3] (see 15.4.4.13 step 6.d.ii). This is difficult
-// to do with our current implementation of SmartMove() in src/array.js and
-// it will regress performance. Uncomment the following line once acceptable
-// solution is found:
-// assertFalse(2 in b);
-// assertFalse(3 in b);
-// assertEquals(2, b.length);
+assertFalse(2 in b);
+assertFalse(3 in b);
+assertEquals(2, b.length);
diff --git a/deps/v8/test/mjsunit/array-shift4.js b/deps/v8/test/mjsunit/array-shift4.js
new file mode 100644 (file)
index 0000000..669b11a
--- /dev/null
@@ -0,0 +1,24 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+// Inlining shift with holey smi arrays shouldn't deopt just because it
+// encounters the hole on the copy step.
+function doShift(a) {
+  var x = a.shift();
+  return x;
+}
+
+function makeArray() {
+  var a = [1, 2,, 3];
+  a[0] = 2;
+  return a;
+}
+
+doShift(makeArray());
+doShift(makeArray());
+%OptimizeFunctionOnNextCall(doShift);
+doShift(makeArray());
+assertOptimized(doShift);
index d524af2..9bb029e 100644 (file)
@@ -1,7 +1,3 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
 var EXPECTED_OUTPUT =
   /frame averages: .+ \+- .+, range: .+ to .+ \n/;
 var Module = {
index 8cf63f5..bf8d177 100644 (file)
@@ -1,7 +1,3 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
 var EXPECTED_OUTPUT = 'sum:8930\n';
 var Module = {
   arguments: [1],
index f4884ac..05cdc4c 100644 (file)
@@ -1,7 +1,3 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
 var EXPECTED_OUTPUT = 'final: 40006013:58243.\n';
 var Module = {
   arguments: [1],
index d4c1a31..64bd491 100644 (file)
@@ -1,7 +1,3 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
 var EXPECTED_OUTPUT =
   '123456789\n' +
   '213456789\n' +
index a7aab3d..8c66354 100644 (file)
@@ -1,7 +1,3 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
 var EXPECTED_OUTPUT =
   'GGCCGGGCGCGGTGGCTCACGCCTGTAATCCCAGCACTTTGGGAGGCCGAGGCGGGCGGA\n' +
   'TCACCTGAGGTCAGGAGTTCGAGACCAGCCTGGCCAACATGGTGAAACCCCGTCTCTACT\n' +
index ca71bdc..e3a5d8c 100644 (file)
@@ -1,7 +1,3 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
 var EXPECTED_OUTPUT =
   'stretch tree of depth 10\t check: -1\n' +
   '1448\t trees of depth 4\t check: -1448\n' +
index 1deb305..e8e607c 100644 (file)
@@ -1,7 +1,3 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
 var EXPECTED_OUTPUT = 'final: 840.\n';
 var Module = {
   arguments: [1],
index 9c9c047..32f80b8 100644 (file)
@@ -1,7 +1,3 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
 var EXPECTED_OUTPUT = 'lastprime: 387677.\n';
 var Module = {
   arguments: [1],
index c64317c..d90ee38 100644 (file)
@@ -1,7 +1,3 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
 var EXPECTED_OUTPUT = 'sizes: 100000,25906\nok.\n';
 var Module = {
   arguments: [1],
diff --git a/deps/v8/test/mjsunit/asm/float32array-negative-offset.js b/deps/v8/test/mjsunit/asm/float32array-negative-offset.js
new file mode 100644 (file)
index 0000000..524bdc8
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+
+var m = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  var MEM32 = new stdlib.Float32Array(heap);
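+  // Out-of-bounds typed-array loads yield undefined, which the + coercion
+  // turns into NaN; out-of-bounds stores are silently dropped.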
+  function load(i) {
+    i = i|0;
+    i = +MEM32[i >> 2];
+    return i;
+  }
+  function store(i, v) {
+    i = i|0;
+    v = +v;
+    MEM32[i >> 2] = v;
+  }
+  function load8(i) {
+    i = i|0;
+    i = +MEM32[i + 8 >> 2];
+    return i;
+  }
+  function store8(i, v) {
+    i = i|0;
+    v = +v;
+    MEM32[i + 8 >> 2] = v;
+  }
+  return { load: load, store: store, load8: load8, store8: store8 };
+})(stdlib, foreign, buffer);
+
+assertEquals(NaN, m.load(-8));
+assertEquals(NaN, m.load8(-16));
+m.store(0, 42.0);
+assertEquals(42.0, m.load8(-8));
+m.store8(-8, 99.0);
+assertEquals(99.0, m.load(0));
+assertEquals(99.0, m.load8(-8));
diff --git a/deps/v8/test/mjsunit/asm/float64array-negative-offset.js b/deps/v8/test/mjsunit/asm/float64array-negative-offset.js
new file mode 100644 (file)
index 0000000..154bd82
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+
+var m = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  var MEM64 = new stdlib.Float64Array(heap);
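+  // Out-of-bounds typed-array loads yield undefined, which the + coercion
+  // turns into NaN; out-of-bounds stores are silently dropped.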
+  function load(i) {
+    i = i|0;
+    i = +MEM64[i >> 3];
+    return i;
+  }
+  function store(i, v) {
+    i = i|0;
+    v = +v;
+    MEM64[i >> 3] = v;
+  }
+  function load8(i) {
+    i = i|0;
+    i = +MEM64[i + 8 >> 3];
+    return i;
+  }
+  function store8(i, v) {
+    i = i|0;
+    v = +v;
+    MEM64[i + 8 >> 3] = v;
+  }
+  return { load: load, store: store, load8: load8, store8: store8 };
+})(stdlib, foreign, buffer);
+
+assertEquals(NaN, m.load(-8));
+assertEquals(NaN, m.load8(-16));
+m.store(0, 42.0);
+assertEquals(42.0, m.load8(-8));
+m.store8(-8, 99.0);
+assertEquals(99.0, m.load(0));
+assertEquals(99.0, m.load8(-8));
index 3896ec4..9cd9582 100644 (file)
@@ -12,22 +12,32 @@ function Module(stdlib, foreign, heap) {
     i = +i;
     return +(-1 * i);
   }
-  return { f1: f1, f2: f2 };
+  function f3(i) {
+    i = +i;
+    return +(-i);
+  }
+  return { f1: f1, f2: f2, f3: f3 };
 }
 
 var m = Module(this, {}, new ArrayBuffer(64 * 1024));
 
 assertEquals(NaN, m.f1(NaN));
 assertEquals(NaN, m.f2(NaN));
+assertEquals(NaN, m.f3(NaN));
 assertEquals(Infinity, 1 / m.f1(-0));
 assertEquals(Infinity, 1 / m.f2(-0));
+assertEquals(Infinity, 1 / m.f3(-0));
 assertEquals(Infinity, m.f1(-Infinity));
 assertEquals(Infinity, m.f2(-Infinity));
+assertEquals(Infinity, m.f3(-Infinity));
 assertEquals(-Infinity, 1 / m.f1(0));
 assertEquals(-Infinity, 1 / m.f2(0));
+assertEquals(-Infinity, 1 / m.f3(0));
 assertEquals(-Infinity, m.f1(Infinity));
 assertEquals(-Infinity, m.f2(Infinity));
+assertEquals(-Infinity, m.f3(Infinity));
 for (var i = -2147483648; i < 2147483648; i += 3999777) {
   assertEquals(-i, m.f1(i));
   assertEquals(-i, m.f2(i));
+  assertEquals(-i, m.f3(i));
 }
diff --git a/deps/v8/test/mjsunit/asm/if-tonumber.js b/deps/v8/test/mjsunit/asm/if-tonumber.js
new file mode 100644 (file)
index 0000000..dd3f73b
--- /dev/null
@@ -0,0 +1,31 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+function Module(stdlib, foreign, heap) {
+  "use asm";
+  function foo(i) {
+    i = i|0;
+    if (i > 0) {
+      i = i == 1;
+    } else {
+      i = 1;
+    }
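+    // i is now either a boolean or the number 1; & 1 coerces it back to 0 or 1.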
+    return i & 1|0;
+  }
+  return { foo: foo };
+}
+
+var m = Module(stdlib, foreign, buffer);
+
+assertEquals(1, m.foo(-1));
+assertEquals(1, m.foo(-0));
+assertEquals(1, m.foo(0));
+assertEquals(1, m.foo(1));
+assertEquals(0, m.foo(2));
+assertEquals(1, m.foo(true));
+assertEquals(1, m.foo(false));
diff --git a/deps/v8/test/mjsunit/asm/int16array-negative-offset.js b/deps/v8/test/mjsunit/asm/int16array-negative-offset.js
new file mode 100644 (file)
index 0000000..5d33115
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+
+var m = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  var MEM16 = new stdlib.Int16Array(heap);
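+  // Out-of-bounds loads yield undefined, which |0 coerces to 0;
+  // out-of-bounds stores are silently dropped.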
+  function load(i) {
+    i = i|0;
+    i = MEM16[i >> 1]|0;
+    return i;
+  }
+  function store(i, v) {
+    i = i|0;
+    v = v|0;
+    MEM16[i >> 1] = v;
+  }
+  function load8(i) {
+    i = i|0;
+    i = MEM16[i + 8 >> 1]|0;
+    return i;
+  }
+  function store8(i, v) {
+    i = i|0;
+    v = v|0;
+    MEM16[i + 8 >> 1] = v;
+  }
+  return { load: load, store: store, load8: load8, store8: store8 };
+})(stdlib, foreign, buffer);
+
+assertEquals(0, m.load(-8));
+assertEquals(0, m.load8(-16));
+m.store(0, 42);
+assertEquals(42, m.load8(-8));
+m.store8(-8, 99);
+assertEquals(99, m.load(0));
+assertEquals(99, m.load8(-8));
diff --git a/deps/v8/test/mjsunit/asm/int32array-negative-offset.js b/deps/v8/test/mjsunit/asm/int32array-negative-offset.js
new file mode 100644 (file)
index 0000000..d1a8efa
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+
+var m = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  var MEM32 = new stdlib.Int32Array(heap);
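+  // Out-of-bounds loads yield undefined, which |0 coerces to 0;
+  // out-of-bounds stores are silently dropped.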
+  function load(i) {
+    i = i|0;
+    i = MEM32[i >> 2]|0;
+    return i;
+  }
+  function store(i, v) {
+    i = i|0;
+    v = v|0;
+    MEM32[i >> 2] = v;
+  }
+  function load8(i) {
+    i = i|0;
+    i = MEM32[i + 8 >> 2]|0;
+    return i;
+  }
+  function store8(i, v) {
+    i = i|0;
+    v = v|0;
+    MEM32[i + 8 >> 2] = v;
+  }
+  return { load: load, store: store, load8: load8, store8: store8 };
+})(stdlib, foreign, buffer);
+
+assertEquals(0, m.load(-8));
+assertEquals(0, m.load8(-16));
+m.store(0, 42);
+assertEquals(42, m.load8(-8));
+m.store8(-8, 99);
+assertEquals(99, m.load(0));
+assertEquals(99, m.load8(-8));
diff --git a/deps/v8/test/mjsunit/asm/int32mod-constant.js b/deps/v8/test/mjsunit/asm/int32mod-constant.js
new file mode 100644 (file)
index 0000000..fdbdc5d
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = {};
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+function Int32Mod(divisor) {
+  var name = "mod_";
+  if (divisor < 0) {
+    name += "minus_";
+  }
+  name += Math.abs(divisor);
+  var m = eval("function Module(stdlib, foreign, heap) {\n"
+      + " \"use asm\";\n"
+      + " function " + name + "(dividend) {\n"
+      + "  return ((dividend | 0) % " + divisor + ") | 0;\n"
+      + " }\n"
+      + " return { f: " + name + "}\n"
+      + "}; Module");
+  return m(stdlib, foreign, heap).f;
+}
+
+var divisors = [-2147483648, -32 * 1024, -1000, -16, -7, -2, -1, 0,
+                1, 3, 4, 10, 64, 100, 1024, 2147483647];
+for (var i in divisors) {
+  var divisor = divisors[i];
+  var mod = Int32Mod(divisor);
+  for (var dividend = -2147483648; dividend < 2147483648; dividend += 3999773) {
+    assertEquals((dividend % divisor) | 0, mod(dividend));
+  }
+}
index ec3e887..22fa813 100644 (file)
@@ -6,28 +6,21 @@ var stdlib = {};
 var foreign = {};
 var heap = new ArrayBuffer(64 * 1024);
 
-function Int32Mod(divisor) {
-  var name = "mod_";
-  if (divisor < 0) {
-    name += "minus_";
+var mod = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function mod(dividend, divisor) {
+    dividend = dividend|0;
+    divisor = divisor|0;
+    return (dividend % divisor) | 0;
   }
-  name += Math.abs(divisor);
-  var m = eval("function Module(stdlib, foreign, heap) {\n"
-      + " \"use asm\";\n"
-      + " function " + name + "(dividend) {\n"
-      + "  return ((dividend | 0) % " + divisor + ") | 0;\n"
-      + " }\n"
-      + " return { f: " + name + "}\n"
-      + "}; Module");
-  return m(stdlib, foreign, heap).f;
-}
+  return { mod: mod };
+})(stdlib, foreign, heap).mod;
 
-var divisors = [-2147483648, -32 * 1024, -1000, -16, -7, -2, -1,
+var divisors = [-2147483648, -32 * 1024, -1000, -16, -7, -2, -1, 0,
                 1, 3, 4, 10, 64, 100, 1024, 2147483647];
 for (var i in divisors) {
   var divisor = divisors[i];
-  var mod = Int32Mod(divisor);
   for (var dividend = -2147483648; dividend < 2147483648; dividend += 3999773) {
-    assertEquals((dividend % divisor) | 0, mod(dividend));
+    assertEquals((dividend % divisor) | 0, mod(dividend, divisor));
   }
 }
diff --git a/deps/v8/test/mjsunit/asm/int8array-negative-offset.js b/deps/v8/test/mjsunit/asm/int8array-negative-offset.js
new file mode 100644 (file)
index 0000000..47dbc1b
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+
+var m = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  var MEM8 = new stdlib.Int8Array(heap);
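+  // Out-of-bounds loads yield undefined, which |0 coerces to 0;
+  // out-of-bounds stores are silently dropped.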
+  function load(i) {
+    i = i|0;
+    i = MEM8[i >> 0]|0;
+    return i;
+  }
+  function store(i, v) {
+    i = i|0;
+    v = v|0;
+    MEM8[i >> 0] = v;
+  }
+  function load8(i) {
+    i = i|0;
+    i = MEM8[i + 8 >> 0]|0;
+    return i;
+  }
+  function store8(i, v) {
+    i = i|0;
+    v = v|0;
+    MEM8[i + 8 >> 0] = v;
+  }
+  return { load: load, store: store, load8: load8, store8: store8 };
+})(stdlib, foreign, buffer);
+
+assertEquals(0, m.load(-8));
+assertEquals(0, m.load8(-16));
+m.store(0, 42);
+assertEquals(42, m.load8(-8));
+m.store8(-8, 99);
+assertEquals(99, m.load(0));
+assertEquals(99, m.load8(-8));
diff --git a/deps/v8/test/mjsunit/asm/sign-extend.js b/deps/v8/test/mjsunit/asm/sign-extend.js
new file mode 100644 (file)
index 0000000..62d8d34
--- /dev/null
@@ -0,0 +1,45 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+
+var sext8 = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function sext8(i) {
+    i = i|0;
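+    // Shifting left 24 then arithmetic-right 24 keeps only the low byte,
+    // sign-extended to 32 bits.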
+    i = i << 24 >> 24;
+    return i|0;
+  }
+  return { sext8: sext8 };
+})(stdlib, foreign, buffer).sext8;
+
+assertEquals(-128, sext8(128));
+assertEquals(-1, sext8(-1));
+assertEquals(-1, sext8(255));
+assertEquals(0, sext8(0));
+assertEquals(0, sext8(256));
+assertEquals(42, sext8(42));
+assertEquals(127, sext8(127));
+
+
+var sext16 = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function sext16(i) {
+    i = i|0;
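+    // Same idiom with 16-bit shifts: the low half-word, sign-extended.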
+    i = i << 16 >> 16;
+    return i|0;
+  }
+  return { sext16: sext16 };
+})(stdlib, foreign, buffer).sext16;
+
+assertEquals(-32768, sext16(32768));
+assertEquals(-1, sext16(-1));
+assertEquals(-1, sext16(65535));
+assertEquals(0, sext16(0));
+assertEquals(0, sext16(65536));
+assertEquals(128, sext16(128));
+assertEquals(32767, sext16(32767));
diff --git a/deps/v8/test/mjsunit/asm/uint32mod-constant.js b/deps/v8/test/mjsunit/asm/uint32mod-constant.js
new file mode 100644 (file)
index 0000000..4ba94da
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = {};
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+function Uint32Mod(divisor) {
+  var name = "mod_";
+  name += divisor;
+  var m = eval("function Module(stdlib, foreign, heap) {\n"
+      + " \"use asm\";\n"
+      + " function " + name + "(dividend) {\n"
+      + "  return ((dividend >>> 0) % " + divisor + ") >>> 0;\n"
+      + " }\n"
+      + " return { f: " + name + "}\n"
+      + "}; Module");
+  return m(stdlib, foreign, heap).f;
+}
+
+var divisors = [0, 1, 3, 4, 10, 42, 64, 100, 1024, 2147483647, 4294967295];
+for (var i in divisors) {
+  var divisor = divisors[i];
+  var mod = Uint32Mod(divisor);
+  for (var dividend = 0; dividend < 4294967296; dividend += 3999773) {
+    assertEquals((dividend % divisor) >>> 0, mod(dividend));
+  }
+}
index 4ba94da..fa40507 100644 (file)
@@ -6,24 +6,20 @@ var stdlib = {};
 var foreign = {};
 var heap = new ArrayBuffer(64 * 1024);
 
-function Uint32Mod(divisor) {
-  var name = "mod_";
-  name += divisor;
-  var m = eval("function Module(stdlib, foreign, heap) {\n"
-      + " \"use asm\";\n"
-      + " function " + name + "(dividend) {\n"
-      + "  return ((dividend >>> 0) % " + divisor + ") >>> 0;\n"
-      + " }\n"
-      + " return { f: " + name + "}\n"
-      + "}; Module");
-  return m(stdlib, foreign, heap).f;
-}
+var mod = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function mod(dividend, divisor) {
+    dividend = dividend >>> 0;
+    divisor = divisor >>> 0;
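+    // For divisor 0 the % yields NaN, and NaN >>> 0 is 0, which matches the
+    // reference computation in the assertion loop below.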
+    return (dividend % divisor) >>> 0;
+  }
+  return { mod: mod };
+})(stdlib, foreign, heap).mod;
 
 var divisors = [0, 1, 3, 4, 10, 42, 64, 100, 1024, 2147483647, 4294967295];
 for (var i in divisors) {
   var divisor = divisors[i];
-  var mod = Uint32Mod(divisor);
   for (var dividend = 0; dividend < 4294967296; dividend += 3999773) {
-    assertEquals((dividend % divisor) >>> 0, mod(dividend));
+    assertEquals((dividend % divisor) >>> 0, mod(dividend, divisor));
   }
 }
diff --git a/deps/v8/test/mjsunit/asm/word32ror.js b/deps/v8/test/mjsunit/asm/word32ror.js
new file mode 100644 (file)
index 0000000..9535bde
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = {};
+var foreign = {};
+var heap = new ArrayBuffer(64 * 1024);
+
+var rol = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function rol(x, y) {
+    x = x | 0;
+    y = y | 0;
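+    // Rotate left: bits shifted out at the top re-enter at the bottom.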
+    return (x << y) | (x >>> (32 - y));
+  }
+  return { rol: rol };
+})(stdlib, foreign, heap).rol;
+
+assertEquals(10, rol(10, 0));
+assertEquals(2, rol(1, 1));
+assertEquals(0x40000000, rol(1, 30));
+assertEquals(-0x80000000, rol(1, 31));
+
+var ror = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function ror(x, y) {
+    x = x | 0;
+    y = y | 0;
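+    // Rotate right: the mirror of the rol idiom above.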
+    return (x << (32 - y)) | (x >>> y);
+  }
+  return { ror: ror };
+})(stdlib, foreign, heap).ror;
+
+assertEquals(10, ror(10, 0));
+assertEquals(-0x80000000, ror(1, 1));
+assertEquals(0x40000000, ror(1, 2));
+assertEquals(2, ror(1, 31));
diff --git a/deps/v8/test/mjsunit/asm/zero-extend.js b/deps/v8/test/mjsunit/asm/zero-extend.js
new file mode 100644 (file)
index 0000000..a1f9da6
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var stdlib = this;
+var buffer = new ArrayBuffer(64 * 1024);
+var foreign = {}
+
+
+var zext8 = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function zext8(i) {
+    i = i|0;
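+    // Masking with 0xff zero-extends the low byte; results are always 0..255.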
+    return i & 0xff;
+  }
+  return { zext8: zext8 };
+})(stdlib, foreign, buffer).zext8;
+
+assertEquals(0, zext8(0));
+assertEquals(0, zext8(0x100));
+assertEquals(0xff, zext8(-1));
+assertEquals(0xff, zext8(0xff));
+
+
+var zext16 = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  function zext16(i) {
+    i = i|0;
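+    // Masking with 0xffff zero-extends the low half-word.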
+    return i & 0xffff;
+  }
+  return { zext16: zext16 };
+})(stdlib, foreign, buffer).zext16;
+
+assertEquals(0, zext16(0));
+assertEquals(0, zext16(0x10000));
+assertEquals(0xffff, zext16(-1));
+assertEquals(0xffff, zext16(0xffff));
index 0778e95..d3f3ac3 100644 (file)
@@ -101,6 +101,7 @@ function CreateTestValues() {
 
 // -----------------------------------------------------------------------------
 
+
 function TestDivisionLike(ref, construct, values, divisor) {
   // Define the function to test.
   var OptFun = new Function("dividend", construct(divisor));
@@ -111,12 +112,14 @@ function TestDivisionLike(ref, construct, values, divisor) {
   %OptimizeFunctionOnNextCall(OptFun);
   OptFun(13);
 
-  // Check results.
-  values.forEach(function(dividend) {
+  function dude(dividend) {
     // Avoid deopt caused by overflow; we do not want to test this here.
     if (dividend === -2147483648 && divisor === -1) return;
     assertEquals(ref(dividend, divisor), OptFun(dividend));
-  });
+  }
+
+  // Check results.
+  values.forEach(dude);
 }
 
 function Test(ref, construct) {
index 8607cd9..3bda9fc 100644 (file)
@@ -38,8 +38,8 @@ assertEquals(8, eval("6;'abc';8"));
 // "/" comes just before "0".
 assertThrows('"\\x1/"');
 assertThrows('"\\u111/"');
-assertEquals("\\x1/", RegExp("\\x1/").source);
-assertEquals("\\u111/", RegExp("\\u111/").source);
+assertEquals("\\x1\\/", RegExp("\\x1/").source);
+assertEquals("\\u111\\/", RegExp("\\u111/").source);
 
 // ":" comes just after "9".
 assertThrows('"\\x1:"');
diff --git a/deps/v8/test/mjsunit/compiler/regress-3786.js b/deps/v8/test/mjsunit/compiler/regress-3786.js
new file mode 100644 (file)
index 0000000..d30ac0e
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var foo = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  var f = stdlib.Math.cos;
+  function foo() {
+    return f(48,48,48,59,32,102,111,110,116,45,119,101,105,103,104,116,58,98,111,108,100,59,102,111,110,116,45,102,97,109,105,108,121,58,65,114,105,97,108,44,32,72,101,108,118,101,116,105,99,97,44,32,115,97,110,115,45,115,101,114,105,102,44,86,101,114,100,97,110,97,34,32,99,111,108,111,114,61,34,35,70,70,48,48,48,48,34,62,70,79,82,69,88,47,80,65,82,38,35,51,48,52,59,60,119,98,114,32,47,62,84,69,32,38,35,51,48,52,59,38,35,51,53,48,59,76,69,77,76,69,82,38,35,51,48,52,59,60,47,102,111,110,116,62,60,47,115,112,97,110,62,60,47,116,100,62,10,60,47,116,114,62,60,116,114,62,10,60,116,100,32,97,108,105,103,110,61,34,108,101,102,116,34,62,60,115,112,97,110,32,105,100,61,34,97,99,95,100,101,115,99,34,62,60,102,111,110,116,32,115,116,121,108,101,61,34,102,111,110,116,45,115,105,122,101,58,49,49,112,120,59,32,99,111,108,111,114,58,35,48,48,48,48,48,48,59,32,102,111,110,116,45,102,97,109,105,108,121,58,65,114,105,97,108,44,32,72,101,108,118,101,116,105,99,97,44,32,115,97,110,115,45,115,101,114,105,102,44,86,101,114,100,97,110,97,34,62,38,112,111,117,110,100,59,47,36,32,50,32,112,105,112,44,32,89,84,76,32,49,50,32,112,105,112,44,65,108,116,38,35,51,48,53,59,110,32,51,32,99,101,110,116,46,32,83,97,98,105,116,32,83,112,114,101,97,100,45,84,38,117,117,109,108,59,114,60,119,98,114,32,47,62,107,32,66,97,110,107,97,115,38,35,51,48,53,59,32,65,86,65,78,84,65,74,73,60,47,102,111,110,116,62,60,47,115,112,97,110,62,60,47,116,100,62,10,60,47,116,114,62,60,116,114,62,10,60,116,100,32,97,108,105,103,110,61,34,108,101,102,116,34,62,60,100,105,118,32,105,100,61,34,97,99,95,117,114,108,34,62,60,102,111,110,116,32,115,116,121,108,101,61,34,102,111,110,116,45,115,105,122,101,58,49,48,112,120,59,32,99,111,108,111,114,58,35,70,70,54,54,57,57,59,32,102,111,110,116,45,102,97114,105,97);
+  }
+  return { foo: foo };
+})(this, {}).foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-439743.js b/deps/v8/test/mjsunit/compiler/regress-439743.js
new file mode 100644 (file)
index 0000000..288e2a4
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function module(stdlib, foreign, heap) {
+    "use asm";
+    var MEM32 = new stdlib.Int32Array(heap);
+    function foo(i) {
+      i = i|0;
+      MEM32[0] = i;
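+      // For i == -4 the index (i + 4) >> 2 is 0, so the load below must
+      // observe the store above.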
+      return MEM32[i + 4 >> 2]|0;
+    }
+    return { foo: foo };
+}
+
+var foo = module(this, {}, new ArrayBuffer(64*1024)).foo;
+assertEquals(-4, foo(-4));
diff --git a/deps/v8/test/mjsunit/compiler/regress-443744.js b/deps/v8/test/mjsunit/compiler/regress-443744.js
new file mode 100644 (file)
index 0000000..5e7f3bc
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var m = (function(stdlib, foreign, heap) {
+  "use asm";
+  var MEM = new stdlib.Uint8Array(heap);
+  function f(x) {
+    x = x | 0;
+    MEM[x] = 0;
+  }
+  return {f: f};
+})(this, {}, new ArrayBuffer(1));
+m.f(-926416896 * 32 * 1024);
diff --git a/deps/v8/test/mjsunit/compiler/regress-444508.js b/deps/v8/test/mjsunit/compiler/regress-444508.js
new file mode 100644 (file)
index 0000000..e7d51ae
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function Module(stdlib, foreign, heap) {
+  "use asm";
+  // This is not valid asm.js, but should nevertheless work.
+  var MEM = new Uint8ClampedArray(heap);
+  function foo(i) { MEM[0] = 1; }
+  return {foo: foo};
+})(this, {}, new ArrayBuffer(64 * 1024)).foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-444695.js b/deps/v8/test/mjsunit/compiler/regress-444695.js
new file mode 100644 (file)
index 0000000..168ae25
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var foo = (function(stdlib, foreign, heap) {
+  "use asm";
+  var MEM = new stdlib.Uint8Array(heap);
+  function foo(x) { MEM[x | 0] *= 0; }
+  return {foo: foo};
+})(this, {}, new ArrayBuffer(1)).foo;
+foo(-926416896 * 8 * 1024);
diff --git a/deps/v8/test/mjsunit/compiler/regress-445267.js b/deps/v8/test/mjsunit/compiler/regress-445267.js
new file mode 100644 (file)
index 0000000..465168b
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var foo = (function Module(stdlib, foreign, heap) {
+  "use asm";
+  var MEM16 = new stdlib.Int16Array(heap);
+  function foo(i) {
+    i = i|0;
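+    // Note: the constant offset 2147483650 does not fit in an int32.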
+    i = MEM16[i + 2147483650 >> 1]|0;
+    return i;
+  }
+  return { foo: foo };
+})(this, {}, new ArrayBuffer(64 * 1024)).foo;
+
+foo(0);
diff --git a/deps/v8/test/mjsunit/compiler/regress-445732.js b/deps/v8/test/mjsunit/compiler/regress-445732.js
new file mode 100644 (file)
index 0000000..199a29a
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --turbo-asm
+
+"use asm";
+
+%NeverOptimizeFunction(f);
+function f() { }
+f();
diff --git a/deps/v8/test/mjsunit/compiler/regress-445858.js b/deps/v8/test/mjsunit/compiler/regress-445858.js
new file mode 100644 (file)
index 0000000..b2214ea
--- /dev/null
@@ -0,0 +1,15 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var foo = (function module(stdlib, foreign, heap) {
+  "use asm";
+  var MEM = new stdlib.Int8Array(heap);
+  function foo(i) {
+    i = i|0;
+    i[0] = i;
+    return MEM[i + 1 >> 0]|0;
+  }
+  return { foo: foo };
+})(this, {}, new ArrayBuffer(64 * 1024)).foo;
+foo(-1);
diff --git a/deps/v8/test/mjsunit/compiler/regress-445859.js b/deps/v8/test/mjsunit/compiler/regress-445859.js
new file mode 100644 (file)
index 0000000..256af3e
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var foo = (function Module(global, env, buffer) {
+  "use asm";
+  var i8 = new global.Int8Array(buffer);
+  function foo() { i8[0] += 4294967295; }
+  return { foo: foo };
+})(this, {}, new ArrayBuffer(64 * 1024)).foo;
+foo();
diff --git a/deps/v8/test/mjsunit/compiler/regress-int32array-outofbounds-nan.js b/deps/v8/test/mjsunit/compiler/regress-int32array-outofbounds-nan.js
new file mode 100644 (file)
index 0000000..2eba2a4
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function Module(stdlib, foreign, heap) {
+  "use asm";
+  var MEM32 = new stdlib.Int32Array(heap);
+  function foo(i) {
+    i = i|0;
+    return +MEM32[i >> 2];
+  }
+  return {foo: foo};
+}
+
+var foo = Module(this, {}, new ArrayBuffer(4)).foo;
+assertEquals(NaN, foo(-4));
+assertEquals(NaN, foo(4));
diff --git a/deps/v8/test/mjsunit/compiler/regress-uint8-deopt.js b/deps/v8/test/mjsunit/compiler/regress-uint8-deopt.js
new file mode 100644 (file)
index 0000000..ba2823f
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --turbo-asm --turbo-deoptimization --allow-natives-syntax
+
+function Module(heap) {
+  "use asm";
+  var a = new Uint8Array(heap);
+  function f() {
+    var x = a[0] | 0;
+    %DeoptimizeFunction(f);
+    return x;
+  }
+  return f;
+}
+assertEquals(0, Module(new ArrayBuffer(1))());
diff --git a/deps/v8/test/mjsunit/compiler/truncating-store.js b/deps/v8/test/mjsunit/compiler/truncating-store.js
new file mode 100644 (file)
index 0000000..9e3dd38
--- /dev/null
@@ -0,0 +1,98 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function() {
+  var asm = (function Module(global, env, buffer) {
+    "use asm";
+
+    var i8 = new global.Int8Array(buffer);
+    var u8 = new global.Uint8Array(buffer);
+    var i16 = new global.Int16Array(buffer);
+    var u16 = new global.Uint16Array(buffer);
+    var i32 = new global.Int32Array(buffer);
+    var u32 = new global.Uint32Array(buffer);
+
+    var H = 0;
+
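+    // Each store truncates H (4294967295, i.e. -1 as int32) to the element
+    // width of the target array.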
+    function store_i8() {
+      H = 4294967295;
+      i8[0 >> 0] = H;
+      return i8[0 >> 0];
+    }
+
+    function store_u8() {
+      H = 4294967295;
+      u8[0 >> 0] = H;
+      return u8[0 >> 0];
+    }
+
+    function store_i16() {
+      H = 4294967295;
+      i16[0 >> 0] = H;
+      return i16[0 >> 0];
+    }
+
+    function store_u16() {
+      H = 4294967295;
+      u16[0 >> 0] = H;
+      return u16[0 >> 0];
+    }
+
+    function store_i32() {
+      H = 4294967295;
+      i32[0 >> 0] = H;
+      return i32[0 >> 0];
+    }
+
+    function store_u32() {
+      H = 4294967295;
+      u32[0 >> 0] = H;
+      return u32[0 >> 0];
+    }
+
+    return { store_i8: store_i8,
+             store_u8: store_u8,
+             store_i16: store_i16,
+             store_u16: store_u16,
+             store_i32: store_i32,
+             store_u32: store_u32 };
+  })({
+    "Int8Array": Int8Array,
+    "Uint8Array": Uint8Array,
+    "Int16Array": Int16Array,
+    "Uint16Array": Uint16Array,
+    "Int32Array": Int32Array,
+    "Uint32Array": Uint32Array
+  }, {}, new ArrayBuffer(64 * 1024));
+
+  assertEquals(-1, asm.store_i8());
+  assertEquals(255, asm.store_u8());
+  assertEquals(-1, asm.store_i16());
+  assertEquals(65535, asm.store_u16());
+  assertEquals(-1, asm.store_i32());
+  assertEquals(4294967295, asm.store_u32());
+})();
+
+(function() {
+  var asm = (function Module(global, env, buffer) {
+    "use asm";
+
+    var i32 = new global.Int32Array(buffer);
+
+    var H = 0;
+
+    // This is not valid asm.js, but we should still generate correct code.
+    function store_i32_from_string() {
+      H = "3";
+      i32[0 >> 0] = H;
+      return i32[0 >> 0];
+    }
+
+    return { store_i32_from_string: store_i32_from_string };
+  })({
+    "Int32Array": Int32Array
+  }, {}, new ArrayBuffer(64 * 1024));
+
+  assertEquals(3, asm.store_i32_from_string());
+})();
index 137dfec..3c03bda 100644 (file)
@@ -36,13 +36,17 @@ var exception = false;
 
 var base_request = '"seq":0,"type":"request","command":"clearbreakpointgroup"';
 var scriptId = null;
+var muteListener = false;
 
 function safeEval(code) {
   try {
+    muteListener = true;
     return eval('(' + code + ')');
   } catch (e) {
     assertEquals(void 0, e);
     return undefined;
+  } finally {
+    muteListener = false;
   }
 }
 
@@ -58,6 +62,7 @@ function testArguments(dcp, arguments, success) {
 }
 
 function listener(event, exec_state, event_data, data) {
+  if (muteListener) return;
   try {
     if (event == Debug.DebugEvent.Break) {
       // Get the debug command processor.
index c38cd84..8623406 100644 (file)
@@ -37,7 +37,7 @@ var current_source = '';  // Current source being compiled.
 var source_count = 0;  // Total number of sources compiled.
 var host_compilations = 0;  // Number of sources compiled through the API.
 var eval_compilations = 0;  // Number of sources compiled through eval.
-
+var mute_listener = false;
 
 function compileSource(source) {
   current_source = source;
@@ -45,8 +45,20 @@ function compileSource(source) {
   source_count++;
 }
 
+function safeEval(code) {
+  try {
+    mute_listener = true;
+    return eval('(' + code + ')');
+  } catch (e) {
+    assertEquals(void 0, e);
+    return undefined;
+  } finally {
+    mute_listener = false;
+  }
+}
 
 function listener(event, exec_state, event_data, data) {
+  if (mute_listener) return;
   try {
     if (event == Debug.DebugEvent.BeforeCompile ||
         event == Debug.DebugEvent.AfterCompile ||
@@ -81,7 +93,7 @@ function listener(event, exec_state, event_data, data) {
       }
       // Check that script context is included into the event message.
       var json = event_data.toJSONProtocol();
-      var msg = eval('(' + json + ')');
+      var msg = safeEval(json);
       assertTrue('context' in msg.body.script);
 
       // Check that we pick script name from //# sourceURL, iff present
index 6696ec5..84b7e20 100644 (file)
@@ -89,9 +89,10 @@ function listener(event, exec_state, event_data, data) {
           }
 
-          // All frames except the bottom one have two scopes.
+          // All frames except the bottom one have three scopes.
-          assertEquals(2, frame.scopeCount());
+          assertEquals(3, frame.scopeCount());
           assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
-          assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
+          assertEquals(debug.ScopeType.Script, frame.scope(1).scopeType());
+          assertEquals(debug.ScopeType.Global, frame.scope(2).scopeType());
 
           Object.keys(expected_locals).forEach(function (name) {
             assertEquals(expected_locals[name],
@@ -134,8 +135,9 @@ function listener(event, exec_state, event_data, data) {
                        frame.evaluate(arguments_sum).value());
         } else {
-          // The bottom frame only have the global scope.
+          // The bottom frame only has the script scope and the global scope.
-          assertEquals(1, frame.scopeCount());
-          assertEquals(debug.ScopeType.Global, frame.scope(0).scopeType());
+          assertEquals(2, frame.scopeCount());
+          assertEquals(debug.ScopeType.Script, frame.scope(0).scopeType());
+          assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
         }
 
         // Check the frame function.
index d424001..9d539fe 100644 (file)
@@ -79,10 +79,11 @@ function listener(event, exec_state, event_data, data) {
                          frame.argumentValue(j).value());
           }
 
-          // All frames except the bottom one have two scopes.
-          assertEquals(2, frame.scopeCount());
+          // All frames except the bottom one have three scopes.
+          assertEquals(3, frame.scopeCount());
           assertEquals(debug.ScopeType.Local, frame.scope(0).scopeType());
-          assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
+          assertEquals(debug.ScopeType.Script, frame.scope(1).scopeType());
+          assertEquals(debug.ScopeType.Global, frame.scope(2).scopeType());
 
           Object.keys(expected_locals).forEach(function (name) {
             assertEquals(expected_locals[name],
@@ -124,9 +125,10 @@ function listener(event, exec_state, event_data, data) {
           assertEquals(expected_args_sum,
                        frame.evaluate(arguments_sum).value());
         } else {
-          // The bottom frame only have the global scope.
-          assertEquals(1, frame.scopeCount());
-          assertEquals(debug.ScopeType.Global, frame.scope(0).scopeType());
+          // The bottom frame only has the script scope and the global scope.
+          assertEquals(2, frame.scopeCount());
+          assertEquals(debug.ScopeType.Script, frame.scope(0).scopeType());
+          assertEquals(debug.ScopeType.Global, frame.scope(1).scopeType());
         }
 
         // Check the frame function.
index 5e1c83c..fa615ad 100644 (file)
@@ -32,6 +32,7 @@ Debug = debug.Debug
 var evaluate_callback;
 
 function listener(event, exec_state, event_data, data) {
+  if (event !== Debug.DebugEvent.Break) return;
   try {
     var context = { what_is_capybara: "a fish" };
     var context2 = { what_is_capybara: "a fish", what_is_parrot: "a beard" };
index b51e8b4..8992fe7 100644 (file)
@@ -48,7 +48,8 @@ var ScopeType = { Global: 0,
                   With: 2,
                   Closure: 3,
                   Catch: 4,
-                  Block: 5 };
+                  Block: 5,
+                  Script: 6};
 
 var f1 = (function F1(x) {
   function F2(y) {
@@ -68,21 +69,23 @@ var f1 = (function F1(x) {
 
 var mirror = Debug.MakeMirror(f1);
 
-assertEquals(5, mirror.scopeCount());
+assertEquals(6, mirror.scopeCount());
 
 CheckScope(mirror.scope(0), { a: 4, b: 5 }, ScopeType.Closure);
 CheckScope(mirror.scope(1), { w: 5, v: "Capybara" }, ScopeType.With);
 CheckScope(mirror.scope(2), { y: 17, z: 22 }, ScopeType.Closure);
 CheckScope(mirror.scope(3), { x: 5 }, ScopeType.Closure);
-CheckScope(mirror.scope(4), {}, ScopeType.Global);
+CheckScope(mirror.scope(4), {}, ScopeType.Script);
+CheckScope(mirror.scope(5), {}, ScopeType.Global);
 
 var f2 = function() { return 5; }
 
 var mirror = Debug.MakeMirror(f2);
 
-assertEquals(1, mirror.scopeCount());
+assertEquals(2, mirror.scopeCount());
 
-CheckScope(mirror.scope(0), {}, ScopeType.Global);
+CheckScope(mirror.scope(0), {}, ScopeType.Script);
+CheckScope(mirror.scope(1), {}, ScopeType.Global);
 
 var f3 = (function F1(invisible_parameter) {
   var invisible1 = 1;
@@ -99,11 +102,12 @@ var f3 = (function F1(invisible_parameter) {
 
 var mirror = Debug.MakeMirror(f3);
 
-assertEquals(3, mirror.scopeCount());
+assertEquals(4, mirror.scopeCount());
 
 CheckScope(mirror.scope(0), { visible2: 20 }, ScopeType.Closure);
 CheckScope(mirror.scope(1), { visible1: 10 }, ScopeType.Closure);
-CheckScope(mirror.scope(2), {}, ScopeType.Global);
+CheckScope(mirror.scope(2), {}, ScopeType.Script);
+CheckScope(mirror.scope(3), {}, ScopeType.Global);
 
 
 var f4 = (function One() {
@@ -122,11 +126,12 @@ var f4 = (function One() {
 
 var mirror = Debug.MakeMirror(f4);
 
-assertEquals(3, mirror.scopeCount());
+assertEquals(4, mirror.scopeCount());
 
 CheckScope(mirror.scope(0), { e2: "I'm error 2" }, ScopeType.Catch);
 CheckScope(mirror.scope(1), { e1: "I'm error 1" }, ScopeType.Catch);
-CheckScope(mirror.scope(2), {}, ScopeType.Global);
+CheckScope(mirror.scope(2), {}, ScopeType.Script);
+CheckScope(mirror.scope(3), {}, ScopeType.Global);
 
 
 var f5 = (function Raz(p1, p2) {
@@ -141,11 +146,12 @@ var f5 = (function Raz(p1, p2) {
 
 var mirror = Debug.MakeMirror(f5);
 
-assertEquals(3, mirror.scopeCount());
+assertEquals(4, mirror.scopeCount());
 
 CheckScope(mirror.scope(0), { p4: 20, p6: 22 }, ScopeType.Closure);
 CheckScope(mirror.scope(1), { p1: 1 }, ScopeType.Closure);
-CheckScope(mirror.scope(2), {}, ScopeType.Global);
+CheckScope(mirror.scope(2), {}, ScopeType.Script);
+CheckScope(mirror.scope(3), {}, ScopeType.Global);
 
 
 function CheckNoScopeVisible(f) {
index 4823496..7c08120 100644 (file)
@@ -130,6 +130,7 @@ function CheckScopeChain(scopes, exec_state) {
     assertEquals(i, response.body.scopes[i].index);
     assertEquals(scopes[i], response.body.scopes[i].type);
     if (scopes[i] == debug.ScopeType.Local ||
+        scopes[i] == debug.ScopeType.Script ||
         scopes[i] == debug.ScopeType.Closure) {
       assertTrue(response.body.scopes[i].object.ref < 0);
     } else {
@@ -193,6 +194,7 @@ function CheckScopeContent(content, number, exec_state) {
   assertEquals(scope.scopeType(), response.body.type);
   assertEquals(number, response.body.index);
   if (scope.scopeType() == debug.ScopeType.Local ||
+      scope.scopeType() == debug.ScopeType.Script ||
       scope.scopeType() == debug.ScopeType.Closure) {
     assertTrue(response.body.object.ref < 0);
   } else {
@@ -215,6 +217,7 @@ function local_1() {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
 };
@@ -231,6 +234,7 @@ function local_2(a) {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1}, 0, exec_state);
 };
@@ -248,6 +252,7 @@ function local_3(a) {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,x:3}, 0, exec_state);
 };
@@ -266,6 +271,7 @@ function local_4(a, b) {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
 };
@@ -283,6 +289,7 @@ function local_5() {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
 };
@@ -300,6 +307,7 @@ function local_6() {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({i:5}, 0, exec_state);
 };
@@ -321,6 +329,7 @@ function local_7(a, b) {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 0, exec_state);
 };
@@ -340,6 +349,7 @@ function with_1() {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.With,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
 };
@@ -362,6 +372,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.With,
                    debug.ScopeType.With,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
   CheckScopeContent({}, 1, exec_state);
@@ -382,6 +393,7 @@ function with_3() {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.With,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2}, 0, exec_state);
 };
@@ -404,6 +416,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.With,
                    debug.ScopeType.With,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:2,b:1}, 0, exec_state);
   CheckScopeContent({a:1,b:2}, 1, exec_state);
@@ -428,6 +441,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.With,
                    debug.ScopeType.With,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent(with_object, 0, exec_state);
   CheckScopeContent(with_object, 1, exec_state);
@@ -443,6 +457,7 @@ BeginTest("With 6");
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.With,
                    debug.ScopeType.With,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent(with_object, 0, exec_state);
   CheckScopeContent(with_object, 1, exec_state);
@@ -472,6 +487,7 @@ function with_7() {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.With,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
 };
@@ -494,6 +510,7 @@ function closure_1(a) {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1}, 1, exec_state);
 };
@@ -519,6 +536,7 @@ function closure_2(a, b) {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,x:3}, 1, exec_state);
 };
@@ -545,6 +563,7 @@ function closure_3(a, b) {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4}, 1, exec_state);
 };
@@ -574,6 +593,7 @@ function closure_4(a, b) {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
 };
@@ -602,6 +622,7 @@ function closure_5(a, b) {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4,f:function(){}}, 1, exec_state);
 };
@@ -631,6 +652,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Closure,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1}, 1, exec_state);
   CheckScopeContent({f:function(){}}, 2, exec_state);
@@ -665,6 +687,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Closure,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 1, exec_state);
@@ -684,6 +707,7 @@ function closure_8() {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x: 2}, 0, exec_state);
 };
@@ -705,6 +729,7 @@ function closure_9() {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
 };
 closure_9();
@@ -746,6 +771,7 @@ listener_delegate = function(exec_state) {
                    debug.ScopeType.With,
                    debug.ScopeType.Closure,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({b:16}, 0, exec_state);
   CheckScopeContent({a:15}, 1, exec_state);
@@ -771,6 +797,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.With,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x: 2}, 0, exec_state);
 };
@@ -794,6 +821,7 @@ listener_delegate = function(exec_state) {
                    debug.ScopeType.Local,
                    debug.ScopeType.With,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x: 3}, 0, exec_state);
   CheckScopeContent({x: 2}, 1, exec_state);
@@ -826,6 +854,7 @@ listener_delegate = function(exec_state) {
                    debug.ScopeType.Local,
                    debug.ScopeType.Closure,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
 }
 closure_in_with_3();
@@ -836,6 +865,7 @@ BeginTest("Closure inside With 4");
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.With,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x: 2}, 0, exec_state);
   CheckScopeContent({x: 1}, 1, exec_state);
@@ -852,7 +882,7 @@ EndTest();
 // Test global scope.
 BeginTest("Global");
 listener_delegate = function(exec_state) {
-  CheckScopeChain([debug.ScopeType.Global], exec_state);
+  CheckScopeChain([debug.ScopeType.Script, debug.ScopeType.Global], exec_state);
 };
 debugger;
 EndTest();
@@ -871,6 +901,7 @@ function catch_block_1() {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Catch,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({e:'Exception'}, 0, exec_state);
 };
@@ -894,6 +925,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.With,
                    debug.ScopeType.Catch,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({n:10}, 0, exec_state);
   CheckScopeContent({e:'Exception'}, 1, exec_state);
@@ -918,6 +950,7 @@ function catch_block_3() {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Catch,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({e:'Exception'}, 0, exec_state);
   CheckScopeContent({y:78}, 1, exec_state);
@@ -944,6 +977,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.With,
                    debug.ScopeType.Catch,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({n:10}, 0, exec_state);
   CheckScopeContent({e:'Exception'}, 1, exec_state);
@@ -957,6 +991,7 @@ EndTest();
 BeginTest("Catch block 5");
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Catch,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({e:'Exception'}, 0, exec_state);
 };
@@ -975,6 +1010,7 @@ BeginTest("Catch block 6");
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Catch,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x: 2}, 0, exec_state);
   CheckScopeContent({e:'Exception'}, 1, exec_state);
@@ -1005,6 +1041,7 @@ function catch_block_7() {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Catch,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({e:'Exception'}, 0, exec_state);
 };
index 44102c2..07f0e3c 100644 (file)
 
 // Flags: --expose-debug-as debug --expose-gc --send-idle-notification
 // Flags: --allow-natives-syntax
+// Flags: --noharmony-shipping
+// Note: this test checks that the number of scripts reported as native
+// by Debug.scripts() is the same as the number of core native scripts.
+// Native scripts that are added by --harmony-shipping are classified
+// as 'experimental', but are still returned by Debug.scripts(), so
+// we disable harmony-shipping for this test.
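+// As an illustration only (hypothetical snippet, not part of the test),
+// such a count could be computed as:
+//   var native_count = Debug.scripts().filter(function(s) {
+//     return s.type == Debug.ScriptType.Native;
+//   }).length;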
 
 // Get the Debug object exposed from the debug context global object.
 Debug = debug.Debug;
index 2233e36..45f077f 100644 (file)
@@ -68,7 +68,7 @@ bp1 = Debug.setBreakPoint(f, 1);
 state = 0;
 result = -1;
 f();
-assertEquals(499, result);
+assertEquals(332, result);
 
 // Check that performing 1000 steps with a break point on the statement in the
 // for loop (line 2) will only make i 0 as a real break point breaks even when
diff --git a/deps/v8/test/mjsunit/debug-stepin-foreach.js b/deps/v8/test/mjsunit/debug-stepin-foreach.js
new file mode 100644 (file)
index 0000000..fa728e0
--- /dev/null
@@ -0,0 +1,51 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+// Tests stepping into Array.prototype.forEach callbacks.
+
+Debug = debug.Debug
+var exception = null;
+var break_count = 0;
+var expected_breaks = -1;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      assertTrue(exec_state.frameCount() != 0, "FAIL: Empty stack trace");
+      if (!break_count) {
+        // Count number of expected breakpoints in this source file.
+        var source_text = exec_state.frame(0).func().script().source();
+        expected_breaks = source_text.match(/\/\/\s*Break\s+\d+\./g).length;
+        print("Expected breaks: " + expected_breaks);
+      }
+      var source = exec_state.frame(0).sourceLineText();
+      print("paused at: " + source);
+      assertTrue(source.indexOf("// Break " + break_count + ".") > 0,
+                 "Unexpected pause at: " + source + "\n" +
+                 "Expected: // Break " + break_count + ".");
+      ++break_count;
+      if (break_count !== expected_breaks) {
+        exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+      }
+    }
+  } catch(e) {
+    exception = e;
+    print(e, e.stack);
+  }
+};
+
+Debug.setListener(listener);
+
+debugger; // Break 0.
+[1,2].forEach(callback); // Break 1.
+
+function callback(x) {
+  return x; // Break 2. // Break 4.
+} // Break 3. // Break 5.
+
+assertNull(exception); // Break 6.
+assertEquals(expected_breaks, break_count);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/deserialize-optimize-inner.js b/deps/v8/test/mjsunit/deserialize-optimize-inner.js
new file mode 100644 (file)
index 0000000..72df320
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --cache=code --no-lazy --serialize-inner
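+// The function below is compiled eagerly (--no-lazy) and serialized into
+// the code cache (--cache=code --serialize-inner); the test checks that it
+// can still be optimized after deserialization.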
+
+function f(x, y) { return x + y; }
+
+assertEquals(1, f(0, 1));
+assertEquals(5, f(2, 3));
+%OptimizeFunctionOnNextCall(f);
+assertEquals(9, f(4, 5));
+assertOptimized(f);
index 60ce46b..92cd087 100644 (file)
@@ -266,7 +266,6 @@ assertTrue(WeakMap.prototype.set instanceof Function)
 assertTrue(WeakMap.prototype.get instanceof Function)
 assertTrue(WeakMap.prototype.has instanceof Function)
 assertTrue(WeakMap.prototype.delete instanceof Function)
-assertTrue(WeakMap.prototype.clear instanceof Function)
 
 
 // Test some common JavaScript idioms for WeakSets
@@ -275,7 +274,6 @@ assertTrue(s instanceof WeakSet);
 assertTrue(WeakSet.prototype.add instanceof Function)
 assertTrue(WeakSet.prototype.has instanceof Function)
 assertTrue(WeakSet.prototype.delete instanceof Function)
-assertTrue(WeakSet.prototype.clear instanceof Function)
 
 
 // Test class of instance and prototype.
@@ -471,30 +469,6 @@ for (var i = 9; i >= 0; i--) {
 })();
 
 
-// Test WeakMap clear
-(function() {
-  var k = new Object();
-  var w = new WeakMap();
-  w.set(k, 23);
-  assertTrue(w.has(k));
-  assertEquals(23, w.get(k));
-  w.clear();
-  assertFalse(w.has(k));
-  assertEquals(undefined, w.get(k));
-})();
-
-
-// Test WeakSet clear
-(function() {
-  var k = new Object();
-  var w = new WeakSet();
-  w.add(k);
-  assertTrue(w.has(k));
-  w.clear();
-  assertFalse(w.has(k));
-})();
-
-
 (function TestMinusZeroSet() {
   var s = new Set();
   s.add(-0);
diff --git a/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js b/deps/v8/test/mjsunit/es6/debug-stepin-microtasks.js
new file mode 100644 (file)
index 0000000..8dbdb34
--- /dev/null
@@ -0,0 +1,101 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --expose-debug-as debug
+
+Debug = debug.Debug
+var exception = null;
+var break_count = 0;
+var expected_breaks = -1;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      assertTrue(exec_state.frameCount() != 0, "FAIL: Empty stack trace");
+      if (!break_count) {
+        // Count number of expected breakpoints in this source file.
+        var source_text = exec_state.frame(0).func().script().source();
+        expected_breaks = source_text.match(/\/\/\s*Break\s+\d+\./g).length;
+        print("Expected breaks: " + expected_breaks);
+      }
+      var source = exec_state.frame(0).sourceLineText();
+      print("paused at: " + source);
+      assertTrue(source.indexOf("// Break " + break_count + ".") > 0,
+                 "Unexpected pause at: " + source + "\n" +
+                 "Expected: // Break " + break_count + ".");
+      if (source.indexOf("StepOver.") !== -1) {
+        exec_state.prepareStep(Debug.StepAction.StepNext, 1);
+      } else {
+        exec_state.prepareStep(Debug.StepAction.StepIn, 1);
+      }
+      ++break_count;
+    }
+  } catch (e) {
+    exception = e;
+    print(e, e.stack);
+  }
+};
+
+Debug.setListener(listener);
+
+Promise.resolve(42)
+  .then(promise1)
+  .then(Object) // Should skip stepping into native.
+  .then(Boolean) // Should skip stepping into native.
+  .then(promise2)
+  .catch(promise3)
+  .catch(function(e) {
+    %AbortJS("FAIL: uncaught exception " + e);
+  });
+
+function promise1() {
+  debugger; // Break 0.
+  return exception || 1; // Break 1.
+} // Break 2.
+
+function promise2() {
+  throw new Error; // Break 3.
+}
+
+function promise3() {
+  installObservers(); // Break 4. StepOver.
+  return break_count; // Break 5.
+} // Break 6.
+
+function installObservers() {
+  var dummy = {};
+  Object.observe(dummy, observer1);
+  Object.observe(dummy, Object); // Should skip stepping into native.
+  Object.observe(dummy, Boolean); // Should skip stepping into native.
+  Object.observe(dummy, observer2);
+  dummy.foo = 1;
+}
+
+function observer1() {
+  return exception || 3; // Break 7.
+} // Break 8.
+
+function observer2() {
+  Promise.resolve().then(promise4); // Break 9. StepOver.
+  return break_count + 1; // Break 10.
+} // Break 11.
+
+function promise4() {
+  finalize(); // Break 12. StepOver.
+  return 0; // Break 13.
+} // Break 14. StepOver.
+
+function finalize() {
+  var dummy = {};
+  Object.observe(dummy, function() {
+    if (expected_breaks !== break_count) {
+      %AbortJS("FAIL: expected <" + expected_breaks + "> breaks instead of <" +
+               break_count + ">");
+    }
+    if (exception !== null) {
+      %AbortJS("FAIL: exception: " + exception);
+    }
+  });
+  dummy.foo = 1;
+}
diff --git a/deps/v8/test/mjsunit/es6/debug-stepnext-for.js b/deps/v8/test/mjsunit/es6/debug-stepnext-for.js
new file mode 100644 (file)
index 0000000..98af911
--- /dev/null
@@ -0,0 +1,116 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony
+
+Debug = debug.Debug;
+var break_count = 0
+var exception = null;
+var log = []
+
+var s = 0;
+var a = [1, 2, 3];
+var i = 0;
+
+function f() {
+  "use strict";
+  debugger;                      // Break a
+  var j;                         // Break b
+
+  for (var i in null) {          // Break c
+    s += a[i];
+  }
+
+  for (j in null) {              // Break d
+    s += a[j];
+  }
+
+  for (var i in a) {             // Break e
+    s += a[i];                   // Break E
+  }
+
+  for (j in a) {                 // Break f
+    s += a[j];                   // Break F
+  }
+
+  for (let i in a) {             // Break g
+    s += a[i];                   // Break G
+  }
+
+  for (var i of a) {             // Break h
+    s += i;                      // Break H
+  }
+
+  for (j of a) {                 // Break i
+    s += j;                      // Break I
+  }
+
+  for (let i of a) {             // Break j
+    s += i;                      // Break J
+  }
+
+  for (var i = 0; i < 3; i++) {  // Break k
+    s += a[i];                   // Break K
+  }
+
+  for (j = 0; j < 3; j++) {      // Break l
+    s += a[j];                   // Break L
+  }
+
+  // TODO(yangguo): add test case for for-let.
+}                                // Break y
+
+function listener(event, exec_state, event_data, data) {
+  if (event != Debug.DebugEvent.Break) return;
+  try {
+    var line = exec_state.frame(0).sourceLineText();
+    var col = exec_state.frame(0).sourceColumn();
+    print(line);
+    var match = line.match(/\/\/ Break (\w)$/);
+    assertEquals(2, match.length);
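+    // Log the break label together with the pause column, e.g. "e7".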
+    log.push(match[1] + col);
+    exec_state.prepareStep(Debug.StepAction.StepNext, 1);
+    break_count++;
+  } catch (e) {
+    exception = e;
+  }
+}
+
+Debug.setListener(listener);
+f();
+Debug.setListener(null);         // Break z
+
+print(JSON.stringify(log));
+// The let declaration differs from var in that the loop variable
+// is declared in every iteration.
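+// A tiny illustration of the difference (not part of this test):
+//   var fns = [];
+//   for (let i of [1, 2]) fns.push(function() { return i; });
+//   fns[0]();  // 1 -- with `var i` both closures would return the final 2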
+var expected = [
+  // Entry
+  "a2","b2",
+  // Empty for-in-var: var decl, get enumerable
+  "c7","c16",
+  // Empty for-in: get enumerable
+  "d12",
+  // For-in-var: var decl, get enumerable, assign, body, assign, body, ...
+  "e7","e16","e11","E4","e11","E4","e11","E4","e11",
+  // For-in: get enumerable, assign, body, assign, body, ...
+  "f12","f7","F4","f7","F4","f7","F4","f7",
+  // For-in-let: get enumerable, next, new let, body, next, new let, ...
+  "g16","g11","g7","G4","g11","g7","G4","g11","g7","G4","g11",
+  // For-of-var: var decl, next(), body, next(), body, ...
+  "h7","h16","H4","h16","H4","h16","H4","h16",
+  // For-of: next(), body, next(), body, ...
+  "i12","I4","i12","I4","i12","I4","i12",
+  // For-of-let: next(), new let, body, next(), new let, ...
+  "j16","j7","J4","j16","j7","J4","j16","j7","J4","j16",
+  // For-var: var decl, condition, body, next, condition, body, ...
+  "k7","k20","K4","k23","k20","K4","k23","k20","K4","k23","k20",
+  // For: init, condition, body, next, condition, body, ...
+  "l11","l16","L4","l19","l16","L4","l19","l16","L4","l19","l16",
+  // Exit.
+  "y0","z0",
+]
+
+assertArrayEquals(expected, log);
+assertEquals(48, s);
+assertNull(exception);
index d55e561..126572d 100644 (file)
@@ -97,6 +97,7 @@ function CheckScopeChain(scopes, exec_state) {
     assertEquals(i, response.body.scopes[i].index);
     assertEquals(scopes[i], response.body.scopes[i].type);
     if (scopes[i] == debug.ScopeType.Local ||
+        scopes[i] == debug.ScopeType.Script ||
         scopes[i] == debug.ScopeType.Closure) {
       assertTrue(response.body.scopes[i].object.ref < 0);
     } else {
@@ -159,6 +160,7 @@ function CheckScopeContent(content, number, exec_state) {
   assertEquals(scope.scopeType(), response.body.type);
   assertEquals(number, response.body.index);
   if (scope.scopeType() == debug.ScopeType.Local ||
+      scope.scopeType() == debug.ScopeType.Script ||
       scope.scopeType() == debug.ScopeType.Closure) {
     assertTrue(response.body.object.ref < 0);
   } else {
@@ -178,6 +180,7 @@ RunTest("Local 1",
         [],
         function (exec_state) {
           CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({}, 0, exec_state);
         });
@@ -188,6 +191,7 @@ RunTest("Local 2",
         [1],
         function (exec_state) {
           CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({a:1}, 0, exec_state);
         });
@@ -198,6 +202,7 @@ RunTest("Local 3",
         [1],
         function (exec_state) {
           CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({a:1,x:3}, 0, exec_state);
         });
@@ -208,6 +213,7 @@ RunTest("Local 4",
         [1, 2],
         function (exec_state) {
           CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
         });
@@ -218,6 +224,7 @@ RunTest("Local 5",
         [],
         function (exec_state) {
           CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({}, 0, exec_state);
         });
@@ -228,6 +235,7 @@ RunTest("Local 6",
         [],
         function (exec_state) {
           CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({i:5}, 0, exec_state);
         });
@@ -242,6 +250,7 @@ RunTest("Local 7",
         [1, 2],
         function (exec_state) {
           CheckScopeChain([debug.ScopeType.Local,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({a:1,b:2,x:3,y:4,i:5,j:6}, 0, exec_state);
         });
@@ -254,6 +263,7 @@ RunTest("With",
           CheckScopeChain([debug.ScopeType.With,
                            debug.ScopeType.With,
                            debug.ScopeType.Local,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({}, 0, exec_state);
           CheckScopeContent({}, 1, exec_state);
@@ -267,6 +277,7 @@ RunTest("Closure 1",
         function (exec_state) {
           CheckScopeChain([debug.ScopeType.Local,
                            debug.ScopeType.Closure,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({a:1}, 1, exec_state);
         },
@@ -305,6 +316,7 @@ RunTest("The full monty",
                            debug.ScopeType.With,
                            debug.ScopeType.Closure,
                            debug.ScopeType.Closure,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({b:16}, 0, exec_state);
           CheckScopeContent({a:15}, 1, exec_state);
@@ -321,6 +333,7 @@ RunTest("Catch block 1",
         function (exec_state) {
           CheckScopeChain([debug.ScopeType.Catch,
                            debug.ScopeType.Local,
+                           debug.ScopeType.Script,
                            debug.ScopeType.Global], exec_state);
           CheckScopeContent({e:'Exception'}, 0, exec_state);
         });
index b6fcdaa..faeb683 100644 (file)
@@ -41,10 +41,7 @@ function assertIteratorIsClosed(iter) {
 }
 
 function assertThrownIteratorIsClosed(iter) {
-  // TODO(yusukesuzuki): Since status of a thrown generator is "executing",
-  // following tests are failed.
-  // https://code.google.com/p/v8/issues/detail?id=3096
-  // assertIteratorIsClosed(iter);
+  assertIteratorIsClosed(iter);
 }
 
 function TestGeneratorResultPrototype() {
index 6925285..bf21f4d 100644 (file)
@@ -24,7 +24,7 @@ MirrorRefCache.prototype.lookup = function(handle) {
   return this.refs_[handle];
 }
 
-function TestGeneratorMirror(g, test) {
+function TestGeneratorMirror(g, status, line, column, receiver) {
   // Create mirror and JSON representation.
   var mirror = debug.MakeMirror(g);
   var serializer = debug.MakeMirrorSerializer();
@@ -44,41 +44,69 @@ function TestGeneratorMirror(g, test) {
   assertFalse(mirror.isPrimitive());
   assertEquals('Generator', mirror.className());
 
-  assertTrue(mirror.receiver().isUndefined());
+  assertEquals(receiver, mirror.receiver().value());
   assertEquals(generator, mirror.func().value());
 
-  test(mirror);
-}
+  assertEquals(status, mirror.status());
+
+  // Note that line numbers are 0-based, not 1-based.
+  var loc = mirror.sourceLocation();
+  if (status === 'suspended') {
+    assertTrue(!!loc);
+    assertEquals(line, loc.line);
+    assertEquals(column, loc.column);
+  } else {
+    assertEquals(undefined, loc);
+  }
 
-var iter = generator(function () {
-  assertEquals('running', debug.MakeMirror(iter).status());
-})
+  TestInternalProperties(mirror, status, receiver);
+}
 
-// Note that line numbers are 0-based, not 1-based.
-function assertSourceLocation(loc, line, column) {
-  assertEquals(line, loc.line);
-  assertEquals(column, loc.column);
+function TestInternalProperties(mirror, status, receiver) {
+  var properties = mirror.internalProperties();
+  assertEquals(3, properties.length);
+  assertEquals("[[GeneratorStatus]]", properties[0].name());
+  assertEquals(status, properties[0].value().value());
+  assertEquals("[[GeneratorFunction]]", properties[1].name());
+  assertEquals(generator, properties[1].value().value());
+  assertEquals("[[GeneratorReceiver]]", properties[2].name());
+  assertEquals(receiver, properties[2].value().value());
 }
 
-TestGeneratorMirror(iter, function (mirror) {
-  assertEquals('suspended', mirror.status())
-  assertSourceLocation(mirror.sourceLocation(), 7, 19);
+var iter = generator(function() {
+  var mirror = debug.MakeMirror(iter);
+  assertEquals('running', mirror.status());
+  assertEquals(undefined, mirror.sourceLocation());
+  TestInternalProperties(mirror, 'running');
 });
 
+TestGeneratorMirror(iter, 'suspended', 7, 19);
+
 iter.next();
-TestGeneratorMirror(iter, function (mirror) {
-  assertEquals('suspended', mirror.status())
-  assertSourceLocation(mirror.sourceLocation(), 9, 2);
-});
+TestGeneratorMirror(iter, 'suspended', 9, 2);
 
 iter.next();
-TestGeneratorMirror(iter, function (mirror) {
-  assertEquals('suspended', mirror.status())
-  assertSourceLocation(mirror.sourceLocation(), 11, 2);
-});
+TestGeneratorMirror(iter, 'suspended', 11, 2);
 
 iter.next();
-TestGeneratorMirror(iter, function (mirror) {
-  assertEquals('closed', mirror.status())
+TestGeneratorMirror(iter, 'closed');
+
+// Test generator with receiver.
+var obj = {foo: 42};
+var iter2 = generator.call(obj, function() {
+  var mirror = debug.MakeMirror(iter2);
+  assertEquals('running', mirror.status());
   assertEquals(undefined, mirror.sourceLocation());
+  TestInternalProperties(mirror, 'running', obj);
 });
+
+TestGeneratorMirror(iter2, 'suspended', 7, 19, obj);
+
+iter2.next();
+TestGeneratorMirror(iter2, 'suspended', 9, 2, obj);
+
+iter2.next();
+TestGeneratorMirror(iter2, 'suspended', 11, 2, obj);
+
+iter2.next();
+TestGeneratorMirror(iter2, 'closed', 0, 0, obj);
index 25bd0de..8039ca8 100644 (file)
@@ -67,6 +67,8 @@ function TestGeneratorObject() {
   assertEquals("Generator", %_ClassOf(iter));
   assertEquals("[object Generator]", String(iter));
   assertEquals("[object Generator]", Object.prototype.toString.call(iter));
+  var gf = iter.__proto__.constructor;
+  assertEquals("[object GeneratorFunction]", Object.prototype.toString.call(gf));
   assertEquals([], Object.getOwnPropertyNames(iter));
   assertTrue(iter !== new g());
 }
diff --git a/deps/v8/test/mjsunit/es6/generators-states.js b/deps/v8/test/mjsunit/es6/generators-states.js
new file mode 100644 (file)
index 0000000..0a2173a
--- /dev/null
@@ -0,0 +1,67 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Test generator states.
+
+function Foo() {}
+function Bar() {}
+
+function assertIteratorResult(value, done, result) {
+  assertEquals({ value: value, done: done}, result);
+}
+
+function assertIteratorIsClosed(iter) {
+  assertIteratorResult(undefined, true, iter.next());
+  // Next and throw on a closed iterator.
+  assertDoesNotThrow(function() { iter.next(); });
+  assertThrows(function() { iter.throw(new Bar); }, Bar);
+}
+
+var iter;
+function* nextGenerator() { yield iter.next(); }
+function* throwGenerator() { yield iter.throw(new Bar); }
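+// Each generator re-enters the shared `iter` binding from its own body, so
+// a nested next()/throw() observes the generator in the "executing" state.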
+
+// Throw on a suspendedStart iterator.
+iter = nextGenerator();
+assertThrows(function() { iter.throw(new Foo) }, Foo)
+assertThrows(function() { iter.throw(new Foo) }, Foo)
+assertIteratorIsClosed(iter);
+
+// The same, using throwGenerator.
+iter = throwGenerator();
+assertThrows(function() { iter.throw(new Foo) }, Foo)
+assertThrows(function() { iter.throw(new Foo) }, Foo)
+assertIteratorIsClosed(iter);
+
+// Next on an executing iterator raises a TypeError.
+iter = nextGenerator();
+assertThrows(function() { iter.next() }, TypeError)
+assertIteratorIsClosed(iter);
+
+// Throw on an executing iterator raises a TypeError.
+iter = throwGenerator();
+assertThrows(function() { iter.next() }, TypeError)
+assertIteratorIsClosed(iter);
+
+// Next on an executing iterator doesn't change the state of the
+// generator.
+iter = (function* () {
+  try {
+    iter.next();
+    yield 1;
+  } catch (e) {
+    try {
+      // This next() should raise the same exception, because the first
+      // next() left the iter in the executing state.
+      iter.next();
+      yield 2;
+    } catch (e) {
+      yield 3;
+    }
+  }
+  yield 4;
+})();
+assertIteratorResult(3, false, iter.next());
+assertIteratorResult(4, false, iter.next());
+assertIteratorIsClosed(iter);
index 4479894..fa3f468 100644 (file)
@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
+// Flags: --allow-natives-syntax
+
 [Math.log10, Math.log2].forEach( function(fun) {
   assertTrue(isNaN(fun(NaN)));
   assertTrue(isNaN(fun(fun)));
   assertEquals("Infinity", String(fun(Infinity)));
 });
 
-for (var i = -300; i < 300; i += 0.7) {
-  assertEqualsDelta(i, Math.log10(Math.pow(10, i)), 1E-13);
-  assertEqualsDelta(i, Math.log2(Math.pow(2, i)), 1E-13);
+for (var i = -310; i <= 308; i += 0.5) {
+  assertEquals(i, Math.log10(Math.pow(10, i)));
+  // Square roots are tested below.
+  if (i != -0.5 && i != 0.5) assertEquals(i, Math.log2(Math.pow(2, i)));
+}
+
+// Test denormals.
+assertEquals(-307.77759430519706, Math.log10(1.5 * Math.pow(2, -1023)));
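+// (1.5 * 2^-1023 is subnormal; the expectation is log10(1.5) - 1023 * log10(2).)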
+
+// Test Math.log2(2^k) for -1074 <= k <= 1023.
+var n = -1074;
+// This loop covers n from -1074 to -1043
+for (var lowbits = 1; lowbits <= 0x80000000; lowbits *= 2) {
+  var x = %_ConstructDouble(0, lowbits);
+  assertEquals(n, Math.log2(x));
+  n++;
+}
+// This loop covers n from -1042 to -1023
+for (var hibits = 1; hibits <= 0x80000; hibits *= 2) {
+  var x = %_ConstructDouble(hibits, 0);
+  assertEquals(n, Math.log2(x));
+  n++;
+}
+// The rest of the normal values of 2^n
+var x = 1;
+for (var n = -1022; n <= 1023; ++n) {
+  var x = Math.pow(2, n);
+  assertEquals(n, Math.log2(x));
+}
+
+// Test special values.
+// Expectation isn't exactly 1/2 because Math.SQRT2 isn't exactly sqrt(2).
+assertEquals(0.5000000000000001, Math.log2(Math.SQRT2));
+
+// Expectation isn't exactly -1/2 because Math.SQRT1_2 isn't exactly sqrt(1/2).
+assertEquals(-0.4999999999999999, Math.log2(Math.SQRT1_2));
+
+assertEquals(3.321928094887362, Math.log2(10));
+assertEquals(6.643856189774724, Math.log2(100));
+
+// Test the identity log2(x) + log2(1/x) == 0 (up to rounding error).
+x = 1;
+for (var k = 0; k < 1000; ++k) {
+  var y = Math.abs(Math.log2(x) + Math.log2(1/x));
+  assertEqualsDelta(0, y, 1.5e-14);
+  x *= 1.1;
+}
+
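+// Check log2 against the log(x) / Math.LN2 identity within relative error.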
+x = Math.pow(2, -100);
+for (var k = 0; k < 1000; ++k) {
+  var y = Math.log2(x);
+  var expected = Math.log(x) / Math.LN2;
+  var err = Math.abs(y - expected) / expected;
+  assertEqualsDelta(0, err, 1e-15);
+  x *= 1.1;
 }
index e10f5c1..81a98b8 100644 (file)
@@ -51,6 +51,7 @@ map.set(o2, 22);
 map.delete(o1);
 var mapMirror = debug.MakeMirror(map);
 testMapMirror(mapMirror);
+
 var entries = mapMirror.entries();
 assertEquals(1, entries.length);
 assertSame(o2, entries[0].key);
@@ -59,6 +60,7 @@ map.set(o1, 33);
 map.set(o3, o2);
 map.delete(o2);
 map.set(undefined, 44);
+
 entries = mapMirror.entries();
 assertEquals(3, entries.length);
 assertSame(o1, entries[0].key);
@@ -68,6 +70,10 @@ assertSame(o2, entries[1].value);
 assertEquals(undefined, entries[2].key);
 assertEquals(44, entries[2].value);
 
+assertEquals(3, mapMirror.entries(0).length);
+assertEquals(1, mapMirror.entries(1).length);
+assertEquals(2, mapMirror.entries(2).length);
+
 // Test the mirror object for Sets
 var set = new Set();
 set.add(o1);
@@ -78,6 +84,7 @@ var setMirror = debug.MakeMirror(set);
 testSetMirror(setMirror);
 var values = setMirror.values();
 assertEquals(2, values.length);
+assertEquals(1, setMirror.values(1).length);
 assertSame(o2, values[0]);
 assertEquals(undefined, values[1]);
 
@@ -96,6 +103,8 @@ gc();
 function testWeakMapEntries(weakMapMirror) {
   var entries = weakMapMirror.entries();
   assertEquals(2, entries.length);
+  assertEquals(2, weakMapMirror.entries(0).length);
+  assertEquals(1, weakMapMirror.entries(1).length);
   var found = 0;
   for (var i = 0; i < entries.length; i++) {
     if (Object.is(entries[i].key, o1)) {
@@ -129,6 +138,8 @@ gc();
 function testWeakSetValues(weakSetMirror) {
   var values = weakSetMirror.values();
   assertEquals(2, values.length);
+  assertEquals(2, weakSetMirror.values(0).length);
+  assertEquals(1, weakSetMirror.values(1).length);
   var found = 0;
   for (var i = 0; i < values.length; i++) {
     if (Object.is(values[i], o1)) {
index 02fe7ff..22ce424 100644 (file)
@@ -5,21 +5,40 @@
 // Flags: --expose-debug-as debug
 // Test the mirror object for collection iterators.
 
-function testIteratorMirror(iter, offset, expected) {
+function testIteratorMirror(iter, offset, expected, opt_limit) {
   while (offset-- > 0) iter.next();
 
   var mirror = debug.MakeMirror(iter);
   assertTrue(mirror.isIterator());
 
-  var preview = mirror.preview();
+  var preview = mirror.preview(opt_limit);
   assertArrayEquals(expected, preview);
 
   // Check that iterator has not changed after taking preview.
   var values = [];
-  for (var i of iter) values.push(i);
+  for (var i of iter) {
+    if (opt_limit && values.length >= opt_limit) break;
+    values.push(i);
+  }
   assertArrayEquals(expected, values);
 }
 
+function testIteratorInternalProperties(iter, offset, kind, index, has_more) {
+  while (offset-- > 0) iter.next();
+
+  var mirror = debug.MakeMirror(iter);
+  assertTrue(mirror.isIterator());
+
+  var properties = mirror.internalProperties();
+  assertEquals(3, properties.length);
+  assertEquals("[[IteratorHasMore]]", properties[0].name());
+  assertEquals(has_more, properties[0].value().value());
+  assertEquals("[[IteratorIndex]]", properties[1].name());
+  assertEquals(index, properties[1].value().value());
+  assertEquals("[[IteratorKind]]", properties[2].name());
+  assertEquals(kind, properties[2].value().value());
+}
+
 var o1 = { foo: 1 };
 var o2 = { foo: 2 };
 
@@ -39,6 +58,16 @@ testIteratorMirror(map.keys(), 2, []);
 testIteratorMirror(map.values(), 2, []);
 testIteratorMirror(map.entries(), 2, []);
 
+// Test with maximum limit.
+testIteratorMirror(map.keys(), 0, [41], 1);
+testIteratorMirror(map.values(), 0, [42], 1);
+testIteratorMirror(map.entries(), 0, [[41, 42]], 1);
+
+testIteratorInternalProperties(map.keys(), 0, "keys", 0, true);
+testIteratorInternalProperties(map.values(), 1, "values", 1, true);
+testIteratorInternalProperties(map.entries(), 2, "entries", 2, false);
+testIteratorInternalProperties(map.keys(), 3, "keys", 2, false);
+
 var set = new Set();
 set.add(41);
 set.add(42);
@@ -60,3 +89,15 @@ testIteratorMirror(set.entries(), 3, [[o2, o2]]);
 testIteratorMirror(set.keys(), 5, []);
 testIteratorMirror(set.values(), 5, []);
 testIteratorMirror(set.entries(), 5, []);
+
+// Test with maximum limit.
+testIteratorMirror(set.keys(), 1, [42, o1], 2);
+testIteratorMirror(set.values(), 1, [42, o1], 2);
+testIteratorMirror(set.entries(), 1, [[42, 42], [o1, o1]], 2);
+
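+// Note: Set.prototype.keys is the same function as Set.prototype.values,
+// so a keys() iterator reports its kind as "values".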
+testIteratorInternalProperties(set.keys(), 0, "values", 0, true);
+testIteratorInternalProperties(set.values(), 1, "values", 1, true);
+testIteratorInternalProperties(set.entries(), 2, "entries", 2, true);
+testIteratorInternalProperties(set.keys(), 3, "values", 3, true);
+testIteratorInternalProperties(set.values(), 4, "values", 4, false);
+testIteratorInternalProperties(set.entries(), 5, "entries", 4, false);
index 36365d2..03612be 100644 (file)
@@ -143,6 +143,13 @@ function TestBasics(object) {
     assertEquals(2, y);
     assertEquals(3, z);
   }
+
+  object[Symbol.unscopables] = {x: 0, y: undefined};
+  with (object) {
+    assertEquals(1, x);
+    assertEquals(5, y);
+    assertEquals(3, z);
+  }
 }
 runTest(TestBasics);
 
@@ -161,6 +168,13 @@ function TestUnscopableChain(object) {
   with (object) {
     assertEquals(1, x);
   }
+
+  object[Symbol.unscopables] = {
+    __proto__: {x: undefined}
+  };
+  with (object) {
+    assertEquals(2, x);
+  }
 }
 runTest(TestUnscopableChain);
 
@@ -222,6 +236,14 @@ function TestOnProto(object, proto) {
     assertEquals(5, y);
     assertEquals(3, z);
   }
+
+  proto[Symbol.unscopables] = {y: true};
+  object[Symbol.unscopables] = {x: true, y: undefined};
+  with (object) {
+    assertEquals(1, x);
+    assertEquals(5, y);
+    assertEquals(3, z);
+  }
 }
 runTest(TestOnProto);
 
@@ -341,6 +363,20 @@ function TestChangeDuringWithWithPossibleOptimization4(object) {
 TestChangeDuringWithWithPossibleOptimization4({});
 
 
+function TestChangeDuringWithWithPossibleOptimization5(object) {
+  var x = 1;
+  object.x = 2;
+  object[Symbol.unscopables] = {x: true};
+  with (object) {
+    for (var i = 0; i < 1000; i++) {
+      if (i === 500) object[Symbol.unscopables].x = undefined;
+      assertEquals(i < 500 ? 1 : 2, x);
+    }
+  }
+}
+TestChangeDuringWithWithPossibleOptimization5({});
+
+
 function TestAccessorReceiver(object, proto) {
   var x = 'local';
 
@@ -532,9 +568,11 @@ function TestAccessorOnUnscopables(object) {
   var x = 1;
   object.x = 2;
 
+  var calls = 0;
   var unscopables = {
     get x() {
-      assertUnreachable();
+      calls++;
+      return calls === 1 ? true : undefined;
     }
   };
 
@@ -542,7 +580,9 @@ function TestAccessorOnUnscopables(object) {
     assertEquals(2, x);
     object[Symbol.unscopables] = unscopables;
     assertEquals(1, x);
+    assertEquals(2, x);
   }
+  assertEquals(2, calls);
 }
 runTest(TestAccessorOnUnscopables);
 
@@ -659,3 +699,25 @@ function TestGetUnscopablesGetterThrows() {
   }, CustomError);
 }
 TestGetUnscopablesGetterThrows();
+
+
+function TestGetUnscopablesGetterThrows2() {
+  var object = {
+    get x() {
+      assertUnreachable();
+    }
+  };
+  function CustomError() {}
+
+  object[Symbol.unscopables] = {
+    get x() {
+      throw new CustomError();
+    }
+  };
+  assertThrows(function() {
+    with (object) {
+      x;
+    }
+  }, CustomError);
+}
+TestGetUnscopablesGetterThrows2();
diff --git a/deps/v8/test/mjsunit/es7/regress/regress-443982.js b/deps/v8/test/mjsunit/es7/regress/regress-443982.js
new file mode 100644 (file)
index 0000000..5a2e9cd
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var records;
+function observer(r) {
+  records = r;
+}
+
+Object.defineProperty(Array.prototype, '0', {
+  get: function() { return 0; },
+  set: function() { throw "boom!"; }
+});
+arr = [1, 2];
+Array.observe(arr, observer);
+arr.length = 0;
+assertEquals(0, arr.length);
+
+Object.deliverChangeRecords(observer);
+assertEquals(1, records.length);
+assertEquals('splice', records[0].type);
+assertArrayEquals([1, 2], records[0].removed);
index 357ac3f..97c9f65 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-scoping
+// Flags: --harmony-scoping --lazy
 
 function foo(a, b, c, d) {
   "use strict"
diff --git a/deps/v8/test/mjsunit/harmony/array-concat.js b/deps/v8/test/mjsunit/harmony/array-concat.js
new file mode 100644 (file)
index 0000000..286aefd
--- /dev/null
@@ -0,0 +1,686 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-arrays --harmony-classes
+
+(function testArrayConcatArity() {
+  "use strict";
+  assertEquals(1, Array.prototype.concat.length);
+})();
+
+
+(function testArrayConcatNoPrototype() {
+  "use strict";
+  assertEquals(void 0, Array.prototype.concat.prototype);
+})();
+
+
+(function testArrayConcatDescriptor() {
+  "use strict";
+  var desc = Object.getOwnPropertyDescriptor(Array.prototype, 'concat');
+  assertEquals(false, desc.enumerable);
+})();
+
+
+(function testConcatArrayLike() {
+  "use strict";
+  var obj = {
+    "length": 6,
+    "1": "A",
+    "3": "B",
+    "5": "C"
+  };
+  obj[Symbol.isConcatSpreadable] = true;
+  var obj2 = { length: 3, "0": "0", "1": "1", "2": "2" };
+  var arr = ["X", "Y", "Z"];
+  assertEquals([void 0, "A", void 0, "B", void 0, "C",
+               { "length": 3, "0": "0", "1": "1", "2": "2" },
+               "X", "Y", "Z"], Array.prototype.concat.call(obj, obj2, arr));
+})();
+
+
+(function testConcatArrayLikeStringLength() {
+  "use strict";
+  var obj = {
+    "length": "6",
+    "1": "A",
+    "3": "B",
+    "5": "C"
+  };
+  obj[Symbol.isConcatSpreadable] = true;
+  var obj2 = { length: 3, "0": "0", "1": "1", "2": "2" };
+  var arr = ["X", "Y", "Z"];
+  assertEquals([void 0, "A", void 0, "B", void 0, "C",
+               { "length": 3, "0": "0", "1": "1", "2": "2" },
+               "X", "Y", "Z"], Array.prototype.concat.call(obj, obj2, arr));
+})();
+
+
+(function testConcatArrayLikeNegativeLength() {
+  "use strict";
+  var obj = {
+    "length": -6,
+    "1": "A",
+    "3": "B",
+    "5": "C"
+  };
+  obj[Symbol.isConcatSpreadable] = true;
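+  // ToLength clamps negative lengths to zero, so nothing is spread.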
+  assertEquals([], [].concat(obj));
+  obj.length = -6.7;
+  assertEquals([], [].concat(obj));
+  obj.length = "-6";
+  assertEquals([], [].concat(obj));
+})();
+
+
+(function testConcatArrayLikeToLengthThrows() {
+  "use strict";
+  var obj = {
+    "length": {valueOf: null, toString: null},
+    "1": "A",
+    "3": "B",
+    "5": "C"
+  };
+  obj[Symbol.isConcatSpreadable] = true;
+  var obj2 = { length: 3, "0": "0", "1": "1", "2": "2" };
+  var arr = ["X", "Y", "Z"];
+  assertThrows(function() {
+    Array.prototype.concat.call(obj, obj2, arr);
+  }, TypeError);
+})();
+
+
+(function testConcatArrayLikePrimitiveNonNumberLength() {
+  "use strict";
+  var obj = {
+    "1": "A",
+    "3": "B",
+    "5": "C"
+  };
+  obj[Symbol.isConcatSpreadable] = true;
+  obj.length = {toString: function() { return "SIX"; }, valueOf: null };
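+  // ToPrimitive yields "SIX", which coerces to NaN; ToLength(NaN) is 0.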
+  assertEquals([], [].concat(obj));
+  obj.length = {toString: null, valueOf: function() { return "SIX"; } };
+  assertEquals([], [].concat(obj));
+})();
+
+
+(function testConcatArrayLikeLengthToStringThrows() {
+  "use strict";
+  function MyError() {}
+  var obj = {
+    "length": { toString: function() {
+        throw new MyError();
+      }, valueOf: null
+    },
+    "1": "A",
+    "3": "B",
+    "5": "C"
+  };
+  obj[Symbol.isConcatSpreadable] = true;
+  assertThrows(function() {
+    [].concat(obj);
+  }, MyError);
+})();
+
+
+(function testConcatArrayLikeLengthValueOfThrows() {
+  "use strict";
+  function MyError() {}
+  var obj = {
+    "length": { valueOf: function() {
+        throw new MyError();
+      }, valueOf: null
+    },
+    "1": "A",
+    "3": "B",
+    "5": "C"
+  };
+  obj[Symbol.isConcatSpreadable] = true;
+  assertThrows(function() {
+    [].concat(obj);
+  }, MyError);
+})();
+
+
+(function testConcatHoleyArray() {
+  "use strict";
+  var arr = [];
+  arr[4] = "Item 4";
+  arr[8] = "Item 8";
+  var arr2 = [".", "!", "?"];
+  assertEquals([void 0, void 0, void 0, void 0, "Item 4", void 0, void 0,
+                void 0, "Item 8", ".", "!", "?"], arr.concat(arr2));
+})();
+
+
+(function testIsConcatSpreadableGetterThrows() {
+  "use strict";
+  function MyError() {}
+  var obj = {};
+  Object.defineProperty(obj, Symbol.isConcatSpreadable, {
+    get: function() { throw new MyError(); }
+  });
+
+  assertThrows(function() {
+    [].concat(obj);
+  }, MyError);
+
+  assertThrows(function() {
+    Array.prototype.concat.call(obj, 1, 2, 3);
+  }, MyError);
+})();
+
+
+(function testConcatLengthThrows() {
+  "use strict";
+  function MyError() {}
+  var obj = {};
+  obj[Symbol.isConcatSpreadable] = true;
+  Object.defineProperty(obj, "length", {
+    get: function() { throw new MyError(); }
+  });
+
+  assertThrows(function() {
+    [].concat(obj);
+  }, MyError);
+
+  assertThrows(function() {
+    Array.prototype.concat.call(obj, 1, 2, 3);
+  }, MyError);
+})();
+
+
+(function testConcatArraySubclass() {
+  "use strict";
+  // TODO(caitp): when concat is called on instances of classes which extend
+  // Array, they should:
+  //
+  // - return an instance of the class, rather than an Array instance (if from
+  //   same Realm)
+  // - always treat such classes as concat-spreadable
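+  //
+  // A sketch of that expected behavior (illustration only, not asserted):
+  //   class MyArray extends Array {}
+  //   var r = new MyArray().concat([1]);
+  //   // once implemented: r instanceof MyArray, and r is [1]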
+})();
+
+
+(function testConcatNonArray() {
+  "use strict";
+  class NonArray {
+    constructor() { Array.apply(this, arguments); }
+  };
+
+  var obj = new NonArray(1,2,3);
+  var result = Array.prototype.concat.call(obj, 4, 5, 6);
+  assertEquals(Array, result.constructor);
+  assertEquals([obj,4,5,6], result);
+  assertFalse(result instanceof NonArray);
+})();
+
+
+function testConcatTypedArray(type, elems, modulo) {
+  "use strict";
+  var items = new Array(elems);
+  var ta_by_len = new type(elems);
+  for (var i = 0; i < elems; ++i) {
+    ta_by_len[i] = items[i] = modulo === false ? i : elems % modulo;
+  }
+  var ta = new type(items);
+  assertEquals([ta, ta], [].concat(ta, ta));
+  ta[Symbol.isConcatSpreadable] = true;
+  assertEquals(items, [].concat(ta));
+
+  assertEquals([ta_by_len, ta_by_len], [].concat(ta_by_len, ta_by_len));
+  ta_by_len[Symbol.isConcatSpreadable] = true;
+  assertEquals(items, [].concat(ta_by_len));
+
+  // TypedArray with fake `length`.
+  ta = new type(1);
+  var defValue = ta[0];
+  var expected = new Array(4000);
+  expected[0] = defValue;
+
+  Object.defineProperty(ta, "length", { value: 4000 });
+  ta[Symbol.isConcatSpreadable] = true;
+  assertEquals(expected, [].concat(ta));
+}
+
+(function testConcatSmallTypedArray() {
+  var max = [2^8, 2^16, 2^32, false, false];
+  [
+    Uint8Array,
+    Uint16Array,
+    Uint32Array,
+    Float32Array,
+    Float64Array
+  ].forEach(function(ctor, i) {
+    testConcatTypedArray(ctor, 1, max[i]);
+  });
+})();
+
+
+(function testConcatLargeTypedArray() {
+  var max = [2^8, 2^16, 2^32, false, false];
+  [
+    Uint8Array,
+    Uint16Array,
+    Uint32Array,
+    Float32Array,
+    Float64Array
+  ].forEach(function(ctor, i) {
+    testConcatTypedArray(ctor, 4000, max[i]);
+  });
+})();
+
+
+(function testConcatStrictArguments() {
+  var args = (function(a, b, c) { "use strict"; return arguments; })(1,2,3);
+  args[Symbol.isConcatSpreadable] = true;
+  assertEquals([1, 2, 3, 1, 2, 3], [].concat(args, args));
+
+  Object.defineProperty(args, "length", { value: 6 });
+  assertEquals([1, 2, 3, void 0, void 0, void 0], [].concat(args));
+})();
+
+
+(function testConcatSloppyArguments() {
+  var args = (function(a, b, c) { return arguments; })(1,2,3);
+  args[Symbol.isConcatSpreadable] = true;
+  assertEquals([1, 2, 3, 1, 2, 3], [].concat(args, args));
+
+  Object.defineProperty(args, "length", { value: 6 });
+  assertEquals([1, 2, 3, void 0, void 0, void 0], [].concat(args));
+})();
+
+
+(function testConcatSloppyArgumentsWithDupes() {
+  var args = (function(a, a, a) { return arguments; })(1,2,3);
+  args[Symbol.isConcatSpreadable] = true;
+  assertEquals([1, 2, 3, 1, 2, 3], [].concat(args, args));
+
+  Object.defineProperty(args, "length", { value: 6 });
+  assertEquals([1, 2, 3, void 0, void 0, void 0], [].concat(args));
+})();
+
+
+(function testConcatSloppyArgumentsThrows() {
+  function MyError() {}
+  var args = (function(a) { return arguments; })(1,2,3);
+  Object.defineProperty(args, 0, {
+    get: function() { throw new MyError(); }
+  });
+  args[Symbol.isConcatSpreadable] = true;
+  assertThrows(function() {
+    return [].concat(args, args);
+  }, MyError);
+})();
+
+
+(function testConcatHoleySloppyArguments() {
+  var args = (function(a) { return arguments; })(1,2,3);
+  delete args[1];
+  args[Symbol.isConcatSpreadable] = true;
+  assertEquals([1, void 0, 3, 1, void 0, 3], [].concat(args, args));
+})();
+
+
+(function testConcatSpreadableStringWrapper() {
+  "use strict";
+  var str1 = new String("yuck\uD83D\uDCA9")
+  // String wrapper objects are not concat-spreadable by default
+  assertEquals([str1], [].concat(str1));
+
+  // String wrapper objects may be individually concat-spreadable
+  str1[Symbol.isConcatSpreadable] = true;
+  assertEquals(["y", "u", "c", "k", "\uD83D", "\uDCA9"],
+               [].concat(str1));
+
+  String.prototype[Symbol.isConcatSpreadable] = true;
+  // String wrapper objects may be concat-spreadable
+  assertEquals(["y", "u", "c", "k", "\uD83D", "\uDCA9"],
+               [].concat(new String("yuck\uD83D\uDCA9")));
+
+  // String values are never concat-spreadable
+  assertEquals(["yuck\uD83D\uDCA9"], [].concat("yuck\uD83D\uDCA9"));
+  delete String.prototype[Symbol.isConcatSpreadable];
+})();
+
+
+(function testConcatSpreadableBooleanWrapper() {
+  "use strict";
+  var bool = new Boolean(true)
+  // Boolean wrapper objects are not concat-spreadable by default
+  assertEquals([bool], [].concat(bool));
+
+  // Boolean wrapper objects may be individually concat-spreadable
+  bool[Symbol.isConcatSpreadable] = true;
+  bool.length = 3;
+  bool[0] = 1, bool[1] = 2, bool[2] = 3;
+  assertEquals([1, 2, 3], [].concat(bool));
+
+  Boolean.prototype[Symbol.isConcatSpreadable] = true;
+  // Boolean wrapper objects may be concat-spreadable
+  assertEquals([], [].concat(new Boolean(true)));
+  Boolean.prototype[0] = 1;
+  Boolean.prototype[1] = 2;
+  Boolean.prototype[2] = 3;
+  Boolean.prototype.length = 3;
+  assertEquals([1,2,3], [].concat(new Boolean(true)));
+
+  // Boolean values are never concat-spreadable
+  assertEquals([true], [].concat(true));
+  delete Boolean.prototype[Symbol.isConcatSpreadable];
+  delete Boolean.prototype[0];
+  delete Boolean.prototype[1];
+  delete Boolean.prototype[2];
+  delete Boolean.prototype.length;
+})();
+
+
+(function testConcatSpreadableNumberWrapper() {
+  "use strict";
+  var num = new Number(true)
+  // Number wrapper objects are not concat-spreadable by default
+  assertEquals([num], [].concat(num));
+
+  // Number wrapper objects may be individually concat-spreadable
+  num[Symbol.isConcatSpreadable] = true;
+  num.length = 3;
+  num[0] = 1, num[1] = 2, num[2] = 3;
+  assertEquals([1, 2, 3], [].concat(num));
+
+  Number.prototype[Symbol.isConcatSpreadable] = true;
+  // Number wrapper objects may be concat-spreadable
+  assertEquals([], [].concat(new Number(123)));
+  Number.prototype[0] = 1;
+  Number.prototype[1] = 2;
+  Number.prototype[2] = 3;
+  Number.prototype.length = 3;
+  assertEquals([1,2,3], [].concat(new Number(123)));
+
+  // Number values are never concat-spreadable
+  assertEquals([true], [].concat(true));
+  delete Number.prototype[Symbol.isConcatSpreadable];
+  delete Number.prototype[0];
+  delete Number.prototype[1];
+  delete Number.prototype[2];
+  delete Number.prototype.length;
+})();
+
+
+(function testConcatSpreadableFunction() {
+  "use strict";
+  var fn = function(a, b, c) {}
+  // Functions are not concat-spreadable by default
+  assertEquals([fn], [].concat(fn));
+
+  // Functions may be individually concat-spreadable
+  fn[Symbol.isConcatSpreadable] = true;
+  fn[0] = 1, fn[1] = 2, fn[2] = 3;
+  assertEquals([1, 2, 3], [].concat(fn));
+
+  Function.prototype[Symbol.isConcatSpreadable] = true;
+  // Functions may be concat-spreadable
+  assertEquals([void 0, void 0, void 0], [].concat(function(a,b,c) {}));
+  Function.prototype[0] = 1;
+  Function.prototype[1] = 2;
+  Function.prototype[2] = 3;
+  assertEquals([1,2,3], [].concat(function(a, b, c) {}));
+
+  delete Function.prototype[Symbol.isConcatSpreadable];
+  delete Function.prototype[0];
+  delete Function.prototype[1];
+  delete Function.prototype[2];
+})();
+
+
+(function testConcatSpreadableRegExp() {
+  "use strict";
+  var re = /abc/;
+  // RegExps are not concat-spreadable by default
+  assertEquals([re], [].concat(re));
+
+  // RegExps may be individually concat-spreadable
+  re[Symbol.isConcatSpreadable] = true;
+  re[0] = 1, re[1] = 2, re[2] = 3, re.length = 3;
+  assertEquals([1, 2, 3], [].concat(re));
+
+  // RegExps may be concat-spreadable
+  RegExp.prototype[Symbol.isConcatSpreadable] = true;
+  RegExp.prototype.length = 3;
+
+  assertEquals([void 0, void 0, void 0], [].concat(/abc/));
+  RegExp.prototype[0] = 1;
+  RegExp.prototype[1] = 2;
+  RegExp.prototype[2] = 3;
+  assertEquals([1,2,3], [].concat(/abc/));
+
+  delete RegExp.prototype[Symbol.isConcatSpreadable];
+  delete RegExp.prototype[0];
+  delete RegExp.prototype[1];
+  delete RegExp.prototype[2];
+  delete RegExp.prototype.length;
+})();
+
+
+(function testArrayConcatSpreadableSparseObject() {
+  "use strict";
+  var obj = { length: 5 };
+  obj[Symbol.isConcatSpreadable] = true;
+  assertEquals([void 0, void 0, void 0, void 0, void 0], [].concat(obj));
+
+  obj.length = 4000;
+  assertEquals(new Array(4000), [].concat(obj));
+})();
+
+
+// ES5 tests
+(function testArrayConcatES5() {
+  "use strict";
+  var poses;
+  var pos;
+
+  poses = [140, 4000000000];
+  while (pos = poses.shift()) {
+    var a = new Array(pos);
+    var array_proto = [];
+    a.__proto__ = array_proto;
+    assertEquals(pos, a.length);
+    a.push('foo');
+    assertEquals(pos + 1, a.length);
+    var b = ['bar'];
+    var c = a.concat(b);
+    assertEquals(pos + 2, c.length);
+    assertEquals("undefined", typeof(c[pos - 1]));
+    assertEquals("foo", c[pos]);
+    assertEquals("bar", c[pos + 1]);
+
+    // Can we fool the system by putting a number in a string?
+    var onetwofour = "124";
+    a[onetwofour] = 'doo';
+    assertEquals(a[124], 'doo');
+    c = a.concat(b);
+    assertEquals(c[124], 'doo');
+
+    // If we put a number in the prototype, then the spec says it should be
+    // copied on concat.
+    array_proto["123"] = 'baz';
+    assertEquals(a[123], 'baz');
+
+    c = a.concat(b);
+    assertEquals(pos + 2, c.length);
+    assertEquals("baz", c[123]);
+    assertEquals("undefined", typeof(c[pos - 1]));
+    assertEquals("foo", c[pos]);
+    assertEquals("bar", c[pos + 1]);
+
+    // When we take the number off the prototype it disappears from a, but
+    // the concat put it in c itself.
+    array_proto["123"] = undefined;
+    assertEquals("undefined", typeof(a[123]));
+    assertEquals("baz", c[123]);
+
+    // If the element of prototype is shadowed, the element on the instance
+    // should be copied, but not the one on the prototype.
+    array_proto[123] = 'baz';
+    a[123] = 'xyz';
+    assertEquals('xyz', a[123]);
+    c = a.concat(b);
+    assertEquals('xyz', c[123]);
+
+    // Non-numeric properties on the prototype or the array shouldn't get
+    // copied.
+    array_proto.moe = 'joe';
+    a.ben = 'jerry';
+    assertEquals(a["moe"], 'joe');
+    assertEquals(a["ben"], 'jerry');
+    c = a.concat(b);
+    // ben was not copied
+    assertEquals("undefined", typeof(c.ben));
+
+    // When we take moe off the prototype it disappears from all arrays.
+    array_proto.moe = undefined;
+    assertEquals("undefined", typeof(c.moe));
+
+    // Negative indices don't get concated.
+    a[-1] = 'minus1';
+    assertEquals("minus1", a[-1]);
+    assertEquals("undefined", typeof(a[0xffffffff]));
+    c = a.concat(b);
+    assertEquals("undefined", typeof(c[-1]));
+    assertEquals("undefined", typeof(c[0xffffffff]));
+    assertEquals(c.length, a.length + 1);
+  }
+
+  poses = [140, 4000000000];
+  while (pos = poses.shift()) {
+    var a = new Array(pos);
+    assertEquals(pos, a.length);
+    a.push('foo');
+    assertEquals(pos + 1, a.length);
+    var b = ['bar'];
+    var c = a.concat(b);
+    assertEquals(pos + 2, c.length);
+    assertEquals("undefined", typeof(c[pos - 1]));
+    assertEquals("foo", c[pos]);
+    assertEquals("bar", c[pos + 1]);
+
+    // Can we fool the system by putting a number in a string?
+    var onetwofour = "124";
+    a[onetwofour] = 'doo';
+    assertEquals(a[124], 'doo');
+    c = a.concat(b);
+    assertEquals(c[124], 'doo');
+
+    // If we put a number in the prototype, then the spec says it should be
+    // copied on concat.
+    Array.prototype["123"] = 'baz';
+    assertEquals(a[123], 'baz');
+
+    c = a.concat(b);
+    assertEquals(pos + 2, c.length);
+    assertEquals("baz", c[123]);
+    assertEquals("undefined", typeof(c[pos - 1]));
+    assertEquals("foo", c[pos]);
+    assertEquals("bar", c[pos + 1]);
+
+    // When we take the number off the prototype it disappears from a, but
+    // the concat put it in c itself.
+    Array.prototype["123"] = undefined;
+    assertEquals("undefined", typeof(a[123]));
+    assertEquals("baz", c[123]);
+
+    // If the element of prototype is shadowed, the element on the instance
+    // should be copied, but not the one on the prototype.
+    Array.prototype[123] = 'baz';
+    a[123] = 'xyz';
+    assertEquals('xyz', a[123]);
+    c = a.concat(b);
+    assertEquals('xyz', c[123]);
+
+    // Non-numeric properties on the prototype or the array shouldn't get
+    // copied.
+    Array.prototype.moe = 'joe';
+    a.ben = 'jerry';
+    assertEquals(a["moe"], 'joe');
+    assertEquals(a["ben"], 'jerry');
+    c = a.concat(b);
+    // ben was not copied
+    assertEquals("undefined", typeof(c.ben));
+    // moe was not copied, but we can see it through the prototype
+    assertEquals("joe", c.moe);
+
+    // When we take moe off the prototype it disappears from all arrays.
+    Array.prototype.moe = undefined;
+    assertEquals("undefined", typeof(c.moe));
+
+    // Negative indices don't get concatenated.
+    a[-1] = 'minus1';
+    assertEquals("minus1", a[-1]);
+    assertEquals("undefined", typeof(a[0xffffffff]));
+    c = a.concat(b);
+    assertEquals("undefined", typeof(c[-1]));
+    assertEquals("undefined", typeof(c[0xffffffff]));
+    assertEquals(c.length, a.length + 1);
+
+  }
+
+  a = [];
+  c = a.concat('Hello');
+  assertEquals(1, c.length);
+  assertEquals("Hello", c[0]);
+  assertEquals("Hello", c.toString());
+
+  // Check that concat preserves holes.
+  var holey = [void 0,'a',,'c'].concat(['d',,'f',[0,,2],void 0]);
+  assertEquals(9, holey.length);  // hole in embedded array is ignored
+  for (var i = 0; i < holey.length; i++) {
+    if (i == 2 || i == 5) {
+      assertFalse(i in holey);
+    } else {
+      assertTrue(i in holey);
+    }
+  }
+
+  // Polluted prototype from prior tests.
+  delete Array.prototype[123];
+
+  // Check that concat reads getters in the correct order.
+  var arr1 = [,2];
+  var arr2 = [1,3];
+  var r1 = [].concat(arr1, arr2);  // [,2,1,3]
+  assertEquals([,2,1,3], r1);
+
+  // Make first array change length of second array.
+  Object.defineProperty(arr1, 0, {get: function() {
+        arr2.push("X");
+        return undefined;
+      }, configurable: true});
+  var r2 = [].concat(arr1, arr2);  // [undefined,2,1,3,"X"]
+  assertEquals([undefined,2,1,3,"X"], r2);
+
+  // Make first array change length of second array massively.
+  arr2.length = 2;
+  Object.defineProperty(arr1, 0, {get: function() {
+        arr2[500000] = "X";
+        return undefined;
+      }, configurable: true});
+  var r3 = [].concat(arr1, arr2);  // [undefined,2,1,3,"X"]
+  var expected = [undefined,2,1,3];
+  expected[500000 + 2] = "X";
+
+  assertEquals(expected, r3);
+
+  var arr3 = [];
+  var trace = [];
+  var expectedTrace = [];
+  function mkGetter(i) { return function() { trace.push(i); }; }
+  arr3.length = 10000;
+  for (var i = 0; i < 100; i++) {
+    Object.defineProperty(arr3, i * i, {get: mkGetter(i)});
+    expectedTrace[i] = i;
+    expectedTrace[100 + i] = i;
+  }
+  var r4 = [0].concat(arr3, arr3);
+  assertEquals(1 + arr3.length * 2, r4.length);
+  assertEquals(expectedTrace, trace);
+})();
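
The getter-ordering behavior exercised above reproduces outside the mjsunit harness. A minimal standalone sketch (plain JavaScript; console.log stands in for the assert helpers):

    // concat copies the elements of the first array before touching the
    // second, so a getter on arr1[0] can still grow arr2 before it is read.
    var arr1 = [, 2];
    var arr2 = [1, 3];
    Object.defineProperty(arr1, 0, {
      get: function() { arr2.push("X"); return undefined; },
      configurable: true
    });
    console.log([].concat(arr1, arr2));  // [undefined, 2, 1, 3, "X"]
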
diff --git a/deps/v8/test/mjsunit/harmony/array-from.js b/deps/v8/test/mjsunit/harmony/array-from.js
new file mode 100644 (file)
index 0000000..e7c9fef
--- /dev/null
@@ -0,0 +1,123 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-arrays --harmony-generators
+(function() {
+
+assertEquals(1, Array.from.length);
+
+function assertArrayLikeEquals(value, expected, type) {
+  assertInstanceof(value, type);
+  assertEquals(expected.length, value.length);
+  for (var i=0; i<value.length; ++i) {
+    assertEquals(expected[i], value[i]);
+  }
+}
+
+// Assert that the constructor is called with "length" for array-like objects
+var myCollectionCalled = false;
+function MyCollection(length) {
+  myCollectionCalled = true;
+  assertEquals(1, arguments.length);
+  assertEquals(5, length);
+}
+
+Array.from.call(MyCollection, {length: 5});
+assertTrue(myCollectionCalled);
+
+// Assert that calling mapfn with / without thisArg in sloppy and strict modes
+// works as expected.
+var global = this;
+function non_strict(){ assertEquals(global, this); }
+function strict(){ "use strict"; assertEquals(void 0, this); }
+function strict_null(){ "use strict"; assertEquals(null, this); }
+Array.from([1], non_strict);
+Array.from([1], non_strict, void 0);
+Array.from([1], non_strict, null);
+Array.from([1], strict);
+Array.from([1], strict, void 0);
+Array.from([1], strict_null, null);
+
+function testArrayFrom(thisArg, constructor) {
+  assertArrayLikeEquals(Array.from.call(thisArg, [], undefined), [],
+      constructor);
+  assertArrayLikeEquals(Array.from.call(thisArg, NaN), [], constructor);
+  assertArrayLikeEquals(Array.from.call(thisArg, Infinity), [], constructor);
+  assertArrayLikeEquals(Array.from.call(thisArg, 10000000), [], constructor);
+  assertArrayLikeEquals(Array.from.call(thisArg, 'test'), ['t', 'e', 's', 't'],
+      constructor);
+
+  assertArrayLikeEquals(Array.from.call(thisArg,
+      { length: 1, '0': { 'foo': 'bar' } }), [{'foo': 'bar'}], constructor);
+
+  assertArrayLikeEquals(Array.from.call(thisArg,
+      { length: -1, '0': { 'foo': 'bar' } }), [], constructor);
+
+  assertArrayLikeEquals(Array.from.call(thisArg,
+      [ 'foo', 'bar', 'baz' ]), ['foo', 'bar', 'baz'], constructor);
+
+  var kSet = new Set(['foo', 'bar', 'baz']);
+  assertArrayLikeEquals(Array.from.call(thisArg, kSet), ['foo', 'bar', 'baz'],
+      constructor);
+
+  var kMap = new Map(['foo', 'bar', 'baz'].entries());
+  assertArrayLikeEquals(Array.from.call(thisArg, kMap),
+      [[0, 'foo'], [1, 'bar'], [2, 'baz']], constructor);
+
+
+  function* generator() {
+    yield 'a';
+    yield 'b';
+    yield 'c';
+  }
+
+  assertArrayLikeEquals(Array.from.call(thisArg, generator()),
+                        ['a', 'b', 'c'], constructor);
+
+  // Mozilla:
+  // Array.from on a string handles surrogate pairs correctly.
+  var gclef = "\uD834\uDD1E"; // U+1D11E MUSICAL SYMBOL G CLEF
+  assertArrayLikeEquals(Array.from.call(thisArg, gclef), [gclef], constructor);
+  assertArrayLikeEquals(Array.from.call(thisArg, gclef + " G"),
+      [gclef, " ", "G"], constructor);
+
+  assertArrayLikeEquals(Array.from.call(thisArg, 'test', function(x) {
+    return this.filter(x);
+  }, {
+    filter: function(x) { return x.toUpperCase(); }
+  }), ['T', 'E', 'S', 'T'], constructor);
+  assertArrayLikeEquals(Array.from.call(thisArg, 'test', function(x) {
+    return x.toUpperCase();
+  }), ['T', 'E', 'S', 'T'], constructor);
+
+  this.thisArg = thisArg;
+  assertThrows('Array.from.call(thisArg, null)', TypeError);
+  assertThrows('Array.from.call(thisArg, undefined)', TypeError);
+  assertThrows('Array.from.call(thisArg, [], null)', TypeError);
+  assertThrows('Array.from.call(thisArg, [], "noncallable")', TypeError);
+
+  this.nullIterator = {};
+  nullIterator[Symbol.iterator] = null;
+  assertThrows('Array.from.call(thisArg, nullIterator)', TypeError);
+
+  this.nonObjIterator = {};
+  nonObjIterator[Symbol.iterator] = function() { return "nonObject"; };
+  assertThrows('Array.from.call(thisArg, nonObjIterator)', TypeError);
+
+  assertThrows('Array.from.call(thisArg, [], null)', TypeError);
+}
+
+function Other() {}
+
+var boundFn = (function() {}).bind(Array, 27);
+
+testArrayFrom(Array, Array);
+testArrayFrom(null, Array);
+testArrayFrom({}, Array);
+testArrayFrom(Object, Object);
+testArrayFrom(Other, Other);
+testArrayFrom(Math.cos, Array);
+testArrayFrom(boundFn, Array);
+
+})();
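
For orientation, the Array.from surface this file covers, condensed into a sketch that runs in any engine shipping the method:

    // Array-likes and iterables both convert; the optional mapfn runs per element.
    console.log(Array.from({ length: 2, 0: "a", 1: "b" }));  // ["a", "b"]
    console.log(Array.from("test", function(c) { return c.toUpperCase(); }));
    // => ["T", "E", "S", "T"]
    // Strings iterate by code point, so a surrogate pair is one element.
    console.log(Array.from("\uD834\uDD1E G").length);  // 3
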
diff --git a/deps/v8/test/mjsunit/harmony/array-includes-to-object-sloppy.js b/deps/v8/test/mjsunit/harmony/array-includes-to-object-sloppy.js
new file mode 100644 (file)
index 0000000..0f5d731
--- /dev/null
@@ -0,0 +1,29 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-array-includes
+
+// Ported from
+// https://github.com/tc39/Array.prototype.includes/blob/master/test/number-this.js
+// using https://www.npmjs.org/package/test262-to-mjsunit
+
+// Array.prototype.includes should use ToObject on this, so that when called
+// with a number, it picks up numeric properties from Number.prototype
+(function() {
+  Number.prototype[0] = "a";
+  Number.prototype[1] = "b";
+
+  Object.defineProperty(Number.prototype, 2, {
+    get: function() {
+      assertEquals("object", typeof this);
+      return "c";
+    }
+  });
+
+  Number.prototype.length = 3;
+  assertTrue(Array.prototype.includes.call(5, "a"));
+  assertTrue(Array.prototype.includes.call(5, "b"));
+  assertTrue(Array.prototype.includes.call(5, "c"));
+  assertFalse(Array.prototype.includes.call(5, "d"));
+})();
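
The ToObject step under test is easy to observe directly: calling the method on a primitive number boxes it in a Number wrapper, so properties placed on Number.prototype become reachable. A minimal sketch (the prototype mutation is for demonstration only):

    Number.prototype[0] = "a";
    Number.prototype.length = 1;
    console.log(Array.prototype.includes.call(5, "a"));  // true -- 5 was boxed
    delete Number.prototype[0];
    delete Number.prototype.length;
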
diff --git a/deps/v8/test/mjsunit/harmony/array-includes-to-object-strict.js b/deps/v8/test/mjsunit/harmony/array-includes-to-object-strict.js
new file mode 100644 (file)
index 0000000..ee87136
--- /dev/null
@@ -0,0 +1,32 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-array-includes
+
+// Ported from
+// https://github.com/tc39/Array.prototype.includes/blob/master/test/number-this.js
+// using https://www.npmjs.org/package/test262-to-mjsunit
+
+// Array.prototype.includes should use ToObject on this, so that when called
+// with a number, it picks up numeric properties from Number.prototype (even in
+// strict mode)
+(function() {
+  "use strict";
+
+  Number.prototype[0] = "a";
+  Number.prototype[1] = "b";
+
+  Object.defineProperty(Number.prototype, 2, {
+    get: function() {
+      assertEquals("object", typeof this);
+      return "c";
+    }
+  });
+
+  Number.prototype.length = 3;
+  assertTrue(Array.prototype.includes.call(5, "a"));
+  assertTrue(Array.prototype.includes.call(5, "b"));
+  assertTrue(Array.prototype.includes.call(5, "c"));
+  assertFalse(Array.prototype.includes.call(5, "d"));
+})();
diff --git a/deps/v8/test/mjsunit/harmony/array-includes.js b/deps/v8/test/mjsunit/harmony/array-includes.js
new file mode 100644 (file)
index 0000000..2cdd112
--- /dev/null
@@ -0,0 +1,677 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-array-includes
+
+// Largely ported from
+// https://github.com/tc39/Array.prototype.includes/tree/master/test
+// using https://www.npmjs.org/package/test262-to-mjsunit with further edits
+
+
+// Array.prototype.includes sees a new element added by a getter that is hit
+// during iteration
+(function() {
+  var arrayLike = {
+    length: 5,
+    0: "a",
+
+    get 1() {
+      this[2] = "c";
+      return "b";
+    }
+  };
+
+  assertTrue(Array.prototype.includes.call(arrayLike, "c"));
+})();
+
+
+// Array.prototype.includes works on array-like objects
+(function() {
+  var arrayLike1 = {
+    length: 5,
+    0: "a",
+    1: "b"
+  };
+
+  assertTrue(Array.prototype.includes.call(arrayLike1, "a"));
+  assertFalse(Array.prototype.includes.call(arrayLike1, "c"));
+
+  var arrayLike2 = {
+    length: 2,
+    0: "a",
+    1: "b",
+    2: "c"
+  };
+
+  assertTrue(Array.prototype.includes.call(arrayLike2, "b"));
+  assertFalse(Array.prototype.includes.call(arrayLike2, "c"));
+})();
+
+
+// Array.prototype.includes should fail if used on a null or undefined this
+(function() {
+  assertThrows(function() {
+    Array.prototype.includes.call(null, "a");
+  }, TypeError);
+
+  assertThrows(function() {
+    Array.prototype.includes.call(undefined, "a");
+  }, TypeError);
+})();
+
+
+// Array.prototype.includes should terminate if getting an index throws an
+// exception
+(function() {
+  function Test262Error() {}
+
+  var trappedZero = {
+    length: 2,
+
+    get 0() {
+      throw new Test262Error();
+    },
+
+    get 1() {
+      assertUnreachable("Should not try to get the first element");
+    }
+  };
+
+  assertThrows(function() {
+    Array.prototype.includes.call(trappedZero, "a");
+  }, Test262Error);
+})();
+
+
+// Array.prototype.includes should terminate if ToNumber ends up being called on
+// a symbol fromIndex
+(function() {
+  var trappedZero = {
+    length: 1,
+
+    get 0() {
+      assertUnreachable("Should not try to get the zeroth element");
+    }
+  };
+
+  assertThrows(function() {
+    Array.prototype.includes.call(trappedZero, "a", Symbol());
+  }, TypeError);
+})();
+
+
+// Array.prototype.includes should terminate if an exception occurs converting
+// the fromIndex to a number
+(function() {
+  function Test262Error() {}
+
+  var fromIndex = {
+    valueOf: function() {
+      throw new Test262Error();
+    }
+  };
+
+  var trappedZero = {
+    length: 1,
+
+    get 0() {
+      assertUnreachable("Should not try to get the zeroth element");
+    }
+  };
+
+  assertThrows(function() {
+    Array.prototype.includes.call(trappedZero, "a", fromIndex);
+  }, Test262Error);
+})();
+
+
+// Array.prototype.includes should terminate if an exception occurs getting the
+// length
+(function() {
+  function Test262Error() {}
+
+  var fromIndexTrap = {
+    valueOf: function() {
+      assertUnreachable("Should not try to call ToInteger on valueOf");
+    }
+  };
+
+  var throwingLength = {
+    get length() {
+      throw new Test262Error();
+    },
+
+    get 0() {
+      assertUnreachable("Should not try to get the zeroth element");
+    }
+  };
+
+  assertThrows(function() {
+    Array.prototype.includes.call(throwingLength, "a", fromIndexTrap);
+  }, Test262Error);
+})();
+
+
+// Array.prototype.includes should terminate if ToLength ends up being called on
+// a symbol length
+(function() {
+  var fromIndexTrap = {
+    valueOf: function() {
+      assertUnreachable("Should not try to call ToInteger on valueOf");
+    }
+  };
+
+  var badLength = {
+    length: Symbol(),
+
+    get 0() {
+      assertUnreachable("Should not try to get the zeroth element");
+    }
+  };
+
+  assertThrows(function() {
+    Array.prototype.includes.call(badLength, "a", fromIndexTrap);
+  }, TypeError);
+})();
+
+
+// Array.prototype.includes should terminate if an exception occurs converting
+// the length to a number
+(function() {
+  function Test262Error() {}
+
+  var fromIndexTrap = {
+    valueOf: function() {
+      assertUnreachable("Should not try to call ToInteger on valueOf");
+    }
+  };
+
+  var badLength = {
+    length: {
+      valueOf: function() {
+        throw new Test262Error();
+      }
+    },
+
+    get 0() {
+      assertUnreachable("Should not try to get the zeroth element");
+    }
+  };
+
+  assertThrows(function() {
+    Array.prototype.includes.call(badLength, "a", fromIndexTrap);
+  }, Test262Error);
+})();
+
+
+// Array.prototype.includes should search the whole array, as the optional
+// second argument fromIndex defaults to 0
+(function() {
+  assertTrue([10, 11].includes(10));
+  assertTrue([10, 11].includes(11));
+
+  var arrayLike = {
+    length: 2,
+
+    get 0() {
+      return "1";
+    },
+
+    get 1() {
+      return "2";
+    }
+  };
+
+  assertTrue(Array.prototype.includes.call(arrayLike, "1"));
+  assertTrue(Array.prototype.includes.call(arrayLike, "2"));
+})();
+
+
+// Array.prototype.includes returns false if fromIndex is greater or equal to
+// the length of the array
+(function() {
+  assertFalse([1, 2].includes(2, 3));
+  assertFalse([1, 2].includes(2, 2));
+
+  var arrayLikeWithTrap = {
+    length: 2,
+
+    get 0() {
+      assertUnreachable("Getter for 0 was called");
+    },
+
+    get 1() {
+      assertUnreachable("Getter for 1 was called");
+    }
+  };
+
+  assertFalse(Array.prototype.includes.call(arrayLikeWithTrap, "c", 2));
+  assertFalse(Array.prototype.includes.call(arrayLikeWithTrap, "c", 3));
+})();
+
+
+// Array.prototype.includes searches the whole array if the computed index from
+// the given negative fromIndex argument is less than 0
+(function() {
+  assertTrue([1, 3].includes(1, -4));
+  assertTrue([1, 3].includes(3, -4));
+
+  var arrayLike = {
+    length: 2,
+    0: "a",
+
+    get 1() {
+      return "b";
+    },
+
+    get "-1"() {
+      assertUnreachable("Should not try to get the element at index -1");
+    }
+  };
+
+  assertTrue(Array.prototype.includes.call(arrayLike, "a", -4));
+  assertTrue(Array.prototype.includes.call(arrayLike, "b", -4));
+})();
+
+
+// Array.prototype.includes should use a negative value as the offset from the
+// end of the array to compute fromIndex
+(function() {
+  assertTrue([12, 13].includes(13, -1));
+  assertFalse([12, 13].includes(12, -1));
+  assertTrue([12, 13].includes(12, -2));
+
+  var arrayLike = {
+    length: 2,
+
+    get 0() {
+      return "a";
+    },
+
+    get 1() {
+      return "b";
+    }
+  };
+
+  assertTrue(Array.prototype.includes.call(arrayLike, "b", -1));
+  assertFalse(Array.prototype.includes.call(arrayLike, "a", -1));
+  assertTrue(Array.prototype.includes.call(arrayLike, "a", -2));
+})();
+
+
+// Array.prototype.includes converts its fromIndex parameter to an integer
+(function() {
+  assertFalse(["a", "b"].includes("a", 2.3));
+
+  var arrayLikeWithTraps = {
+    length: 2,
+
+    get 0() {
+      assertUnreachable("Getter for 0 was called");
+    },
+
+    get 1() {
+      assertUnreachable("Getter for 1 was called");
+    }
+  };
+
+  assertFalse(Array.prototype.includes.call(arrayLikeWithTraps, "c", 2.1));
+  assertFalse(Array.prototype.includes.call(arrayLikeWithTraps, "c", +Infinity));
+  assertTrue(["a", "b", "c"].includes("a", -Infinity));
+  assertTrue(["a", "b", "c"].includes("c", 2.9));
+  assertTrue(["a", "b", "c"].includes("c", NaN));
+
+  var arrayLikeWithTrapAfterZero = {
+    length: 2,
+
+    get 0() {
+      return "a";
+    },
+
+    get 1() {
+      assertUnreachable("Getter for 1 was called");
+    }
+  };
+
+  assertTrue(Array.prototype.includes.call(arrayLikeWithTrapAfterZero, "a", NaN));
+
+  var numberLike = {
+    valueOf: function() {
+      return 2;
+    }
+  };
+
+  assertFalse(["a", "b", "c"].includes("a", numberLike));
+  assertFalse(["a", "b", "c"].includes("a", "2"));
+  assertTrue(["a", "b", "c"].includes("c", numberLike));
+  assertTrue(["a", "b", "c"].includes("c", "2"));
+})();
+
+
+// Array.prototype.includes should have length 1
+(function() {
+  assertEquals(1, Array.prototype.includes.length);
+})();
+
+
+// Array.prototype.includes should have name property with value 'includes'
+(function() {
+  assertEquals("includes", Array.prototype.includes.name);
+})();
+
+
+// !!! Test failed to convert:
+// Cannot convert tests with includes.
+// !!!
+
+
+// Array.prototype.includes does not skip holes; if the array has a prototype,
+// it reads hole values from that prototype
+(function() {
+  var holesEverywhere = [,,,];
+
+  holesEverywhere.__proto__ = {
+    1: "a"
+  };
+
+  holesEverywhere.__proto__.__proto__ = Array.prototype;
+  assertTrue(holesEverywhere.includes("a"));
+  var oneHole = ["a", "b",, "d"];
+
+  oneHole.__proto__ = {
+    get 2() {
+      return "c";
+    }
+  };
+
+  assertTrue(Array.prototype.includes.call(oneHole, "c"));
+})();
+
+
+// Array.prototype.includes does not skip holes; instead it treats them as
+// undefined
+(function() {
+  assertTrue([,,,].includes(undefined));
+  assertTrue(["a", "b",, "d"].includes(undefined));
+})();
+
+
+// Array.prototype.includes gets length property from the prototype if it's
+// available
+(function() {
+  var proto = {
+    length: 1
+  };
+
+  var arrayLike = Object.create(proto);
+  arrayLike[0] = "a";
+
+  Object.defineProperty(arrayLike, "1", {
+    get: function() {
+      assertUnreachable("Getter for 1 was called");
+    }
+  });
+
+  assertTrue(Array.prototype.includes.call(arrayLike, "a"));
+})();
+
+
+// Array.prototype.includes treats a missing length property as zero
+(function() {
+  var arrayLikeWithTraps = {
+    get 0() {
+      assertUnreachable("Getter for 0 was called");
+    },
+
+    get 1() {
+      assertUnreachable("Getter for 1 was called");
+    }
+  };
+
+  assertFalse(Array.prototype.includes.call(arrayLikeWithTraps, "a"));
+})();
+
+
+// Array.prototype.includes should always return false on negative-length
+// objects
+(function() {
+  assertFalse(Array.prototype.includes.call({
+    length: -1
+  }, 2));
+
+  assertFalse(Array.prototype.includes.call({
+    length: -2
+  }));
+
+  assertFalse(Array.prototype.includes.call({
+    length: -Infinity
+  }, undefined));
+
+  assertFalse(Array.prototype.includes.call({
+    length: -Math.pow(2, 53)
+  }, NaN));
+
+  assertFalse(Array.prototype.includes.call({
+    length: -1,
+    "-1": 2
+  }, 2));
+
+  assertFalse(Array.prototype.includes.call({
+    length: -3,
+    "-1": 2
+  }, 2));
+
+  assertFalse(Array.prototype.includes.call({
+    length: -Infinity,
+    "-1": 2
+  }, 2));
+
+  var arrayLikeWithTrap = {
+    length: -1,
+
+    get 0() {
+      assertUnreachable("Getter for 0 was called");
+    }
+  };
+
+  assertFalse(Array.prototype.includes.call(arrayLikeWithTrap, 2));
+})();
+
+
+// Array.prototype.includes should clamp positive lengths to 2^53 - 1
+(function() {
+  var fromIndexForLargeIndexTests = 9007199254740990;
+
+  assertFalse(Array.prototype.includes.call({
+    length: 1
+  }, 2));
+
+  assertTrue(Array.prototype.includes.call({
+    length: 1,
+    0: "a"
+  }, "a"));
+
+  assertTrue(Array.prototype.includes.call({
+    length: +Infinity,
+    0: "a"
+  }, "a"));
+
+  assertFalse(Array.prototype.includes.call({
+    length: +Infinity
+  }, "a", fromIndexForLargeIndexTests));
+
+  var arrayLikeWithTrap = {
+    length: +Infinity,
+
+    get 9007199254740992() {
+      assertUnreachable("Getter for 9007199254740992 (i.e. 2^53) was called");
+    },
+
+    "9007199254740993": "a"
+  };
+
+  assertFalse(
+    Array.prototype.includes.call(arrayLikeWithTrap, "a", fromIndexForLargeIndexTests)
+  );
+
+  var arrayLikeWithTooBigLength = {
+    length: 9007199254740996,
+    "9007199254740992": "a"
+  };
+
+  assertFalse(
+    Array.prototype.includes.call(arrayLikeWithTooBigLength, "a", fromIndexForLargeIndexTests)
+  );
+})();
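
The clamping above is ToLength at work: the length is converted to an integer, negatives collapse to 0, and anything beyond 2^53 - 1 is clamped down. A sketch of that conversion (a hypothetical helper, not V8's implementation):

    function toLength(value) {
      var n = Number(value);
      if (isNaN(n)) return 0;
      n = n < 0 ? Math.ceil(n) : Math.floor(n);  // ToInteger
      return Math.min(Math.max(n, 0), Math.pow(2, 53) - 1);
    }
    console.log(toLength(-Infinity));  // 0
    console.log(toLength(Infinity));   // 9007199254740991
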
+
+
+// Array.prototype.includes should always return false on zero-length objects
+(function() {
+  assertFalse([].includes(2));
+  assertFalse([].includes());
+  assertFalse([].includes(undefined));
+  assertFalse([].includes(NaN));
+
+  assertFalse(Array.prototype.includes.call({
+    length: 0
+  }, 2));
+
+  assertFalse(Array.prototype.includes.call({
+    length: 0
+  }));
+
+  assertFalse(Array.prototype.includes.call({
+    length: 0
+  }, undefined));
+
+  assertFalse(Array.prototype.includes.call({
+    length: 0
+  }, NaN));
+
+  assertFalse(Array.prototype.includes.call({
+    length: 0,
+    0: 2
+  }, 2));
+
+  assertFalse(Array.prototype.includes.call({
+    length: 0,
+    0: undefined
+  }));
+
+  assertFalse(Array.prototype.includes.call({
+    length: 0,
+    0: undefined
+  }, undefined));
+
+  assertFalse(Array.prototype.includes.call({
+    length: 0,
+    0: NaN
+  }, NaN));
+
+  var arrayLikeWithTrap = {
+    length: 0,
+
+    get 0() {
+      assertUnreachable("Getter for 0 was called");
+    }
+  };
+
+  Array.prototype.includes.call(arrayLikeWithTrap);
+
+  var trappedFromIndex = {
+    valueOf: function() {
+      assertUnreachable("Should not try to convert fromIndex to a number on a zero-length array");
+    }
+  };
+
+  [].includes("a", trappedFromIndex);
+
+  Array.prototype.includes.call({
+    length: 0
+  }, trappedFromIndex);
+})();
+
+
+// Array.prototype.includes works on objects
+(function() {
+  assertFalse(["a", "b", "c"].includes({}));
+  assertFalse([{}, {}].includes({}));
+  var obj = {};
+  assertTrue([obj].includes(obj));
+  assertFalse([obj].includes(obj, 1));
+  assertTrue([obj, obj].includes(obj, 1));
+
+  var stringyObject = {
+    toString: function() {
+      return "a";
+    }
+  };
+
+  assertFalse(["a", "b", obj].includes(stringyObject));
+})();
+
+
+// Array.prototype.includes does not see an element removed by a getter that is
+// hit during iteration
+(function() {
+  var arrayLike = {
+    length: 5,
+    0: "a",
+
+    get 1() {
+      delete this[2];
+      return "b";
+    },
+
+    2: "c"
+  };
+
+  assertFalse(Array.prototype.includes.call(arrayLike, "c"));
+})();
+
+
+// Array.prototype.includes should use the SameValueZero algorithm to compare
+(function() {
+  assertTrue([1, 2, 3].includes(2));
+  assertFalse([1, 2, 3].includes(4));
+  assertTrue([1, 2, NaN].includes(NaN));
+  assertTrue([1, 2, -0].includes(+0));
+  assertTrue([1, 2, -0].includes(-0));
+  assertTrue([1, 2, +0].includes(-0));
+  assertTrue([1, 2, +0].includes(+0));
+  assertFalse([1, 2, -Infinity].includes(+Infinity));
+  assertTrue([1, 2, -Infinity].includes(-Infinity));
+  assertFalse([1, 2, +Infinity].includes(-Infinity));
+  assertTrue([1, 2, +Infinity].includes(+Infinity));
+})();
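
In practice SameValueZero only diverges from the Strict Equality used by indexOf for NaN; both already treat -0 and +0 as equal. Sketch:

    var a = [1, NaN, -0];
    console.log(a.includes(NaN));  // true  -- SameValueZero matches NaN with NaN
    console.log(a.indexOf(NaN));   // -1    -- === never matches NaN
    console.log(a.includes(0));    // true
    console.log(a.indexOf(0));     // 2     -- === also equates -0 and +0
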
+
+
+// Array.prototype.includes stops once it hits the length of an array-like, even
+// if there are more after
+(function() {
+  var arrayLike = {
+    length: 2,
+    0: "a",
+    1: "b",
+
+    get 2() {
+      assertUnreachable("Should not try to get the second element");
+    }
+  };
+
+  assertFalse(Array.prototype.includes.call(arrayLike, "c"));
+})();
+
+
+// Array.prototype.includes works on typed arrays
+(function() {
+  assertTrue(Array.prototype.includes.call(new Uint8Array([1, 2, 3]), 2));
+
+  assertTrue(
+    Array.prototype.includes.call(new Float32Array([2.5, 3.14, Math.PI]), 3.1415927410125732)
+  );
+
+  assertFalse(Array.prototype.includes.call(new Uint8Array([1, 2, 3]), 4));
+  assertFalse(Array.prototype.includes.call(new Uint8Array([1, 2, 3]), 2, 2));
+})();
index 1eedb68..d19a34a 100644 (file)
@@ -1,30 +1,7 @@
 // Copyright 2011 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
 //
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
 // Flags: --harmony-scoping
 
 // Test for conflicting variable bindings.
@@ -175,7 +152,7 @@ for (var v = 0; v < varbinds.length; ++v) {
 
 // Test conflicting catch/var bindings.
 for (var v = 0; v < varbinds.length; ++v) {
-  TestConflict('try {} catch(x) {' + varbinds[v] + '}');
+  TestNoConflict('try {} catch(x) {' + varbinds[v] + '}');
 }
 
 // Test conflicting parameter/var bindings.
index b71729e..c21a0a3 100644 (file)
 
 // Function local const.
 function constDecl0(use) {
-  return "(function() { const constvar = 1; " + use + "; });";
+  return "(function() { const constvar = 1; " + use + "; })();";
 }
 
 
 function constDecl1(use) {
-  return "(function() { " + use + "; const constvar = 1; });";
+  return "(function() { " + use + "; const constvar = 1; })();";
 }
 
 
 // Function local const, assign from eval.
 function constDecl2(use) {
-  use = "eval('(function() { " + use + " })')";
+  use = "eval('(function() { " + use + " })')()";
   return "(function() { const constvar = 1; " + use + "; })();";
 }
 
 
 function constDecl3(use) {
-  use = "eval('(function() { " + use + " })')";
+  use = "eval('(function() { " + use + " })')()";
   return "(function() { " + use + "; const constvar = 1; })();";
 }
 
 
 // Block local const.
 function constDecl4(use) {
-  return "(function() { { const constvar = 1; " + use + "; } });";
+  return "(function() { { const constvar = 1; " + use + "; } })();";
 }
 
 
 function constDecl5(use) {
-  return "(function() { { " + use + "; const constvar = 1; } });";
+  return "(function() { { " + use + "; const constvar = 1; } })();";
 }
 
 
 // Block local const, assign from eval.
 function constDecl6(use) {
-  use = "eval('(function() {" + use + "})')";
+  use = "eval('(function() {" + use + "})')()";
   return "(function() { { const constvar = 1; " + use + "; } })();";
 }
 
 
 function constDecl7(use) {
-  use = "eval('(function() {" + use + "})')";
+  use = "eval('(function() {" + use + "})')()";
   return "(function() { { " + use + "; const constvar = 1; } })();";
 }
 
 
 // Function expression name.
 function constDecl8(use) {
-  return "(function constvar() { " + use + "; });";
+  return "(function constvar() { " + use + "; })();";
 }
 
 
 // Function expression name, assign from eval.
 function constDecl9(use) {
-  use = "eval('(function(){" + use + "})')";
+  use = "eval('(function(){" + use + "})')()";
   return "(function constvar() { " + use + "; })();";
 }
 
@@ -104,6 +104,7 @@ let decls = [ constDecl0,
               constDecl8,
               constDecl9
               ];
+let declsForTDZ = new Set([constDecl1, constDecl3, constDecl5, constDecl7]);
 let uses = [ 'constvar = 1;',
              'constvar += 1;',
              '++constvar;',
@@ -116,7 +117,13 @@ function Test(d,u) {
     print(d(u));
     eval(d(u));
   } catch (e) {
-    assertInstanceof(e, SyntaxError);
+    if (declsForTDZ.has(d) && u !== uses[0]) {
+      // In these cases, the read of the const variable occurs
+      // before the write to it, so TDZ kicks in before the const check.
+      assertInstanceof(e, ReferenceError);
+      return;
+    }
+    assertInstanceof(e, TypeError);
     assertTrue(e.toString().indexOf("Assignment to constant variable") >= 0);
     return;
   }
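
The distinction this hunk encodes: reading a lexical binding before its declaration hits the temporal dead zone and throws a ReferenceError, while writing to an already-initialized const throws a TypeError. A standalone strict-mode sketch (under this patch's V8 it additionally needs --harmony-scoping):

    "use strict";
    try {
      (function() { constvar += 1; const constvar = 1; })();
    } catch (e) {
      console.log(e instanceof ReferenceError);  // true -- the read hit the TDZ
    }
    try {
      (function() { const constvar = 1; constvar = 2; })();
    } catch (e) {
      console.log(e instanceof TypeError);  // true -- assignment to const
    }
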
diff --git a/deps/v8/test/mjsunit/harmony/block-non-strict-errors.js b/deps/v8/test/mjsunit/harmony/block-non-strict-errors.js
new file mode 100644 (file)
index 0000000..11fa5c6
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-scoping --harmony-classes
+
+function CheckError(source) {
+  var exception = null;
+  try {
+    eval(source);
+  } catch (e) {
+    exception = e;
+  }
+  assertNotNull(exception);
+  assertEquals(
+      "Block-scoped declarations (let, const, function, class) not yet supported outside strict mode",
+      exception.message);
+}
+
+
+function CheckOk(source) {
+  eval(source);
+}
+
+CheckError("let x = 1;");
+CheckError("{ let x = 1; }");
+CheckError("function f() { let x = 1; }");
+CheckError("for (let x = 1; x < 1; x++) {}");
+CheckError("for (let x of []) {}");
+CheckError("for (let x in []) {}");
+CheckError("class C {}");
+CheckError("class C extends Array {}");
+CheckError("(class {});");
+CheckError("(class extends Array {});");
+CheckError("(class C {});");
+CheckError("(class C exends Array {});");
+
+CheckOk("let = 1;");
+CheckOk("{ let = 1; }");
+CheckOk("function f() { let = 1; }");
+CheckOk("for (let = 1; let < 1; let++) {}");
index 59371e4..29ffbf8 100644 (file)
@@ -2,7 +2,7 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-// Flags: --harmony-classes
+// Flags: --harmony-classes --harmony-sloppy
 
 (function TestBasics() {
   var C = class C {}
   assertEquals(Function.prototype, Object.getPrototypeOf(D));
   assertEquals('D', D.name);
 
+  class D2 { constructor() {} }
+  assertEquals('D2', D2.name);
+
+  // TODO(arv): The logic for the name of anonymous functions in ES6 requires
+  // the name below to be 'E'.
   var E = class {}
-  assertEquals('', E.name);
+  assertEquals('', E.name);  // Should be 'E'.
+
+  var F = class { constructor() {} };
+  assertEquals('', F.name);  // Should be 'F'.
 })();
 
 
   assertThrows('class C extends function B() { with ({}); return B; }() {}',
                SyntaxError);
 
+  var D = class extends function() {
+    arguments.caller;
+  } {};
+  assertThrows(function() {
+    Object.getPrototypeOf(D).arguments;
+  }, TypeError);
+  assertThrows(function() {
+    new D;
+  }, TypeError);
 })();
 
 
@@ -589,15 +606,274 @@ function assertAccessorDescriptor(object, name) {
 })();
 
 
-/* TODO(arv): Implement
-(function TestNameBindingInConstructor() {
+(function TestDefaultConstructorNoCrash() {
+  // Regression test for https://code.google.com/p/v8/issues/detail?id=3661
+  class C {}
+  assertEquals(undefined, C());
+  assertEquals(undefined, C(1));
+  assertTrue(new C() instanceof C);
+  assertTrue(new C(1) instanceof C);
+})();
+
+
+(function TestDefaultConstructor() {
+  var calls = 0;
+  class Base {
+    constructor() {
+      calls++;
+    }
+  }
+  class Derived extends Base {}
+  var object = new Derived;
+  assertEquals(1, calls);
+
+  calls = 0;
+  Derived();
+  assertEquals(1, calls);
+})();
+
+
+(function TestDefaultConstructorArguments() {
+  var args, self;
+  class Base {
+    constructor() {
+      self = this;
+      args = arguments;
+    }
+  }
+  class Derived extends Base {}
+
+  new Derived;
+  assertEquals(0, args.length);
+
+  new Derived(0, 1, 2);
+  assertEquals(3, args.length);
+  assertTrue(self instanceof Derived);
+
+  var arr = new Array(100);
+  var obj = {};
+  Derived.apply(obj, arr);
+  assertEquals(100, args.length);
+  assertEquals(obj, self);
+})();
+
+
+(function TestDefaultConstructorArguments2() {
+  var args;
+  class Base {
+    constructor(x, y) {
+      args = arguments;
+    }
+  }
+  class Derived extends Base {}
+
+  new Derived;
+  assertEquals(0, args.length);
+
+  new Derived(1);
+  assertEquals(1, args.length);
+  assertEquals(1, args[0]);
+
+  new Derived(1, 2, 3);
+  assertEquals(3, args.length);
+  assertEquals(1, args[0]);
+  assertEquals(2, args[1]);
+  assertEquals(3, args[2]);
+})();
+
+
+(function TestNameBindingConst() {
+  assertThrows('class C { constructor() { C = 42; } }; new C();', TypeError);
+  assertThrows('new (class C { constructor() { C = 42; } })', TypeError);
+  assertThrows('class C { m() { C = 42; } }; new C().m()', TypeError);
+  assertThrows('new (class C { m() { C = 42; } }).m()', TypeError);
+  assertThrows('class C { get x() { C = 42; } }; new C().x', TypeError);
+  assertThrows('(new (class C { get x() { C = 42; } })).x', TypeError);
+  assertThrows('class C { set x(_) { C = 42; } }; new C().x = 15;', TypeError);
+  assertThrows('(new (class C { set x(_) { C = 42; } })).x = 15;', TypeError);
+})();
+
+
+(function TestNameBinding() {
+  var C2;
   class C {
     constructor() {
-      assertThrows(function() {
-        C = 42;
-      }, ReferenceError);
+      C2 = C;
+    }
+    m() {
+      C2 = C;
+    }
+    get x() {
+      C2 = C;
+    }
+    set x(_) {
+      C2 = C;
+    }
+  }
+  new C();
+  assertEquals(C, C2);
+
+  C2 = undefined;
+  new C().m();
+  assertEquals(C, C2);
+
+  C2 = undefined;
+  new C().x;
+  assertEquals(C, C2);
+
+  C2 = undefined;
+  new C().x = 1;
+  assertEquals(C, C2);
+})();
+
+
+(function TestNameBindingExpression() {
+  var C3;
+  var C = class C2 {
+    constructor() {
+      assertEquals(C2, C);
+      C3 = C2;
+    }
+    m() {
+      assertEquals(C2, C);
+      C3 = C2;
+    }
+    get x() {
+      assertEquals(C2, C);
+      C3 = C2;
+    }
+    set x(_) {
+      assertEquals(C2, C);
+      C3 = C2;
     }
   }
   new C();
+  assertEquals(C, C3);
+
+  C3 = undefined;
+  new C().m();
+  assertEquals(C, C3);
+
+  C3 = undefined;
+  new C().x;
+  assertEquals(C, C3);
+
+  C3 = undefined;
+  new C().x = 1;
+  assertEquals(C, C3);
+})();
+
+
+(function TestNameBindingInExtendsExpression() {
+  assertThrows(function() {
+    class x extends x {}
+  }, ReferenceError);
+
+  assertThrows(function() {
+    (class x extends x {});
+  }, ReferenceError);
+
+  assertThrows(function() {
+    var x = (class x extends x {});
+  }, ReferenceError);
 })();
-*/
+
+
+(function TestSuperCallSyntacticRestriction() {
+  assertThrows(function() {
+    class C {
+      constructor() {
+        var y;
+        super();
+      }
+    }; new C();
+  }, TypeError);
+  assertThrows(function() {
+    class C {
+      constructor() {
+        super(this.x);
+      }
+    }; new C();
+  }, TypeError);
+  assertThrows(function() {
+    class C {
+      constructor() {
+        super(this);
+      }
+    }; new C();
+  }, TypeError);
+  assertThrows(function() {
+    class C {
+      constructor() {
+        super.method();
+        super(this);
+      }
+    }; new C();
+  }, TypeError);
+  assertThrows(function() {
+    class C {
+      constructor() {
+        super(super.method());
+      }
+    }; new C();
+  }, TypeError);
+  assertThrows(function() {
+    class C {
+      constructor() {
+        super(super());
+      }
+    }; new C();
+  }, TypeError);
+  assertThrows(function() {
+    class C {
+      constructor() {
+        super(1, 2, Object.getPrototypeOf(this));
+      }
+    }; new C();
+  }, TypeError);
+  assertThrows(function() {
+    class C {
+      constructor() {
+        { super(1, 2); }
+      }
+    }; new C();
+  }, TypeError);
+  assertThrows(function() {
+    class C {
+      constructor() {
+        if (1) super();
+      }
+    }; new C();
+  }, TypeError);
+
+  class C1 extends Object {
+    constructor() {
+      'use strict';
+      super();
+    }
+  };
+  new C1();
+
+  class C2 extends Object {
+    constructor() {
+      ; 'use strict';;;;;
+      super();
+    }
+  };
+  new C2();
+
+  class C3 extends Object {
+    constructor() {
+      ; 'use strict';;;;;
+      // This is a comment.
+      super();
+    }
+  };
+  new C3();
+
+  class C4 extends Object {
+    constructor() {
+      super(new super());
+    }
+  }; new C4();
+}());
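
The name-binding tests rest on two rules: inside the class body the class name is an immutable binding, like a named function expression's name in strict mode, and a class expression's name does not leak into the enclosing scope. Sketch:

    try {
      new (class C { constructor() { C = 42; } })();
    } catch (e) {
      console.log(e instanceof TypeError);  // true -- C is const-like inside
    }
    var Alias = class Inner {};
    console.log(typeof Inner);  // "undefined" -- name scoped to the class body
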
index 2db4942..8180377 100644 (file)
@@ -73,7 +73,7 @@ function BeginTest(name) {
 // Check result of a test.
 function EndTest() {
   assertTrue(listener_called, "listerner not called for " + test_name);
-  assertNull(exception, test_name);
+  assertNull(exception, test_name, exception);
   end_test_count++;
 }
 
@@ -108,6 +108,7 @@ function CheckScopeChain(scopes, exec_state) {
     assertEquals(i, response.body.scopes[i].index);
     assertEquals(scopes[i], response.body.scopes[i].type);
     if (scopes[i] == debug.ScopeType.Local ||
+        scopes[i] == debug.ScopeType.Script ||
         scopes[i] == debug.ScopeType.Closure) {
       assertTrue(response.body.scopes[i].object.ref < 0);
     } else {
@@ -197,6 +198,7 @@ function local_block_1() {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
 };
@@ -215,6 +217,7 @@ function local_2(a) {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1}, 0, exec_state);
 };
@@ -232,6 +235,7 @@ function local_3(a) {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,x:3}, 0, exec_state);
 };
@@ -250,6 +254,7 @@ function local_4(a, b) {
 
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({a:1,b:2,x:3,y:4}, 0, exec_state);
 };
@@ -270,6 +275,7 @@ function local_5(a) {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Block,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x:5}, 0, exec_state);
   CheckScopeContent({a:1}, 1, exec_state);
@@ -292,6 +298,7 @@ function local_6(a) {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Block,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x:6,y:7}, 0, exec_state);
   CheckScopeContent({a:1}, 1, exec_state);
@@ -315,6 +322,7 @@ function local_7(a) {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Block,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x:8}, 0, exec_state);
   CheckScopeContent({a:1}, 1, exec_state);
@@ -344,6 +352,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Local,
                    debug.ScopeType.Block,
                    debug.ScopeType.Closure,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({}, 0, exec_state);
   CheckScopeContent({a:1,x:2,y:3}, 2, exec_state);
@@ -364,6 +373,7 @@ function for_loop_1() {
 listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Block,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x:'y'}, 0, exec_state);
   // The function scope contains a temporary iteration variable, but it is
@@ -389,6 +399,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Block,
                    debug.ScopeType.Block,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x:3}, 0, exec_state);
   CheckScopeContent({x:'y'}, 1, exec_state);
@@ -413,6 +424,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Block,
                    debug.ScopeType.Block,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x:3}, 0, exec_state);
   CheckScopeContent({x:3}, 1, exec_state);
@@ -437,6 +449,7 @@ listener_delegate = function(exec_state) {
                    debug.ScopeType.Block,
                    debug.ScopeType.Block,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x:5}, 0, exec_state);
   CheckScopeContent({x:3}, 1, exec_state);
@@ -460,6 +473,7 @@ listener_delegate = function(exec_state) {
   CheckScopeChain([debug.ScopeType.Block,
                    debug.ScopeType.Block,
                    debug.ScopeType.Local,
+                   debug.ScopeType.Script,
                    debug.ScopeType.Global], exec_state);
   CheckScopeContent({x:3,y:5}, 0, exec_state);
   CheckScopeContent({x:3,y:5}, 1, exec_state);
@@ -467,3 +481,24 @@ listener_delegate = function(exec_state) {
 };
 for_loop_5();
 EndTest();
+
+
+// Uninitialized variables
+BeginTest("Uninitialized 1");
+
+function uninitialized_1() {
+  {
+    debugger;
+    let x = 1;
+  }
+}
+
+listener_delegate = function(exec_state) {
+  CheckScopeChain([debug.ScopeType.Block,
+                   debug.ScopeType.Local,
+                   debug.ScopeType.Script,
+                   debug.ScopeType.Global], exec_state);
+  CheckScopeContent({}, 0, exec_state);
+};
+uninitialized_1();
+EndTest();
index 16885d0..d133cc0 100644 (file)
@@ -67,3 +67,43 @@ assertEquals(1, result);
 Debug.clearBreakPoint(bp);
 // Get rid of the debug event listener.
 Debug.setListener(null);
+
+
+function f1() {
+  {
+    let i = 1;
+    debugger;
+    assertEquals(2, i);
+  }
+}
+
+function f2() {
+  {
+    let i = 1;
+    debugger;
+    assertEquals(2, i);
+    return function() { return i++; }
+  }
+}
+
+var exception;
+Debug.setListener(function (event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      var frame = exec_state.frame();
+      assertEquals(1, frame.evaluate("i").value());
+      var allScopes = frame.allScopes();
+      assertEquals(1, allScopes[0].scopeObject().value().i);
+      allScopes[0].setVariableValue("i", 2);
+    }
+  } catch (e) {
+    exception = e;
+  }
+});
+
+exception = null;
+f1();
+assertEquals(null, exception, exception);
+exception = null;
+f2();
+assertEquals(null, exception, exception);
index 0113be6..1b380c2 100644 (file)
@@ -28,6 +28,7 @@
 // Flags: --expose-debug-as debug --harmony-scoping
 
 "use strict";
+let top_level_let = 255;
 
 // Get the Debug object exposed from the debug context global object.
 var Debug = debug.Debug;
@@ -50,7 +51,8 @@ var ScopeType = { Global: 0,
                   With: 2,
                   Closure: 3,
                   Catch: 4,
-                  Block: 5 };
+                  Block: 5,
+                  Script: 6};
 
 var f1 = (function F1(x) {
   function F2(y) {
@@ -72,12 +74,13 @@ var f1 = (function F1(x) {
 
 var mirror = Debug.MakeMirror(f1);
 
-assertEquals(4, mirror.scopeCount());
+assertEquals(5, mirror.scopeCount());
 
 CheckScope(mirror.scope(0), { a: 4, b: 5 }, ScopeType.Closure);
 CheckScope(mirror.scope(1), { z: 22, w: 5, v: "Capybara" }, ScopeType.Closure);
 CheckScope(mirror.scope(2), { x: 5 }, ScopeType.Closure);
-CheckScope(mirror.scope(3), {}, ScopeType.Global);
+CheckScope(mirror.scope(3), { top_level_let: 255 }, ScopeType.Script);
+CheckScope(mirror.scope(4), {}, ScopeType.Global);
 
 var f2 = (function() {
   var v1 = 3;
@@ -104,7 +107,7 @@ var f2 = (function() {
 
 var mirror = Debug.MakeMirror(f2);
 
-assertEquals(5, mirror.scopeCount());
+assertEquals(6, mirror.scopeCount());
 
 // Implementation artifact: l4 isn't used in the closure, but it is still saved.
 CheckScope(mirror.scope(0), { l4: 11 }, ScopeType.Block);
@@ -112,4 +115,5 @@ CheckScope(mirror.scope(0), { l4: 11 }, ScopeType.Block);
 CheckScope(mirror.scope(1), { l3: 9 }, ScopeType.Block);
 CheckScope(mirror.scope(2), { l1: 6, l2: 7 }, ScopeType.Block);
 CheckScope(mirror.scope(3), { v1:3, l0: 0, v3: 5, v6: 11 }, ScopeType.Closure);
-CheckScope(mirror.scope(4), {}, ScopeType.Global);
+CheckScope(mirror.scope(4), { top_level_let: 255 }, ScopeType.Script);
+CheckScope(mirror.scope(5), {}, ScopeType.Global);
diff --git a/deps/v8/test/mjsunit/harmony/debug-step-into-class-extends.js b/deps/v8/test/mjsunit/harmony/debug-step-into-class-extends.js
new file mode 100644 (file)
index 0000000..ffc6fda
--- /dev/null
@@ -0,0 +1,42 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-classes
+
+'use strict';
+
+var Debug = debug.Debug
+
+var done = false;
+var stepCount = 0;
+
+function listener(event, execState, eventData, data) {
+  if (event == Debug.DebugEvent.Break) {
+    if (!done) {
+      execState.prepareStep(Debug.StepAction.StepInto);
+      var s = execState.frame().sourceLineText();
+      assertTrue(s.indexOf('// ' + stepCount + '.') !== -1);
+      stepCount++;
+    }
+  }
+};
+
+Debug.setListener(listener);
+
+function GetBase() {
+  var x = 1;   // 1.
+  var y = 2;   // 2.
+  done = true; // 3.
+  return null;
+}
+
+function f() {
+  class Derived extends GetBase() {} // 0.
+}
+
+var bp = Debug.setBreakPoint(f, 0);
+f();
+assertEquals(4, stepCount);
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/harmony/debug-step-into-constructor.js b/deps/v8/test/mjsunit/harmony/debug-step-into-constructor.js
new file mode 100644 (file)
index 0000000..dbef60f
--- /dev/null
@@ -0,0 +1,113 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --harmony-classes
+
+'use strict';
+
+var Debug = debug.Debug
+var done, stepCount;
+
+function listener(event, execState, eventData, data) {
+  if (event == Debug.DebugEvent.Break) {
+    if (!done) {
+      execState.prepareStep(Debug.StepAction.StepInto);
+      var s = execState.frame().sourceLineText();
+      assertTrue(s.indexOf('// ' + stepCount + '.') !== -1);
+      stepCount++;
+    }
+  }
+};
+
+Debug.setListener(listener);
+
+
+class Base {
+  constructor() {
+    var x = 1;   // 1.
+    var y = 2;   // 2.
+    done = true; // 3.
+  }
+}
+
+class Derived extends Base {}
+
+
+(function TestBreakPointInConstructor() {
+  done = false;
+  stepCount = 1;
+  var bp = Debug.setBreakPoint(Base, 0);
+
+  new Base();
+  assertEquals(4, stepCount);
+
+  Debug.clearBreakPoint(bp);
+})();
+
+
+(function TestDefaultConstructor() {
+  done = false;
+  stepCount = 1;
+
+  var bp = Debug.setBreakPoint(Base, 0);
+  new Derived();
+  assertEquals(4, stepCount);
+
+  Debug.clearBreakPoint(bp);
+})();
+
+
+(function TestStepInto() {
+  done = false;
+  stepCount = 0;
+
+  function f() {
+    new Derived();  // 0.
+  }
+
+  var bp = Debug.setBreakPoint(f, 0);
+  f();
+  assertEquals(4, stepCount);
+
+  Debug.clearBreakPoint(bp);
+})();
+
+
+(function TestExtraIndirection() {
+  done = false;
+  stepCount = 0;
+
+  class Derived2 extends Derived {}
+
+  function f() {
+    new Derived2();  // 0.
+  }
+
+  var bp = Debug.setBreakPoint(f, 0);
+  f();
+  assertEquals(4, stepCount);
+
+  Debug.clearBreakPoint(bp);
+})();
+
+
+(function TestBoundClass() {
+  done = false;
+  stepCount = 0;
+
+  var bound = Derived.bind(null);
+
+  function f() {
+    new bound();  // 0.
+  }
+
+  var bp = Debug.setBreakPoint(f, 0);
+  f();
+  assertEquals(4, stepCount);
+
+  Debug.clearBreakPoint(bp);
+})();
+
+
+Debug.setListener(null);
diff --git a/deps/v8/test/mjsunit/harmony/disable-harmony-string.js b/deps/v8/test/mjsunit/harmony/disable-harmony-string.js
new file mode 100644 (file)
index 0000000..0b88ae0
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --noharmony-strings
+
+assertEquals(undefined, String.prototype.includes);
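
Because the method sits behind a staging flag, consumer code of this era typically feature-detects it. A sketch with a trivial fallback (the helper name is arbitrary):

    function stringIncludes(haystack, needle) {
      if (typeof String.prototype.includes === "function") {
        return haystack.includes(needle);  // native, when --harmony-strings is on
      }
      return haystack.indexOf(needle) !== -1;  // fallback
    }
    console.log(stringIncludes("harmony", "mon"));  // true
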
index 3c0f18c..3a5bc89 100644 (file)
@@ -109,7 +109,7 @@ module R {
   assertThrows(function() { l }, ReferenceError)
   assertThrows(function() { R.l }, ReferenceError)
 
-  assertThrows(function() { eval("c = -1") }, SyntaxError)
+  assertThrows(function() { eval("c = -1") }, TypeError)
   assertThrows(function() { R.c = -2 }, TypeError)
 
   // Initialize first bunch of variables.
@@ -129,7 +129,7 @@ module R {
   assertEquals(-4, R.v = -4)
   assertEquals(-3, l = -3)
   assertEquals(-4, R.l = -4)
-  assertThrows(function() { eval("c = -3") }, SyntaxError)
+  assertThrows(function() { eval("c = -3") }, TypeError)
   assertThrows(function() { R.c = -4 }, TypeError)
 
   assertEquals(-4, v)
diff --git a/deps/v8/test/mjsunit/harmony/object-literals-super.js b/deps/v8/test/mjsunit/harmony/object-literals-super.js
new file mode 100644 (file)
index 0000000..ec22b8a
--- /dev/null
@@ -0,0 +1,168 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-classes --allow-natives-syntax
+
+
+(function TestHomeObject() {
+  var object = {
+    method() {
+      return super.method();
+    },
+    get getter() {
+      return super.getter;
+    },
+    set setter(v) {
+      super.setter = v;
+    },
+    get accessor() {
+      return super.accessor;
+    },
+    set accessor(v) {
+      super.accessor = v;
+    },
+    property: function() {
+      super.property();
+    },
+    propertyWithParen: (function() {
+      super.property();
+    }),
+    propertyWithParens: ((function() {
+      super.property();
+    })),
+
+    methodNoSuper() {},
+    get getterNoSuper() {},
+    set setterNoSuper(v) {},
+    get accessorNoSuper() {},
+    set accessorNoSuper(v) {},
+    propertyNoSuper: function() {},
+    propertyWithParenNoSuper: (function() {}),
+    propertyWithParensNoSuper: ((function() {}))
+  };
+
+  assertEquals(object, object.method[%HomeObjectSymbol()]);
+  var desc = Object.getOwnPropertyDescriptor(object, 'getter');
+  assertEquals(object, desc.get[%HomeObjectSymbol()]);
+  desc = Object.getOwnPropertyDescriptor(object, 'setter');
+  assertEquals(object, desc.set[%HomeObjectSymbol()]);
+  desc = Object.getOwnPropertyDescriptor(object, 'accessor');
+  assertEquals(object, desc.get[%HomeObjectSymbol()]);
+  assertEquals(object, desc.set[%HomeObjectSymbol()]);
+  assertEquals(object, object.property[%HomeObjectSymbol()]);
+  assertEquals(object, object.propertyWithParen[%HomeObjectSymbol()]);
+  assertEquals(object, object.propertyWithParens[%HomeObjectSymbol()]);
+
+  assertEquals(undefined, object.methodNoSuper[%HomeObjectSymbol()]);
+  desc = Object.getOwnPropertyDescriptor(object, 'getterNoSuper');
+  assertEquals(undefined, desc.get[%HomeObjectSymbol()]);
+  desc = Object.getOwnPropertyDescriptor(object, 'setterNoSuper');
+  assertEquals(undefined, desc.set[%HomeObjectSymbol()]);
+  desc = Object.getOwnPropertyDescriptor(object, 'accessorNoSuper');
+  assertEquals(undefined, desc.get[%HomeObjectSymbol()]);
+  assertEquals(undefined, desc.set[%HomeObjectSymbol()]);
+  assertEquals(undefined, object.propertyNoSuper[%HomeObjectSymbol()]);
+  assertEquals(undefined, object.propertyWithParenNoSuper[%HomeObjectSymbol()]);
+  assertEquals(undefined,
+               object.propertyWithParensNoSuper[%HomeObjectSymbol()]);
+})();
+
+
+(function TestMethod() {
+  var object = {
+    __proto__: {
+      method(x) {
+        return 'proto' + x;
+      }
+    },
+    method(x) {
+      return super.method(x);
+    }
+  };
+  assertEquals('proto42', object.method(42));
+})();
+
+
+(function TestGetter() {
+  var object = {
+    __proto__: {
+      _x: 42,
+      get x() {
+        return 'proto' + this._x;
+      }
+    },
+    get x() {
+      return super.x;
+    }
+  };
+  assertEquals('proto42', object.x);
+})();
+
+
+(function TestSetter() {
+  var object = {
+    __proto__: {
+      _x: 0,
+      set x(v) {
+        return this._x = v;
+      }
+    },
+    set x(v) {
+      super.x = v;
+    }
+  };
+  assertEquals(1, object.x = 1);
+  assertEquals(1, object._x);
+  assertEquals(0, Object.getPrototypeOf(object)._x);
+})();
+
+
+(function TestMethodAsProperty() {
+  var object = {
+    __proto__: {
+      method: function(x) {
+        return 'proto' + x;
+      }
+    },
+    method: function(x) {
+      return super.method(x);
+    }
+  };
+  assertEquals('proto42', object.method(42));
+})();
+
+
+(function TestOptimized() {
+  // Object literals without any accessors get optimized.
+  var object = {
+    method() {
+      return super.toString;
+    }
+  };
+  assertEquals(Object.prototype.toString, object.method());
+})();
+
+
+(function TestConciseGenerator() {
+  var o = {
+    __proto__: {
+      m() {
+        return 42;
+      }
+    },
+    *g() {
+      yield super.m();
+    },
+    g2: function*() {
+      yield super.m() + 1;
+    },
+    g3: (function*() {
+      yield super.m() + 2;
+    })
+  };
+
+  assertEquals(42, o.g().next().value);
+  assertEquals(43, o.g2().next().value);
+  assertEquals(44, o.g3().next().value);
+})();
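
The common thread in these tests: super.m() starts its lookup at the prototype of the home object, the literal that syntactically contains the method, while this remains the actual receiver. Sketch:

    var obj = {
      __proto__: {
        describe: function() { return "proto sees x = " + this.x; }
      },
      x: 42,
      describe() {
        // Lookup starts above the home object, but `this` is still obj.
        return super.describe();
      }
    };
    console.log(obj.describe());  // "proto sees x = 42"
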
index 191bad3..8a03ef4 100644 (file)
@@ -74,12 +74,17 @@ function TestUseProxyAsUnscopables() {
   var calls = 0;
   var proxy = Proxy.create({
     has: function(key) {
-      calls++;
-      assertEquals('x', key);
-      return calls === 2;
+      assertUnreachable();
     },
     getPropertyDescriptor: function(key) {
-      assertUnreachable();
+      calls++;
+      assertEquals('x', key);
+      return {
+        value: calls === 2 ? true : undefined,
+        configurable: true,
+        enumerable: true,
+        writable: true,
+      };
     }
   });
 
@@ -107,12 +112,12 @@ function TestThrowInHasUnscopables() {
   var calls = 0;
   var proxy = Proxy.create({
     has: function(key) {
-      if (calls++ === 0) {
-        throw new CustomError();
-      }
       assertUnreachable();
     },
     getPropertyDescriptor: function(key) {
+      if (calls++ === 0) {
+        throw new CustomError();
+      }
       assertUnreachable();
     }
   });
diff --git a/deps/v8/test/mjsunit/harmony/regexp-flags.js b/deps/v8/test/mjsunit/harmony/regexp-flags.js
new file mode 100644 (file)
index 0000000..475fda4
--- /dev/null
@@ -0,0 +1,61 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-regexps
+
+RegExp.prototype.flags = 'setter should be undefined';
+
+assertEquals('', RegExp('').flags);
+assertEquals('', /./.flags);
+assertEquals('gimy', RegExp('', 'ygmi').flags);
+assertEquals('gimy', /foo/ymig.flags);
+
+// TODO(dslomov): When support for the `u` flag is added, uncomment the first
+// line below and remove the second line.
+//assertEquals(RegExp('', 'yumig').flags, 'gimuy');
+assertThrows(function() { RegExp('', 'yumig').flags; }, SyntaxError);
+
+var descriptor = Object.getOwnPropertyDescriptor(RegExp.prototype, 'flags');
+assertTrue(descriptor.configurable);
+assertFalse(descriptor.enumerable);
+assertInstanceof(descriptor.get, Function);
+assertEquals(undefined, descriptor.set);
+
+function testGenericFlags(object) {
+  return descriptor.get.call(object);
+}
+
+assertEquals('', testGenericFlags({}));
+assertEquals('i', testGenericFlags({ ignoreCase: true }));
+assertEquals('uy', testGenericFlags({ global: 0, sticky: 1, unicode: 1 }));
+assertEquals('m', testGenericFlags({ __proto__: { multiline: true } }));
+assertThrows(function() { testGenericFlags(); }, TypeError);
+assertThrows(function() { testGenericFlags(undefined); }, TypeError);
+assertThrows(function() { testGenericFlags(null); }, TypeError);
+assertThrows(function() { testGenericFlags(true); }, TypeError);
+assertThrows(function() { testGenericFlags(false); }, TypeError);
+assertThrows(function() { testGenericFlags(''); }, TypeError);
+assertThrows(function() { testGenericFlags(42); }, TypeError);
+
+var counter = 0;
+var map = {};
+var object = {
+  get global() {
+    map.g = counter++;
+  },
+  get ignoreCase() {
+    map.i = counter++;
+  },
+  get multiline() {
+    map.m = counter++;
+  },
+  get unicode() {
+    map.u = counter++;
+  },
+  get sticky() {
+    map.y = counter++;
+  }
+};
+testGenericFlags(object);
+assertEquals({ g: 0, i: 1, m: 2, u: 3, y: 4 }, map);
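+
+// A rough model of the generic getter the ordering test pins down: each
+// flag property is read from the receiver in the fixed order g, i, m, u,
+// y, and the one-letter code is appended when the value is truthy.
+function sketchFlags(re) {
+  var result = '';
+  if (re.global) result += 'g';
+  if (re.ignoreCase) result += 'i';
+  if (re.multiline) result += 'm';
+  if (re.unicode) result += 'u';
+  if (re.sticky) result += 'y';
+  return result;
+}
+assertEquals('gi', sketchFlags({ global: true, ignoreCase: 1 }));
+assertEquals('m', sketchFlags({ __proto__: { multiline: true } }));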
index 31c2e55..e2411d2 100644 (file)
@@ -27,5 +27,5 @@
 
 // Flags: --harmony-scoping
 
-assertThrows("'use strict'; (function f() { f = 123; })", SyntaxError);
-assertThrows("(function f() { 'use strict'; f = 123; })", SyntaxError);
+assertThrows("'use strict'; (function f() { f = 123; })()", TypeError);
+assertThrows("(function f() { 'use strict'; f = 123; })()", TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-2858.js b/deps/v8/test/mjsunit/harmony/regress/regress-2858.js
new file mode 100644 (file)
index 0000000..4ce9478
--- /dev/null
@@ -0,0 +1,27 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-scoping
+"use strict";
+
+function f() {
+    var y = 1;
+    var q1;
+    var q;
+    var z = new Error();
+    try {
+        throw z;
+    } catch (y) {
+      assertTrue(z === y);
+      q1 = function() { return y; }
+      var y = 15;
+      q = function() { return y; }
+      assertSame(15, y);
+    }
+    assertSame(1, y);
+    assertSame(15, q1());
+    assertSame(15, q());
+}
+
+f();
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-3683.js b/deps/v8/test/mjsunit/harmony/regress/regress-3683.js
new file mode 100644 (file)
index 0000000..a00d82b
--- /dev/null
@@ -0,0 +1,84 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-scoping
+
+"use strict";
+
+// Simplest case
+var count = 0;
+for (let x = 0; x < 10;) {
+  x++;
+  count++;
+  continue;
+}
+assertEquals(10, count);
+
+// Labeled
+count = 0;
+label: for (let x = 0; x < 10;) {
+  while (true) {
+    x++;
+    count++;
+    continue label;
+  }
+}
+assertEquals(10, count);
+
+// Simple and labeled
+count = 0;
+label: for (let x = 0; x < 10;) {
+  x++;
+  count++;
+  continue label;
+}
+assertEquals(10, count);
+
+// Shadowing loop variable in same scope as continue
+count = 0;
+for (let x = 0; x < 10;) {
+  x++;
+  count++;
+  {
+    let x = "hello";
+    continue;
+  }
+}
+assertEquals(10, count);
+
+// Nested let-bound for loops, inner continue
+count = 0;
+for (let x = 0; x < 10;) {
+  x++;
+  for (let y = 0; y < 2;) {
+    y++;
+    count++;
+    continue;
+  }
+}
+assertEquals(20, count);
+
+// Nested let-bound for loops, outer continue
+count = 0;
+for (let x = 0; x < 10;) {
+  x++;
+  for (let y = 0; y < 2;) {
+    y++;
+    count++;
+  }
+  continue;
+}
+assertEquals(20, count);
+
+// Nested let-bound for loops, labeled continue
+count = 0;
+outer: for (let x = 0; x < 10;) {
+  x++;
+  for (let y = 0; y < 2;) {
+    y++;
+    count++;
+    if (y == 2) continue outer;
+  }
+}
+assertEquals(20, count);
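+
+// A sketch of the semantics behind these cases: every iteration of a
+// let-bound for loop gets a fresh binding of the loop variable, and
+// `continue` must carry the current value over into the next copy.
+var fns = [];
+for (let x = 0; x < 3; x++) {
+  fns.push(function() { return x; });
+  continue;
+}
+assertEquals(0, fns[0]());
+assertEquals(1, fns[1]());
+assertEquals(2, fns[2]());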
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-3741.js b/deps/v8/test/mjsunit/harmony/regress/regress-3741.js
new file mode 100644 (file)
index 0000000..8a9dd9e
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-scoping --allow-natives-syntax
+'use strict';
+function f24(deopt) {
+  let x = 1;
+  {
+    let x = 2;
+    {
+      let x = 3;
+      assertEquals(3, x);
+    }
+    deopt + 1;
+    assertEquals(2, x);
+  }
+  assertEquals(1, x);
+}
+
+
+for (var j = 0; j < 10; ++j) {
+  f24(12);
+}
+%OptimizeFunctionOnNextCall(f24);
+f24({});
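+
+// The pattern above warms f24 up with small-integer feedback, optimizes
+// it, then passes an object so that `deopt + 1` deoptimizes the function
+// while the shadowing block-scoped `x` bindings are live.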
diff --git a/deps/v8/test/mjsunit/harmony/regress/regress-3750.js b/deps/v8/test/mjsunit/harmony/regress/regress-3750.js
new file mode 100644 (file)
index 0000000..d1f21f9
--- /dev/null
@@ -0,0 +1,8 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+//
+// Flags: --harmony-classes
+'use strict';
+class Example { }
+Object.observe(Example.prototype, function(){});
diff --git a/deps/v8/test/mjsunit/harmony/string-contains.js b/deps/v8/test/mjsunit/harmony/string-contains.js
deleted file mode 100644 (file)
index b853ed9..0000000
+++ /dev/null
@@ -1,166 +0,0 @@
-// Copyright 2013 the V8 project authors. All rights reserved.
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-//     * Redistributions of source code must retain the above copyright
-//       notice, this list of conditions and the following disclaimer.
-//     * Redistributions in binary form must reproduce the above
-//       copyright notice, this list of conditions and the following
-//       disclaimer in the documentation and/or other materials provided
-//       with the distribution.
-//     * Neither the name of Google Inc. nor the names of its
-//       contributors may be used to endorse or promote products derived
-//       from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// Flags: --harmony-strings
-
-assertEquals(1, String.prototype.contains.length);
-
-var reString = "asdf[a-z]+(asdf)?";
-assertTrue(reString.contains("[a-z]+"));
-assertTrue(reString.contains("(asdf)?"));
-
-// Random greek letters
-var twoByteString = "\u039a\u0391\u03a3\u03a3\u0395";
-
-// Test single char pattern
-assertTrue(twoByteString.contains("\u039a"), "Lamda");
-assertTrue(twoByteString.contains("\u0391"), "Alpha");
-assertTrue(twoByteString.contains("\u03a3"), "First Sigma");
-assertTrue(twoByteString.contains("\u03a3",3), "Second Sigma");
-assertTrue(twoByteString.contains("\u0395"), "Epsilon");
-assertFalse(twoByteString.contains("\u0392"), "Not beta");
-
-// Test multi-char pattern
-assertTrue(twoByteString.contains("\u039a\u0391"), "lambda Alpha");
-assertTrue(twoByteString.contains("\u0391\u03a3"), "Alpha Sigma");
-assertTrue(twoByteString.contains("\u03a3\u03a3"), "Sigma Sigma");
-assertTrue(twoByteString.contains("\u03a3\u0395"), "Sigma Epsilon");
-
-assertFalse(twoByteString.contains("\u0391\u03a3\u0395"),
-    "Not Alpha Sigma Epsilon");
-
-//single char pattern
-assertTrue(twoByteString.contains("\u0395"));
-
-assertThrows("String.prototype.contains.call(null, 'test')", TypeError);
-assertThrows("String.prototype.contains.call(null, null)", TypeError);
-assertThrows("String.prototype.contains.call(undefined, undefined)", TypeError);
-
-assertThrows("String.prototype.contains.apply(null, ['test'])", TypeError);
-assertThrows("String.prototype.contains.apply(null, [null])", TypeError);
-assertThrows("String.prototype.contains.apply(undefined, [undefined])", TypeError);
-
-var TEST_INPUT = [{
-  msg: "Empty string", val: ""
-}, {
-  msg: "Number 1234.34", val: 1234.34
-}, {
-  msg: "Integer number 0", val: 0
-}, {
-  msg: "Negative number -1", val: -1
-}, {
-  msg: "Boolean true", val: true
-}, {
-  msg: "Boolean false", val: false
-}, {
-  msg: "Empty array []", val: []
-}, {
-  msg: "Empty object {}", val: {}
-}, {
-  msg: "Array of size 3", val: new Array(3)
-}];
-
-var i = 0;
-var l = TEST_INPUT.length;
-
-for (; i < l; i++) {
-  var e = TEST_INPUT[i];
-  var v = e.val;
-  var s = String(v);
-  assertTrue(s.contains(v), e.msg);
-  assertTrue(String.prototype.contains.call(v, v), e.msg);
-  assertTrue(String.prototype.contains.apply(v, [v]), e.msg);
-}
-
-// Test cases found in FF
-assertTrue("abc".contains("a"));
-assertTrue("abc".contains("b"));
-assertTrue("abc".contains("abc"));
-assertTrue("abc".contains("bc"));
-assertFalse("abc".contains("d"));
-assertFalse("abc".contains("abcd"));
-assertFalse("abc".contains("ac"));
-assertTrue("abc".contains("abc", 0));
-assertTrue("abc".contains("bc", 0));
-assertFalse("abc".contains("de", 0));
-assertTrue("abc".contains("bc", 1));
-assertTrue("abc".contains("c", 1));
-assertFalse("abc".contains("a", 1));
-assertFalse("abc".contains("abc", 1));
-assertTrue("abc".contains("c", 2));
-assertFalse("abc".contains("d", 2));
-assertFalse("abc".contains("dcd", 2));
-assertFalse("abc".contains("a", 42));
-assertFalse("abc".contains("a", Infinity));
-assertTrue("abc".contains("ab", -43));
-assertFalse("abc".contains("cd", -42));
-assertTrue("abc".contains("ab", -Infinity));
-assertFalse("abc".contains("cd", -Infinity));
-assertTrue("abc".contains("ab", NaN));
-assertFalse("abc".contains("cd", NaN));
-assertFalse("xyzzy".contains("zy\0", 2));
-
-var dots = Array(10000).join(".");
-assertFalse(dots.contains("\x01", 10000));
-assertFalse(dots.contains("\0", 10000));
-
-var myobj = {
-  toString: function () {
-    return "abc";
-  },
-  contains: String.prototype.contains
-};
-assertTrue(myobj.contains("abc"));
-assertFalse(myobj.contains("cd"));
-
-var gotStr = false;
-var gotPos = false;
-myobj = {
-  toString: function () {
-    assertFalse(gotPos);
-    gotStr = true;
-    return "xyz";
-  },
-  contains: String.prototype.contains
-};
-
-assertEquals("foo[a-z]+(bar)?".contains("[a-z]+"), true);
-assertThrows("'foo[a-z]+(bar)?'.contains(/[a-z]+/)", TypeError);
-assertThrows("'foo/[a-z]+/(bar)?'.contains(/[a-z]+/)", TypeError);
-assertEquals("foo[a-z]+(bar)?".contains("(bar)?"), true);
-assertThrows("'foo[a-z]+(bar)?'.contains(/(bar)?/)", TypeError);
-assertThrows("'foo[a-z]+/(bar)?/'.contains(/(bar)?/)", TypeError);
-
-assertThrows("String.prototype.contains.call({ 'toString': function() { " +
-  "throw RangeError(); } }, /./)", RangeError);
-assertThrows("String.prototype.contains.call({ 'toString': function() { " +
-  "return 'abc'; } }, /./)", TypeError);
-
-assertThrows("String.prototype.contains.apply({ 'toString': function() { " +
-  "throw RangeError(); } }, [/./])", RangeError);
-assertThrows("String.prototype.contains.apply({ 'toString': function() { " +
-  "return 'abc'; } }, [/./])", TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/string-includes.js b/deps/v8/test/mjsunit/harmony/string-includes.js
new file mode 100644 (file)
index 0000000..33ed8ea
--- /dev/null
@@ -0,0 +1,166 @@
+// Copyright 2013 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --harmony-strings
+
+assertEquals(1, String.prototype.includes.length);
+
+var reString = "asdf[a-z]+(asdf)?";
+assertTrue(reString.includes("[a-z]+"));
+assertTrue(reString.includes("(asdf)?"));
+
+// Random Greek letters
+var twoByteString = "\u039a\u0391\u03a3\u03a3\u0395";
+
+// Test single char pattern
+assertTrue(twoByteString.includes("\u039a"), "Lamda");
+assertTrue(twoByteString.includes("\u0391"), "Alpha");
+assertTrue(twoByteString.includes("\u03a3"), "First Sigma");
+assertTrue(twoByteString.includes("\u03a3",3), "Second Sigma");
+assertTrue(twoByteString.includes("\u0395"), "Epsilon");
+assertFalse(twoByteString.includes("\u0392"), "Not beta");
+
+// Test multi-char pattern
+assertTrue(twoByteString.includes("\u039a\u0391"), "lambda Alpha");
+assertTrue(twoByteString.includes("\u0391\u03a3"), "Alpha Sigma");
+assertTrue(twoByteString.includes("\u03a3\u03a3"), "Sigma Sigma");
+assertTrue(twoByteString.includes("\u03a3\u0395"), "Sigma Epsilon");
+
+assertFalse(twoByteString.includes("\u0391\u03a3\u0395"),
+    "Not Alpha Sigma Epsilon");
+
+// Single char pattern
+assertTrue(twoByteString.includes("\u0395"));
+
+assertThrows("String.prototype.includes.call(null, 'test')", TypeError);
+assertThrows("String.prototype.includes.call(null, null)", TypeError);
+assertThrows("String.prototype.includes.call(undefined, undefined)", TypeError);
+
+assertThrows("String.prototype.includes.apply(null, ['test'])", TypeError);
+assertThrows("String.prototype.includes.apply(null, [null])", TypeError);
+assertThrows("String.prototype.includes.apply(undefined, [undefined])", TypeError);
+
+var TEST_INPUT = [{
+  msg: "Empty string", val: ""
+}, {
+  msg: "Number 1234.34", val: 1234.34
+}, {
+  msg: "Integer number 0", val: 0
+}, {
+  msg: "Negative number -1", val: -1
+}, {
+  msg: "Boolean true", val: true
+}, {
+  msg: "Boolean false", val: false
+}, {
+  msg: "Empty array []", val: []
+}, {
+  msg: "Empty object {}", val: {}
+}, {
+  msg: "Array of size 3", val: new Array(3)
+}];
+
+var i = 0;
+var l = TEST_INPUT.length;
+
+for (; i < l; i++) {
+  var e = TEST_INPUT[i];
+  var v = e.val;
+  var s = String(v);
+  assertTrue(s.includes(v), e.msg);
+  assertTrue(String.prototype.includes.call(v, v), e.msg);
+  assertTrue(String.prototype.includes.apply(v, [v]), e.msg);
+}
+
+// Test cases found in FF
+assertTrue("abc".includes("a"));
+assertTrue("abc".includes("b"));
+assertTrue("abc".includes("abc"));
+assertTrue("abc".includes("bc"));
+assertFalse("abc".includes("d"));
+assertFalse("abc".includes("abcd"));
+assertFalse("abc".includes("ac"));
+assertTrue("abc".includes("abc", 0));
+assertTrue("abc".includes("bc", 0));
+assertFalse("abc".includes("de", 0));
+assertTrue("abc".includes("bc", 1));
+assertTrue("abc".includes("c", 1));
+assertFalse("abc".includes("a", 1));
+assertFalse("abc".includes("abc", 1));
+assertTrue("abc".includes("c", 2));
+assertFalse("abc".includes("d", 2));
+assertFalse("abc".includes("dcd", 2));
+assertFalse("abc".includes("a", 42));
+assertFalse("abc".includes("a", Infinity));
+assertTrue("abc".includes("ab", -43));
+assertFalse("abc".includes("cd", -42));
+assertTrue("abc".includes("ab", -Infinity));
+assertFalse("abc".includes("cd", -Infinity));
+assertTrue("abc".includes("ab", NaN));
+assertFalse("abc".includes("cd", NaN));
+assertFalse("xyzzy".includes("zy\0", 2));
+
+var dots = Array(10000).join(".");
+assertFalse(dots.includes("\x01", 10000));
+assertFalse(dots.includes("\0", 10000));
+
+var myobj = {
+  toString: function () {
+    return "abc";
+  },
+  includes: String.prototype.includes
+};
+assertTrue(myobj.includes("abc"));
+assertFalse(myobj.includes("cd"));
+
+var gotStr = false;
+var gotPos = false;
+myobj = {
+  toString: function () {
+    assertFalse(gotPos);
+    gotStr = true;
+    return "xyz";
+  },
+  includes: String.prototype.includes
+};
+
+assertEquals("foo[a-z]+(bar)?".includes("[a-z]+"), true);
+assertThrows("'foo[a-z]+(bar)?'.includes(/[a-z]+/)", TypeError);
+assertThrows("'foo/[a-z]+/(bar)?'.includes(/[a-z]+/)", TypeError);
+assertEquals("foo[a-z]+(bar)?".includes("(bar)?"), true);
+assertThrows("'foo[a-z]+(bar)?'.includes(/(bar)?/)", TypeError);
+assertThrows("'foo[a-z]+/(bar)?/'.includes(/(bar)?/)", TypeError);
+
+assertThrows("String.prototype.includes.call({ 'toString': function() { " +
+  "throw RangeError(); } }, /./)", RangeError);
+assertThrows("String.prototype.includes.call({ 'toString': function() { " +
+  "return 'abc'; } }, /./)", TypeError);
+
+assertThrows("String.prototype.includes.apply({ 'toString': function() { " +
+  "throw RangeError(); } }, [/./])", RangeError);
+assertThrows("String.prototype.includes.apply({ 'toString': function() { " +
+  "return 'abc'; } }, [/./])", TypeError);
diff --git a/deps/v8/test/mjsunit/harmony/string-raw.js b/deps/v8/test/mjsunit/harmony/string-raw.js
new file mode 100644 (file)
index 0000000..28e2af9
--- /dev/null
@@ -0,0 +1,258 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-templates
+
+(function testStringRawArity() {
+  assertEquals(1, String.raw.length);
+})();
+
+
+(function testStringRawCallSiteToObject() {
+  assertThrows("String.raw()", TypeError);
+})();
+
+
+(function testStringRawCallSiteRawToObject() {
+  assertThrows("String.raw([])", TypeError);
+})();
+
+
+(function testStringRawUndefinedLength() {
+  var callSiteObj = [];
+  callSiteObj.raw = {};
+  assertEquals("", String.raw(callSiteObj));
+
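+  // "lengt" below is deliberately misspelled so that raw.length reads as
+  // undefined.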
+  callSiteObj.raw = { lengt: 0 };
+  assertEquals("", String.raw(callSiteObj));
+})();
+
+
+(function testStringRawZeroLength() {
+  var callSiteObj = [];
+  callSiteObj.raw = { length: 0 };
+  assertEquals("", String.raw(callSiteObj));
+  assertEquals("", String.raw(callSiteObj, "a", "b", "c"));
+
+  callSiteObj.raw = [];
+  assertEquals("", String.raw(callSiteObj));
+  assertEquals("", String.raw(callSiteObj, "a", "b", "c"));
+})();
+
+
+(function testStringRawNegativeLength() {
+  var callSiteObj = [];
+  callSiteObj.raw = { length: -85 };
+  assertEquals("", String.raw(callSiteObj));
+  assertEquals("", String.raw(callSiteObj, "a", "b", "c"));
+
+  callSiteObj.raw = [];
+  assertEquals("", String.raw(callSiteObj));
+  assertEquals("", String.raw(callSiteObj, "a", "b", "c"));
+})();
+
+
+(function testStringRawNaNLength() {
+  var callSiteObj = [];
+  callSiteObj.raw = { length: NaN };
+  assertEquals("", String.raw(callSiteObj));
+  assertEquals("", String.raw(callSiteObj, "a", "b", "c"));
+
+  callSiteObj.raw = [];
+  assertEquals("", String.raw(callSiteObj));
+  assertEquals("", String.raw(callSiteObj, "a", "b", "c"));
+})();
+
+
+(function testStringRawBasic() {
+  var callSiteObj = [];
+  callSiteObj.raw = ["a"];
+  assertEquals("a", String.raw(callSiteObj));
+})();
+
+
+(function testStringRawNoSubst() {
+  var callSiteObj = [];
+  callSiteObj.raw = ["a", "b"];
+  assertEquals("ab", String.raw(callSiteObj));
+})();
+
+
+(function testStringRawSubst() {
+  var callSiteObj = [];
+  callSiteObj.raw = ["a", "b"];
+  assertEquals("a!b", String.raw(callSiteObj, "!"));
+
+  callSiteObj.raw = ["a", "b", "c"];
+  assertEquals("abc", String.raw(callSiteObj));
+
+  callSiteObj.raw = ["a", "b", "c"];
+  assertEquals("a!bc", String.raw(callSiteObj, "!"));
+
+  callSiteObj.raw = ["a", "b", "c"];
+  assertEquals("a!b?c", String.raw(callSiteObj, "!", "?"));
+
+  callSiteObj.raw = ["\n", "\r\n", "\r"];
+  assertEquals("\nx\r\ny\r", String.raw(callSiteObj, "x", "y"));
+
+  callSiteObj.raw = ["\n", "\r\n", "\r"];
+  assertEquals("\n\r\r\r\n\n\r", String.raw(callSiteObj, "\r\r", "\n"));
+})();
+
+
+(function testStringRawArrayLikeSubst() {
+  var callSiteObj = [];
+  callSiteObj.raw = {"length": 2, "0": "a", "1": "b", "2": "c"};
+  assertEquals("axb", String.raw(callSiteObj, "x", "y"));
+
+  callSiteObj.raw = {"length": 4, "0": "a", "1": "b", "2": "c"};
+  assertEquals("axbycundefined", String.raw(callSiteObj, "x", "y"));
+})();
+
+
+(function testStringRawAccessors() {
+  var callSiteObj = {};
+  callSiteObj.raw = {};
+  Object.defineProperties(callSiteObj, {
+    "length": {
+      get: function() { assertUnreachable(); },
+      set: function(v) { assertUnreachable(); }
+    },
+    "0": {
+      get: function() { assertUnreachable(); },
+      set: function(v) { assertUnreachable(); }
+    },
+    "1": {
+      get: function() { assertUnreachable(); },
+      set: function(v) { assertUnreachable(); }
+    }
+  });
+  Object.defineProperties(callSiteObj.raw, {
+    "length": {
+      get: function() { return 2; },
+      set: function(v) { assertUnreachable(); }
+    },
+    "0": {
+      get: function() { return "getter values"; },
+      set: function(v) { assertUnreachable(); }
+    },
+    "1": {
+      get: function() { return "are nice"; },
+      set: function(v) { assertUnreachable(); }
+    }
+  });
+  assertEquals("getter values are nice", String.raw(callSiteObj, " "));
+})();
+
+
+(function testStringRawHoleyArray() {
+  var callSiteObj = [];
+  callSiteObj.raw = ["1."];
+  callSiteObj.raw[2] = ".2";
+  assertEquals("1.undefined.2", String.raw(callSiteObj));
+})();
+
+
+(function testStringRawAccessorThrows() {
+  var callSiteObj = [];
+  callSiteObj.raw = [1];
+  function MyError() {}
+  Object.defineProperty(callSiteObj.raw, "0", {
+    get: function() { throw new MyError(); }
+  });
+  assertThrows(function() { String.raw(callSiteObj); }, MyError);
+})();
+
+
+(function testStringRawToStringSafe() {
+  var callSiteObj = [];
+  callSiteObj.raw = [null, undefined, 1, "str", true, false, NaN, Infinity, {}];
+  assertEquals("nullundefined1strtruefalseNaNInfinity[object Object]",
+               String.raw(callSiteObj));
+
+  callSiteObj.raw = ["0", "1", "2", "3", "4", "5", "6", "7", "8", "9"];
+  assertEquals("0null1undefined213str4true5false6NaN7Infinity8[object Object]9",
+               String.raw(callSiteObj, null, void 0, 1, "str", true, false,
+               NaN, Infinity, {}));
+})();
+
+
+(function testStringRawToStringSymbolThrows() {
+  var callSiteObj = [];
+  callSiteObj.raw = [Symbol("foo")];
+  assertThrows(function() {
+    String.raw(callSiteObj);
+  }, TypeError);
+
+  callSiteObj.raw = ["1", "2"];
+  assertThrows(function() {
+    String.raw(callSiteObj, Symbol("foo"));
+  }, TypeError);
+})();
+
+
+(function testStringRawToStringThrows() {
+  var callSiteObj = [];
+  var thrower = {};
+  function MyError() {}
+  thrower.toString = function() {
+    throw new MyError();
+  }
+
+  callSiteObj.raw = [thrower];
+  assertThrows(function() {
+    String.raw(callSiteObj);
+  }, MyError);
+
+  callSiteObj.raw = ["1", "2"];
+  assertThrows(function() {
+    String.raw(callSiteObj, thrower);
+  }, MyError);
+})();
+
+
+(function testStringRawToStringValueOfThrows() {
+  var callSiteObj = [];
+  var thrower = {};
+  function MyError() {}
+  thrower.toString = null;
+  thrower.valueOf = function() {
+    throw new MyError();
+  }
+
+  callSiteObj.raw = [thrower];
+  assertThrows(function() {
+    String.raw(callSiteObj);
+  }, MyError);
+
+  callSiteObj.raw = ["1", "2"];
+  assertThrows(function() {
+    String.raw(callSiteObj, thrower);
+  }, MyError);
+})();
+
+
+(function testStringRawOrder() {
+  var order = [];
+  var callSiteObj = [];
+  callSiteObj.raw = {};
+  function arg(v) {
+    var result = {};
+    result.toString = null;
+    result.valueOf = function() { order.push("arg" + v); return v; }
+    return result;
+  }
+
+  Object.defineProperty(callSiteObj.raw, "length", {
+    get: function() { order.push("length"); return 3; }
+  });
+  [1, 3, 5].forEach(function(v, i) {
+    Object.defineProperty(callSiteObj.raw, i, {
+      get: function() { order.push("raw" + v); return v; }
+    });
+  });
+
+  assertEquals("12345", String.raw(callSiteObj, arg(2), arg(4), arg(6)));
+  assertEquals(["length", "raw1", "arg2", "raw3", "arg4", "raw5"], order);
+})();
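+
+
+(function testStringRawSketch() {
+  // A rough model of the algorithm the ordering test above pins down:
+  // read raw.length once, then interleave ToString(raw[i]) with the
+  // ToString'd substitution values.
+  function sketchRaw(callSite) {
+    var raw = callSite.raw;
+    var len = Number(raw.length);
+    len = len > 0 ? Math.floor(len) : 0;  // crude stand-in for ToLength
+    var result = '';
+    for (var i = 0; i < len; i++) {
+      result += String(raw[i]);
+      if (i + 1 < len && i + 1 < arguments.length) {
+        result += String(arguments[i + 1]);
+      }
+    }
+    return result;
+  }
+  assertEquals('a!b', sketchRaw({ raw: ['a', 'b'] }, '!'));
+  assertEquals('', sketchRaw({ raw: { length: -85 } }, 'x'));
+})();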
index d972407..6dcc393 100644 (file)
@@ -1861,3 +1861,61 @@ function Subclass(base, constructor) {
   T1.__proto = null;
   assertThrows(function() { new T1(); }, TypeError);
 }());
+
+
+(function TestSuperCallSyntacticRestriction() {
+  assertThrows(function() {
+    function C() {
+        var y;
+        super();
+    }
+    new C();
+  }, TypeError);
+  assertThrows(function() {
+    function C() {
+      super(this.x);
+    }
+    new C();
+  }, TypeError);
+  assertThrows(function() {
+    function C() {
+      super(this);
+    }
+    new C();
+  }, TypeError);
+  assertThrows(function() {
+    function C() {
+      super(1, 2, Object.getPrototypeOf(this));
+    }
+    new C();
+  }, TypeError);
+  assertThrows(function() {
+    function C() {
+      { super(1, 2); }
+    }; new C();
+  }, TypeError);
+  assertThrows(function() {
+    function C() {
+      if (1) super();
+    }; new C();
+  }, TypeError);
+
+  function C1() {
+    'use strict';
+    super();
+  };
+  new C1();
+
+  function C2() {
+    ; 'use strict';;;;;
+    super();
+  };
+  new C2();
+
+  function C3() {
+    ; 'use strict';;;;;
+    // This is a comment.
+    super();
+  }
+  new C3();
+}());
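+
+// Taken together, these cases suggest the restriction being tested: in
+// this build, `super(...)` inside a function must be the first statement
+// after the directive prologue; earlier declarations, enclosing blocks,
+// and uses of `this` in its arguments all throw TypeError.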
diff --git a/deps/v8/test/mjsunit/harmony/templates.js b/deps/v8/test/mjsunit/harmony/templates.js
new file mode 100644 (file)
index 0000000..c339bb8
--- /dev/null
@@ -0,0 +1,507 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --harmony-templates --harmony-unicode
+
+var num = 5;
+var str = "str";
+function fn() { return "result"; }
+var obj = {
+  num: num,
+  str: str,
+  fn: function() { return "result"; }
+};
+
+(function testBasicExpressions() {
+  assertEquals("foo 5 bar", `foo ${num} bar`);
+  assertEquals("foo str bar", `foo ${str} bar`);
+  assertEquals("foo [object Object] bar", `foo ${obj} bar`);
+  assertEquals("foo result bar", `foo ${fn()} bar`);
+  assertEquals("foo 5 bar", `foo ${obj.num} bar`);
+  assertEquals("foo str bar", `foo ${obj.str} bar`);
+  assertEquals("foo result bar", `foo ${obj.fn()} bar`);
+})();
+
+(function testExpressionsContainingTemplates() {
+  assertEquals("foo bar 5", `foo ${`bar ${num}`}`);
+})();
+
+(function testMultilineTemplates() {
+  assertEquals("foo\n    bar\n    baz", `foo
+    bar
+    baz`);
+
+  assertEquals("foo\n  bar\n  baz", eval("`foo\r\n  bar\r  baz`"));
+})();
+
+(function testLineContinuation() {
+  assertEquals("\n", `\
+
+`);
+})();
+
+(function testTaggedTemplates() {
+  var calls = 0;
+  (function(s) {
+    calls++;
+  })`test`;
+  assertEquals(1, calls);
+
+  calls = 0;
+  // Assert the tag is invoked in the right context.
+  obj = {
+    fn: function() {
+      calls++;
+      assertEquals(obj, this);
+    }
+  };
+
+  obj.fn`test`;
+  assertEquals(1, calls);
+
+  calls = 0;
+  // Simple templates only have a callSiteObj
+  (function(s) {
+    calls++;
+    assertEquals(1, arguments.length);
+  })`test`;
+  assertEquals(1, calls);
+
+  // Templates containing expressions have the values of evaluated expressions
+  calls = 0;
+  (function(site, n, s, o, f, r) {
+    calls++;
+    assertEquals(6, arguments.length);
+    assertEquals("number", typeof n);
+    assertEquals("string", typeof s);
+    assertEquals("object", typeof o);
+    assertEquals("function", typeof f);
+    assertEquals("result", r);
+  })`${num}${str}${obj}${fn}${fn()}`;
+  assertEquals(1, calls);
+
+  // The TV and TRV of NoSubstitutionTemplate :: `` is the empty code unit
+  // sequence.
+  calls = 0;
+  (function(s) {
+    calls++;
+    assertEquals(1, s.length);
+    assertEquals(1, s.raw.length);
+    assertEquals("", s[0]);
+    assertEquals("", s.raw[0]);
+  })``;
+  assertEquals(1, calls);
+
+  // The TV and TRV of TemplateHead :: `${ is the empty code unit sequence.
+  calls = 0;
+  (function(s) {
+    calls++;
+    assertEquals(2, s.length);
+    assertEquals(2, s.raw.length);
+    assertEquals("", s[0]);
+    assertEquals("", s.raw[0]);
+  })`${1}`;
+  assertEquals(1, calls);
+
+  // The TV and TRV of TemplateMiddle :: }${ is the empty code unit sequence.
+  calls = 0;
+  (function(s) {
+    calls++;
+    assertEquals(3, s.length);
+    assertEquals(3, s.raw.length);
+    assertEquals("", s[1]);
+    assertEquals("", s.raw[1]);
+  })`${1}${2}`;
+  assertEquals(1, calls);
+
+  // The TV and TRV of TemplateTail :: }` is the empty code unit sequence.
+  calls = 0;
+  (function(s) {
+    calls++;
+    assertEquals(2, s.length);
+    assertEquals(2, s.raw.length);
+    assertEquals("", s[1]);
+    assertEquals("", s.raw[1]);
+  })`${1}`;
+  assertEquals(1, calls);
+
+  // The TV of NoSubstitutionTemplate :: ` TemplateCharacters ` is the TV of
+  // TemplateCharacters.
+  calls = 0;
+  (function(s) { calls++; assertEquals("foo", s[0]); })`foo`;
+  assertEquals(1, calls);
+
+  // The TV of TemplateHead :: ` TemplateCharacters ${ is the TV of
+  // TemplateCharacters.
+  calls = 0;
+  (function(s) { calls++; assertEquals("foo", s[0]); })`foo${1}`;
+  assertEquals(1, calls);
+
+  // The TV of TemplateMiddle :: } TemplateCharacters ${ is the TV of
+  // TemplateCharacters.
+  calls = 0;
+  (function(s) { calls++; assertEquals("foo", s[1]); })`${1}foo${2}`;
+  assertEquals(1, calls);
+
+  // The TV of TemplateTail :: } TemplateCharacters ` is the TV of
+  // TemplateCharacters.
+  calls = 0;
+  (function(s) { calls++; assertEquals("foo", s[1]); })`${1}foo`;
+  assertEquals(1, calls);
+
+  // The TV of TemplateCharacters :: TemplateCharacter is the TV of
+  // TemplateCharacter.
+  calls = 0;
+  (function(s) { calls++; assertEquals("f", s[0]); })`f`;
+  assertEquals(1, calls);
+
+  // The TV of TemplateCharacter :: $ is the code unit value 0x0024.
+  calls = 0;
+  (function(s) { calls++; assertEquals("$", s[0]); })`$`;
+  assertEquals(1, calls);
+
+  // The TV of TemplateCharacter :: \ EscapeSequence is the CV of
+  // EscapeSequence.
+  calls = 0;
+  (function(s) { calls++; assertEquals("안녕", s[0]); })`\uc548\uB155`;
+  (function(s) { calls++; assertEquals("\xff", s[0]); })`\xff`;
+  (function(s) { calls++; assertEquals("\n", s[0]); })`\n`;
+  assertEquals(3, calls);
+
+  // The TV of TemplateCharacter :: LineContinuation is the TV of
+  // LineContinuation. The TV of LineContinuation :: \ LineTerminatorSequence is
+  // the empty code unit sequence.
+  calls = 0;
+  (function(s) { calls++; assertEquals("", s[0]); })`\
+`;
+  assertEquals(1, calls);
+
+  // The TRV of NoSubstitutionTemplate :: ` TemplateCharacters ` is the TRV of
+  // TemplateCharacters.
+  calls = 0;
+  (function(s) { calls++; assertEquals("test", s.raw[0]); })`test`;
+  assertEquals(1, calls);
+
+  // The TRV of TemplateHead :: ` TemplateCharacters ${ is the TRV of
+  // TemplateCharacters.
+  calls = 0;
+  (function(s) { calls++; assertEquals("test", s.raw[0]); })`test${1}`;
+  assertEquals(1, calls);
+
+  // The TRV of TemplateMiddle :: } TemplateCharacters ${ is the TRV of
+  // TemplateCharacters.
+  calls = 0;
+  (function(s) { calls++; assertEquals("test", s.raw[1]); })`${1}test${2}`;
+  assertEquals(1, calls);
+
+  // The TRV of TemplateTail :: } TemplateCharacters ` is the TRV of
+  // TemplateCharacters.
+  calls = 0;
+  (function(s) { calls++; assertEquals("test", s.raw[1]); })`${1}test`;
+  assertEquals(1, calls);
+
+  // The TRV of TemplateCharacters :: TemplateCharacter is the TRV of
+  // TemplateCharacter.
+  calls = 0;
+  (function(s) { calls++; assertEquals("f", s.raw[0]); })`f`;
+  assertEquals(1, calls);
+
+  // The TRV of TemplateCharacter :: $ is the code unit value 0x0024.
+  calls = 0;
+  (function(s) { calls++; assertEquals("\u0024", s.raw[0]); })`$`;
+  assertEquals(1, calls);
+
+  // The TRV of EscapeSequence :: 0 is the code unit value 0x0030.
+  calls = 0;
+  (function(s) { calls++; assertEquals("\u005C\u0030", s.raw[0]); })`\0`;
+  assertEquals(1, calls);
+
+  // The TRV of TemplateCharacter :: \ EscapeSequence is the sequence consisting
+  // of the code unit value 0x005C followed by the code units of TRV of
+  // EscapeSequence.
+
+  //   The TRV of EscapeSequence :: HexEscapeSequence is the TRV of the
+  //   HexEscapeSequence.
+  calls = 0;
+  (function(s) { calls++; assertEquals("\u005Cxff", s.raw[0]); })`\xff`;
+  assertEquals(1, calls);
+
+  //   The TRV of EscapeSequence :: UnicodeEscapeSequence is the TRV of the
+  //   UnicodeEscapeSequence.
+  calls = 0;
+  (function(s) { calls++; assertEquals("\u005Cuc548", s.raw[0]); })`\uc548`;
+  assertEquals(1, calls);
+
+  //   The TRV of CharacterEscapeSequence :: SingleEscapeCharacter is the TRV of
+  //   the SingleEscapeCharacter.
+  calls = 0;
+  (function(s) { calls++; assertEquals("\u005C\u0027", s.raw[0]); })`\'`;
+  (function(s) { calls++; assertEquals("\u005C\u0022", s.raw[0]); })`\"`;
+  (function(s) { calls++; assertEquals("\u005C\u005C", s.raw[0]); })`\\`;
+  (function(s) { calls++; assertEquals("\u005Cb", s.raw[0]); })`\b`;
+  (function(s) { calls++; assertEquals("\u005Cf", s.raw[0]); })`\f`;
+  (function(s) { calls++; assertEquals("\u005Cn", s.raw[0]); })`\n`;
+  (function(s) { calls++; assertEquals("\u005Cr", s.raw[0]); })`\r`;
+  (function(s) { calls++; assertEquals("\u005Ct", s.raw[0]); })`\t`;
+  (function(s) { calls++; assertEquals("\u005Cv", s.raw[0]); })`\v`;
+  (function(s) { calls++; assertEquals("\u005C`", s.raw[0]); })`\``;
+  assertEquals(10, calls);
+
+  //   The TRV of CharacterEscapeSequence :: NonEscapeCharacter is the CV of the
+  //   NonEscapeCharacter.
+  calls = 0;
+  (function(s) { calls++; assertEquals("\u005Cz", s.raw[0]); })`\z`;
+  assertEquals(1, calls);
+
+  // The TRV of LineTerminatorSequence :: <LF> is the code unit value 0x000A.
+  // The TRV of LineTerminatorSequence :: <CR> is the code unit value 0x000A.
+  // The TRV of LineTerminatorSequence :: <CR><LF> is the sequence consisting of
+  // the code unit value 0x000A.
+  calls = 0;
+  function testRawLineNormalization(cs) {
+    calls++;
+    assertEquals(cs.raw[0], "\n\n\n");
+    assertEquals(cs.raw[1], "\n\n\n");
+  }
+  eval("testRawLineNormalization`\r\n\n\r${1}\r\n\n\r`");
+  assertEquals(1, calls);
+
+  // The TRV of LineContinuation :: \ LineTerminatorSequence is the sequence
+  // consisting of the code unit value 0x005C followed by the code units of TRV
+  // of LineTerminatorSequence.
+  calls = 0;
+  function testRawLineContinuation(cs) {
+    calls++;
+    assertEquals(cs.raw[0], "\u005C\n\u005C\n\u005C\n");
+    assertEquals(cs.raw[1], "\u005C\n\u005C\n\u005C\n");
+  }
+  eval("testRawLineContinuation`\\\r\n\\\n\\\r${1}\\\r\n\\\n\\\r`");
+  assertEquals(1, calls);
+})();
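+
+
+(function testTaggedDesugaringSketch() {
+  // Roughly, tag`a${x}b` invokes tag(callSiteObj, x), where callSiteObj
+  // holds the cooked segments and callSiteObj.raw the raw ones.
+  function tag(cs, v) {
+    return cs[0] + '(' + v + ')' + cs[1];
+  }
+  assertEquals('a(1)b', tag`a${1}b`);
+  assertEquals('a\\n2b', String.raw`a\n${2}b`);
+})();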
+
+
+(function testCallSiteObj() {
+  var calls = 0;
+  function tag(cs) {
+    calls++;
+    assertTrue(cs.hasOwnProperty("raw"));
+    assertTrue(Object.isFrozen(cs));
+    assertTrue(Object.isFrozen(cs.raw));
+    var raw = Object.getOwnPropertyDescriptor(cs, "raw");
+    assertFalse(raw.writable);
+    assertFalse(raw.configurable);
+    assertFalse(raw.enumerable);
+    assertEquals(Array.prototype, Object.getPrototypeOf(cs.raw));
+    assertTrue(Array.isArray(cs.raw));
+    assertEquals(Array.prototype, Object.getPrototypeOf(cs));
+    assertTrue(Array.isArray(cs));
+
+    var cooked0 = Object.getOwnPropertyDescriptor(cs, "0");
+    assertFalse(cooked0.writable);
+    assertFalse(cooked0.configurable);
+    assertTrue(cooked0.enumerable);
+
+    var raw0 = Object.getOwnPropertyDescriptor(cs.raw, "0");
+    assertFalse(raw0.writable);
+    assertFalse(raw0.configurable);
+    assertTrue(raw0.enumerable);
+
+    var length = Object.getOwnPropertyDescriptor(cs, "length");
+    assertFalse(length.writable);
+    assertFalse(length.configurable);
+    assertFalse(length.enumerable);
+
+    length = Object.getOwnPropertyDescriptor(cs.raw, "length");
+    assertFalse(length.writable);
+    assertFalse(length.configurable);
+    assertFalse(length.enumerable);
+  }
+  tag`${1}`;
+  assertEquals(1, calls);
+})();
+
+
+(function testUTF16ByteOrderMark() {
+  assertEquals("\uFEFFtest", `\uFEFFtest`);
+  assertEquals("\uFEFFtest", eval("`\uFEFFtest`"));
+})();
+
+
+(function testStringRawAsTagFn() {
+  assertEquals("\\u0065\\`\\r\\r\\n\\ntestcheck",
+               String.raw`\u0065\`\r\r\n\n${"test"}check`);
+  assertEquals("\\\n\\\n\\\n", eval("String.raw`\\\r\\\r\n\\\n`"));
+  assertEquals("", String.raw``);
+})();
+
+
+(function testCallSiteCaching() {
+  var callSites = [];
+  function tag(cs) { callSites.push(cs); }
+  var a = 1;
+  var b = 2;
+
+  tag`head${a}tail`;
+  tag`head${b}tail`;
+
+  assertEquals(2, callSites.length);
+  assertSame(callSites[0], callSites[1]);
+
+  eval("tag`head${a}tail`");
+  assertEquals(3, callSites.length);
+  assertSame(callSites[1], callSites[2]);
+
+  eval("tag`head${b}tail`");
+  assertEquals(4, callSites.length);
+  assertSame(callSites[2], callSites[3]);
+
+  (new Function("tag", "a", "b", "return tag`head${a}tail`;"))(tag, 1, 2);
+  assertEquals(5, callSites.length);
+  assertSame(callSites[3], callSites[4]);
+
+  (new Function("tag", "a", "b", "return tag`head${b}tail`;"))(tag, 1, 2);
+  assertEquals(6, callSites.length);
+  assertSame(callSites[4], callSites[5]);
+
+  callSites = [];
+
+  tag`foo${a}bar`;
+  tag`foo\${.}bar`;
+  assertEquals(2, callSites.length);
+  assertEquals(2, callSites[0].length);
+  assertEquals(1, callSites[1].length);
+
+  callSites = [];
+
+  eval("tag`\\\r\n\\\n\\\r`");
+  eval("tag`\\\r\n\\\n\\\r`");
+  assertEquals(2, callSites.length);
+  assertSame(callSites[0], callSites[1]);
+  assertEquals("", callSites[0][0]);
+  assertEquals("\\\n\\\n\\\n", callSites[0].raw[0]);
+
+  callSites = [];
+
+  tag`\uc548\ub155`;
+  tag`\uc548\ub155`;
+  assertEquals(2, callSites.length);
+  assertSame(callSites[0], callSites[1]);
+  assertEquals("안녕", callSites[0][0]);
+  assertEquals("\\uc548\\ub155", callSites[0].raw[0]);
+
+  callSites = [];
+
+  tag`\uc548\ub155`;
+  tag`안녕`;
+  assertEquals(2, callSites.length);
+  assertTrue(callSites[0] !== callSites[1]);
+  assertEquals("안녕", callSites[0][0]);
+  assertEquals("\\uc548\\ub155", callSites[0].raw[0]);
+  assertEquals("안녕", callSites[1][0]);
+  assertEquals("안녕", callSites[1].raw[0]);
+
+  // Extra-thorough UTF-8 decoding test.
+  callSites = [];
+
+  tag`Iñtërnâtiônàlizætiøn\u2603\uD83D\uDCA9`;
+  tag`Iñtërnâtiônàlizætiøn☃💩`;
+
+  assertEquals(2, callSites.length);
+  assertTrue(callSites[0] !== callSites[1]);
+  assertEquals("Iñtërnâtiônàlizætiøn☃💩", callSites[0][0]);
+  assertEquals(
+      "Iñtërnâtiônàlizætiøn\\u2603\\uD83D\\uDCA9", callSites[0].raw[0]);
+  assertEquals("Iñtërnâtiônàlizætiøn☃💩", callSites[1][0]);
+  assertEquals("Iñtërnâtiônàlizætiøn☃💩", callSites[1].raw[0]);
+})();
+
+
+(function testExtendedArrayPrototype() {
+  Object.defineProperty(Array.prototype, 0, {
+    set: function() {
+      assertUnreachable();
+    }
+  });
+  function tag(){}
+  tag`a${1}b`;
+})();
+
+
+(function testRawLineNormalization() {
+  function raw0(callSiteObj) {
+    return callSiteObj.raw[0];
+  }
+  assertEquals(eval("raw0`\r`"), "\n");
+  assertEquals(eval("raw0`\r\n`"), "\n");
+  assertEquals(eval("raw0`\r\r\n`"), "\n\n");
+  assertEquals(eval("raw0`\r\n\r\n`"), "\n\n");
+  assertEquals(eval("raw0`\r\r\r\n`"), "\n\n\n");
+})();
+
+
+(function testHarmonyUnicode() {
+  function raw0(callSiteObj) {
+    return callSiteObj.raw[0];
+  }
+  assertEquals(raw0`a\u{62}c`, "a\\u{62}c");
+  assertEquals(raw0`a\u{000062}c`, "a\\u{000062}c");
+  assertEquals(raw0`a\u{0}c`, "a\\u{0}c");
+
+  assertEquals(`a\u{62}c`, "abc");
+  assertEquals(`a\u{000062}c`, "abc");
+})();
+
+
+(function testLiteralAfterRightBrace() {
+  // Regression test for https://code.google.com/p/v8/issues/detail?id=3734
+  function f() {}
+  `abc`;
+
+  function g() {}`def`;
+
+  {
+    // block
+  }
+  `ghi`;
+
+  {
+    // block
+  }`jkl`;
+})();
+
+
+(function testLegacyOctal() {
+  assertEquals('\u0000', `\0`);
+  assertEquals('\u0000a', `\0a`);
+  for (var i = 0; i < 8; i++) {
+    var code = "`\\0" + i + "`";
+    assertThrows(code, SyntaxError);
+    code = "(function(){})" + code;
+    assertThrows(code, SyntaxError);
+  }
+
+  assertEquals('\\0', String.raw`\0`);
+})();
+
+
+(function testSyntaxErrorsNonEscapeCharacter() {
+  assertThrows("`\\x`", SyntaxError);
+  assertThrows("`\\u`", SyntaxError);
+  for (var i = 1; i < 8; i++) {
+    var code = "`\\" + i + "`";
+    assertThrows(code, SyntaxError);
+    code = "(function(){})" + code;
+    assertThrows(code, SyntaxError);
+  }
+})();
+
+
+(function testValidNumericEscapes() {
+  assertEquals("8", `\8`);
+  assertEquals("9", `\9`);
+  assertEquals("\u00008", `\08`);
+  assertEquals("\u00009", `\09`);
+})();
diff --git a/deps/v8/test/mjsunit/harmony/typedarrays-of.js b/deps/v8/test/mjsunit/harmony/typedarrays-of.js
new file mode 100644 (file)
index 0000000..9df1d30
--- /dev/null
@@ -0,0 +1,135 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Based on Mozilla Array.of() tests at http://dxr.mozilla.org/mozilla-central/source/js/src/jit-test/tests/collections
+
+// Flags: --harmony-arrays
+
+var typedArrayConstructors = [
+  Uint8Array,
+  Int8Array,
+  Uint16Array,
+  Int16Array,
+  Uint32Array,
+  Int32Array,
+  Uint8ClampedArray,
+  Float32Array,
+  Float64Array];
+
+
+function TestTypedArrayOf(constructor) {
+  // %TypedArray%.of basics.
+  var a = constructor.of();
+  assertEquals(0, a.length);
+  assertEquals(constructor.prototype, Object.getPrototypeOf(a));
+  assertEquals(false, Array.isArray(a));
+
+  // Items are coerced to numerical values.
+  a = constructor.of(undefined, null, [], true, false, 3.14);
+
+  // For typed arrays of floating point values, values are not rounded.
+  if (constructor === Float32Array || constructor === Float64Array) {
+    assertEquals(NaN, a[0]);
+    assertEquals(0, a[1]);
+    assertEquals(0, a[2]);
+    assertEquals(1, a[3]);
+    assertEquals(0, a[4]);
+    assertEquals(true, Math.abs(a[5] - 3.14) < 1e-6);
+  } else {
+    assertEquals(0, a[0]);
+    assertEquals(0, a[1]);
+    assertEquals(0, a[2]);
+    assertEquals(1, a[3]);
+    assertEquals(0, a[4]);
+    assertEquals(3, a[5]);
+  }
+
+  var aux = [];
+  for (var i = 0; i < 100; i++)
+    aux[i] = i;
+
+  a = constructor.of.apply(constructor, aux);
+  assertEquals(aux.length, a.length);
+  assertArrayEquals(aux, a);
+
+  // %TypedArray%.of can be transplanted to other constructors.
+  var hits = 0;
+  function Bag(length) {
+    assertEquals(arguments.length, 1);
+    assertEquals(length, 2);
+    this.length = length;
+    hits++;
+  }
+  Bag.of = constructor.of;
+
+  hits = 0;
+  a = Bag.of("zero", "one");
+  assertEquals(1, hits);
+  assertEquals(2, a.length);
+  assertArrayEquals(["zero", "one"], a);
+  assertEquals(Bag.prototype, a.__proto__);
+
+  hits = 0;
+  a = constructor.of.call(Bag, "zero", "one");
+  assertEquals(1, hits);
+  assertEquals(2, a.length);
+  assertArrayEquals(["zero", "one"], a);
+  assertEquals(Bag.prototype, a.__proto__);
+
+  // %TypedArray%.of does not trigger prototype setters.
+  // (It defines elements rather than assigning to them.)
+  var status = "pass";
+  Object.defineProperty(constructor.prototype, "0", {
+    set: function(v) { status = "fail"; }
+  });
+  assertEquals(1, constructor.of(1)[0]);
+  assertEquals("pass", status);
+
+  // Note that %TypedArray%.of does not itself trigger the "length" setter,
+  // since it relies on the constructor to set "length" to the value passed
+  // to it. If the constructor does not assign "length", the setter should
+  // not be invoked.
+
+  // Setter on the newly created object.
+  function Pack() {
+    Object.defineProperty(this, "length", {
+      set: function (v) { status = "fail"; }
+    });
+  }
+  Pack.of = constructor.of;
+  var pack = Pack.of("wolves", "cards", "cigarettes", "lies");
+  assertEquals("pass", status);
+
+  // Setter on the new object's prototype.
+  function Bevy() {}
+  Object.defineProperty(Bevy.prototype, "length", {
+    set: function (v) { status = "fail"; }
+  });
+  Bevy.of = constructor.of;
+  var bevy = Bevy.of("quail");
+  assertEquals("pass", status);
+
+  // Check superficial features of %TypedArray%.of.
+  var desc = Object.getOwnPropertyDescriptor(constructor, "of");
+
+  assertEquals(desc.configurable, false);
+  assertEquals(desc.enumerable, false);
+  assertEquals(desc.writable, false);
+  assertEquals(constructor.of.length, 0);
+
+  // %TypedArray%.of is not a constructor.
+  assertThrows(function() { new constructor.of(); }, TypeError);
+
+  // For receivers which are not constructors %TypedArray%.of does not
+  // allocate a typed array using a default constructor, but throws an
+  // exception. Note that this is different from Array.of, which uses
+  // Array as default constructor.
+  for (var x of [undefined, null, false, true, "cow", 42, 3.14]) {
+    assertThrows(function () { constructor.of.call(x); }, TypeError);
+  }
+}
+
+for (var constructor of typedArrayConstructors) {
+  TestTypedArrayOf(constructor);
+}
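+
+
+// A rough model of the %TypedArray%.of algorithm exercised above: invoke
+// `this` as a constructor with the argument count, then store elements
+// 0..n-1 from the arguments. (The real built-in defines elements rather
+// than assigning them, and throws if `this` is not a constructor.)
+function sketchOf() {
+  var len = arguments.length;
+  var result = new this(len);
+  for (var i = 0; i < len; i++) result[i] = arguments[i];
+  return result;
+}
+assertArrayEquals([1, 2], sketchOf.call(Array, 1, 2));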
diff --git a/deps/v8/test/mjsunit/harmony/unicode-escapes.js b/deps/v8/test/mjsunit/harmony/unicode-escapes.js
new file mode 100644 (file)
index 0000000..b39ee1a
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ES6 extends the \uxxxx escape and also allows \u{xxxxx}.
+
+// Flags: --harmony-unicode
+
+// Unicode escapes in variable names.
+
+(function TestVariableNames1() {
+  var foobar = 1;
+  assertEquals(foob\u0061r, 1);
+  assertEquals(foob\u{0061}r, 1);
+  assertEquals(foob\u{61}r, 1);
+  assertEquals(foob\u{0000000061}r, 1);
+})();
+
+(function TestVariableNames2() {
+  var foobar = 1;
+  assertEquals(\u0066oobar, 1);
+  assertEquals(\u{0066}oobar, 1);
+  assertEquals(\u{66}oobar, 1);
+  assertEquals(\u{0000000066}oobar, 1);
+})();
+
+// Unicode escapes in strings.
+
+(function TestStrings() {
+  var s1 = "foob\u0061r";
+  assertEquals(s1, "foobar");
+  var s2 = "foob\u{0061}r";
+  assertEquals(s2, "foobar");
+  var s3 = "foob\u{61}r";
+  assertEquals(s3, "foobar");
+  var s4 = "foob\u{0000000061}r";
+  assertEquals(s4, "foobar");
+})();
+
+
+(function TestSurrogates() {
+  // U+10E6D corresponds to the surrogate pair [U+D803, U+DE6D].
+  var s1 = "foo\u{10e6d}";
+  var s2 = "foo\u{d803}\u{de6d}";
+  assertEquals(s1, s2);
+})();
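+
+
+(function TestSurrogateMathSketch() {
+  // The standard UTF-16 pair arithmetic behind the test above:
+  // lead = 0xD800 + ((cp - 0x10000) >> 10),
+  // trail = 0xDC00 + ((cp - 0x10000) & 0x3FF).
+  var cp = 0x10E6D;
+  assertEquals(0xD803, 0xD800 + ((cp - 0x10000) >> 10));
+  assertEquals(0xDE6D, 0xDC00 + ((cp - 0x10000) & 0x3FF));
+})();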
diff --git a/deps/v8/test/mjsunit/keyed-load-with-string-key.js b/deps/v8/test/mjsunit/keyed-load-with-string-key.js
new file mode 100644 (file)
index 0000000..4388946
--- /dev/null
@@ -0,0 +1,46 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+
+var o = {
+  "foo": "bar",
+}
+
+function get(obj, key) {
+  return obj[key];
+}
+
+get(o, "foo");
+get(o, "foo");
+get(o, "foo");
+
+%OptimizeFunctionOnNextCall(get);
+get(o, "foo");
+
+assertOptimized(get);
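+
+// The three unoptimized calls above collect keyed-load feedback for the
+// constant string key, so the optimized code should handle it without
+// deopting; assertOptimized checks that after the post-optimization call.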
index 7020338..91d0f82 100644 (file)
@@ -125,12 +125,7 @@ function testObjectMirror(obj, cls_name, ctor_name, hasSpecialProperties) {
         // Check that serialized name is correct.
         assertEquals(properties[i].name(), fromJSON.properties[i].name, 'Unexpected serialized name');
 
-        // If property type is normal property type is not serialized.
-        if (properties[i].propertyType() != debug.PropertyType.Normal) {
-          assertEquals(properties[i].propertyType(), fromJSON.properties[i].propertyType, 'Unexpected serialized property type');
-        } else {
-          assertTrue(typeof(fromJSON.properties[i].propertyType) === 'undefined', 'Unexpected serialized property type');
-        }
+        assertEquals(properties[i].propertyType(), fromJSON.properties[i].propertyType, 'Unexpected serialized property type');
 
         // If there are no attributes attributes are not serialized.
         if (properties[i].attributes() != debug.PropertyAttribute.None) {
index 0430279..b360425 100644 (file)
@@ -231,16 +231,7 @@ var assertUnoptimized;
     return deepObjectEquals(a, b);
   }
 
-  function checkArity(args, arity, name) {
-    if (args.length < arity) {
-      fail(PrettyPrint(arity), args.length,
-           name + " requires " + arity + " or more arguments");
-    }
-  }
-
   assertSame = function assertSame(expected, found, name_opt) {
-    checkArity(arguments, 2, "assertSame");
-
     // TODO(mstarzinger): We should think about using Harmony's egal operator
     // or the function equivalent Object.is() here.
     if (found === expected) {
@@ -253,8 +244,6 @@ var assertUnoptimized;
 
 
   assertEquals = function assertEquals(expected, found, name_opt) {
-    checkArity(arguments, 2, "assertEquals");
-
     if (!deepEquals(found, expected)) {
       fail(PrettyPrint(expected), found, name_opt);
     }
index 0ab7289..26ec10b 100644 (file)
@@ -51,6 +51,9 @@
   # Issue 3389: deopt_every_n_garbage_collections is unsafe
   'regress/regress-2653': [SKIP],
 
+  # Issue 3784: setters-on-elements is flaky
+  'setters-on-elements': [PASS, FAIL],
+
   ##############################################################################
   # TurboFan compiler failures.
 
   # from the deoptimizer to do that.
   'arguments-indirect': [PASS, NO_VARIANTS],
 
-  # TODO(rossberg): Typer doesn't like contexts very much.
-  'harmony/block-conflicts': [PASS, NO_VARIANTS],
-  'harmony/block-for': [PASS, NO_VARIANTS],
-  'harmony/block-leave': [PASS, NO_VARIANTS],
-  'harmony/block-let-crankshaft': [PASS, NO_VARIANTS],
-  'harmony/empty-for': [PASS, NO_VARIANTS],
-
   # TODO(verwaest): Some tests are over-restrictive about object layout.
   'array-constructor-feedback': [PASS, NO_VARIANTS],
   'array-feedback': [PASS, NO_VARIANTS],
   'regress/regress-crbug-259300': [PASS, NO_VARIANTS],
   'regress/regress-frame-details-null-receiver': [PASS, NO_VARIANTS],
 
+  # TODO(arv): TurboFan does not yet add [[HomeObject]] as needed.
+  'harmony/object-literals-super': [PASS, NO_VARIANTS],
+
   ##############################################################################
   # Too slow in debug mode with --stress-opt mode.
   'compiler/regress-stacktrace-methods': [PASS, ['mode == debug', SKIP]],
   ##############################################################################
   # Tests verifying CHECK and ASSERT.
   'verify-check-false': [FAIL, NO_VARIANTS],
-  'verify-assert-false': [NO_VARIANTS, ['mode == release', PASS], ['mode == debug', FAIL]],
+  'verify-assert-false': [NO_VARIANTS, ['mode == release and dcheck_always_on == False', PASS], ['mode == debug or dcheck_always_on == True', FAIL]],
 
   ##############################################################################
   # Tests with different versions for release and debug.
   'math-floor-of-div-nosudiv': [PASS, SLOW, ['arch not in [arm, arm64, android_arm, android_arm64]', SKIP]],
 
   # Too slow for slow variants.
-  'asm/embenchen/*': [PASS, FAST_VARIANTS],
+  'asm/embenchen/*': [PASS, SLOW, FAST_VARIANTS],
 }],  # ALWAYS
 
 ##############################################################################
   'elements-kind': [SKIP],
   'elements-transition-hoisting': [SKIP],
   'fast-prototype': [SKIP],
+  'field-type-tracking': [SKIP],
   'getters-on-elements': [SKIP],
   'harmony/block-let-crankshaft': [SKIP],
   'opt-elements-kind': [SKIP],
   'osr-elements-kind': [SKIP],
+  'regress/regress-crbug-137689': [SKIP],
   'regress/regress-165637': [SKIP],
   'regress/regress-2249': [SKIP],
   # Tests taking too long
 
   # Too slow for gc stress.
   'asm/embenchen/box2d': [SKIP],
+
+  # Issue 3723.
+  'regress/regress-3717': [SKIP],
+  # Issue 3776.
+  'debug-stepframe': [SKIP],
 }],  # 'gc_stress == True'
 
 ##############################################################################
diff --git a/deps/v8/test/mjsunit/mod-range.js b/deps/v8/test/mjsunit/mod-range.js
new file mode 100644 (file)
index 0000000..0cded89
--- /dev/null
@@ -0,0 +1,79 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function g1(i) {
+  var x = i * 1;
+  return (x >>> 0) % 1000000000000;
+}
+
+function g2(i) {
+  var x = i * 1;
+  return ((x >>> 0) % 1000000000000) | 0;
+}
+
+function test1() {
+  assertEquals(2294967296, g1(-2000000000));
+  assertEquals(2294967295, g1(-2000000001));
+  assertEquals(2294967290, g1(-2000000006));
+
+  assertEquals(2147483651, g1(-2147483645));
+  assertEquals(2147483650, g1(-2147483646));
+  assertEquals(2147483649, g1(-2147483647));
+  assertEquals(2147483648, g1(-2147483648));
+  assertEquals(2147483647, g1(-2147483649));
+
+  assertEquals(3000000000, g1(3000000000));
+  assertEquals(3000000001, g1(3000000001));
+  assertEquals(3000000002, g1(3000000002));
+
+  assertEquals(4000000000, g1(4000000000));
+  assertEquals(4000400001, g1(4000400001));
+  assertEquals(4000400002, g1(4000400002));
+
+  assertEquals(3, g1(4294967299));
+  assertEquals(2, g1(4294967298));
+  assertEquals(1, g1(4294967297));
+  assertEquals(0, g1(4294967296));
+  assertEquals(4294967295, g1(4294967295));
+  assertEquals(4294967294, g1(4294967294));
+  assertEquals(4294967293, g1(4294967293));
+  assertEquals(4294967292, g1(4294967292));
+}
+
+%NeverOptimizeFunction(test1);
+test1();
+
+function test2() {
+  assertEquals(-2000000000, g2(-2000000000));
+  assertEquals(-2000000001, g2(-2000000001));
+  assertEquals(-2000000006, g2(-2000000006));
+
+  assertEquals(-2147483645, g2(-2147483645));
+  assertEquals(-2147483646, g2(-2147483646));
+  assertEquals(-2147483647, g2(-2147483647));
+  assertEquals(-2147483648, g2(-2147483648));
+  assertEquals(2147483647, g2(-2147483649));
+
+  assertEquals(-1294967296, g2(3000000000));
+  assertEquals(-1294967295, g2(3000000001));
+  assertEquals(-1294967294, g2(3000000002));
+
+  assertEquals(-294967296, g2(4000000000));
+  assertEquals(-294567295, g2(4000400001));
+  assertEquals(-294567294, g2(4000400002));
+
+  assertEquals(3, g2(4294967299));
+  assertEquals(2, g2(4294967298));
+  assertEquals(1, g2(4294967297));
+  assertEquals(0, g2(4294967296));
+  assertEquals(-1, g2(4294967295));
+  assertEquals(-2, g2(4294967294));
+  assertEquals(-3, g2(4294967293));
+  assertEquals(-4, g2(4294967292));
+}
+
+%NeverOptimizeFunction(test2);
+test2();
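
A worked example of why g1 and g2 diverge (a sketch using the mjsunit-style
assert helpers; it assumes `>>> 0` reinterprets its operand as uint32 and
`| 0` wraps the result back into int32 range):

  var x = -2000000000 >>> 0;      // 2^32 - 2000000000 == 2294967296
  assertEquals(2294967296, x % 1000000000000);         // below 10^12: unchanged, as in g1
  assertEquals(-2000000000, (x % 1000000000000) | 0);  // wrapped to int32, as in g2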
diff --git a/deps/v8/test/mjsunit/object-freeze-global.js b/deps/v8/test/mjsunit/object-freeze-global.js
new file mode 100644 (file)
index 0000000..8ab5b85
--- /dev/null
@@ -0,0 +1,6 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.freeze(this);
+assertTrue(Object.isFrozen(this));
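
A related consequence, sketched here rather than asserted by the test above:
freezing the global object also makes it non-extensible, so introducing new
globals afterwards fails silently in sloppy mode (assert helpers assumed from
the mjsunit harness):

  Object.freeze(this);
  this.later = 1;                          // silent no-op: not extensible
  assertEquals("undefined", typeof later);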
index 4144936..5d1f8b7 100644 (file)
@@ -303,7 +303,7 @@ assertTrue(Object.isFrozen(Object.freeze(function(){"use strict";})));
 
 // Also test a simpler case
 obj = {};
-Object.defineProperty(obj, 'accessor', {
+Object.defineProperty(obj, 'accessor2', {
   get: function() { return 42 },
   set: function() { accessorDidRun = true },
   configurable: true,
@@ -339,3 +339,12 @@ obj.__proto__[1] = 1;
 assertEquals(1, obj[1]);
 Object.freeze(obj);
 assertThrows(function() { obj.unshift(); }, TypeError);
+
+// Sealing and then freezing should do the right thing.
+var obj = { foo: 'bar', 0: 'element' };
+Object.seal(obj);
+assertTrue(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+Object.freeze(obj);
+assertTrue(Object.isSealed(obj));
+assertTrue(Object.isFrozen(obj));
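
The invariant the new assertions encode: sealing makes every own property
non-configurable, while freezing additionally makes data properties
non-writable, so frozen implies sealed but not the reverse. A minimal sketch:

  var o = Object.seal({p: 1});
  o.p = 2;                   // still writable after seal
  Object.freeze(o);
  o.p = 3;                   // silent no-op in sloppy mode once frozen
  assertEquals(2, o.p);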
index 6b9184d..bde3161 100644 (file)
@@ -27,6 +27,8 @@
 
 // Tests the Object.preventExtensions method - ES 15.2.3.10
 
+// Flags: --allow-natives-syntax
+
 
 var obj1 = {};
 // Extensible defaults to true.
@@ -126,3 +128,35 @@ assertEquals(50, v);
 var n = o[0] = 100;
 assertEquals(undefined, o[0]);
 assertEquals(100, n);
+
+// Fast properties should remain fast
+obj = { x: 42, y: 'foo' };
+assertTrue(%HasFastProperties(obj));
+Object.preventExtensions(obj);
+assertFalse(Object.isExtensible(obj));
+assertFalse(Object.isSealed(obj));
+assertTrue(%HasFastProperties(obj));
+
+// Non-extensible objects should share maps where possible
+obj = { prop1: 1, prop2: 2 };
+obj2 = { prop1: 3, prop2: 4 };
+assertTrue(%HaveSameMap(obj, obj2));
+Object.preventExtensions(obj);
+Object.preventExtensions(obj2);
+assertFalse(Object.isExtensible(obj));
+assertFalse(Object.isExtensible(obj2));
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isSealed(obj2));
+assertTrue(%HaveSameMap(obj, obj2));
+
+// Non-extensible objects should share maps even when they have elements
+obj = { prop1: 1, prop2: 2, 75: 'foo' };
+obj2 = { prop1: 3, prop2: 4, 150: 'bar' };
+assertTrue(%HaveSameMap(obj, obj2));
+Object.preventExtensions(obj);
+Object.preventExtensions(obj2);
+assertFalse(Object.isExtensible(obj));
+assertFalse(Object.isExtensible(obj2));
+assertFalse(Object.isSealed(obj));
+assertFalse(Object.isSealed(obj2));
+assertTrue(%HaveSameMap(obj, obj2));
diff --git a/deps/v8/test/mjsunit/object-seal-global.js b/deps/v8/test/mjsunit/object-seal-global.js
new file mode 100644 (file)
index 0000000..ec9f82e
--- /dev/null
@@ -0,0 +1,7 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Object.seal(this);
+assertTrue(Object.isSealed(this));
+assertFalse(Object.isFrozen(this));
index 3afddb9..3c46ab2 100644 (file)
@@ -267,3 +267,132 @@ assertDoesNotThrow(function() { obj.splice(1,2,1,2); });
 assertDoesNotThrow(function() { obj.splice(1,2000,1,2); });
 assertThrows(function() { obj.splice(0,0,1); }, TypeError);
 assertThrows(function() { obj.splice(1,2000,1,2,3); }, TypeError);
+
+// Test that the enumerable attribute is unperturbed by sealing.
+obj = { x: 42, y: 'foo' };
+Object.defineProperty(obj, 'y', {enumerable: false});
+Object.seal(obj);
+assertTrue(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+desc = Object.getOwnPropertyDescriptor(obj, 'x');
+assertTrue(desc.enumerable);
+desc = Object.getOwnPropertyDescriptor(obj, 'y');
+assertFalse(desc.enumerable);
+
+// Fast properties should remain fast
+obj = { x: 42, y: 'foo' };
+assertTrue(%HasFastProperties(obj));
+Object.seal(obj);
+assertTrue(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertTrue(%HasFastProperties(obj));
+
+// Sealed objects should share maps where possible
+obj = { prop1: 1, prop2: 2 };
+obj2 = { prop1: 3, prop2: 4 };
+assertTrue(%HaveSameMap(obj, obj2));
+Object.seal(obj);
+Object.seal(obj2);
+assertTrue(Object.isSealed(obj));
+assertTrue(Object.isSealed(obj2));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isFrozen(obj2));
+assertTrue(%HaveSameMap(obj, obj2));
+
+// Sealed objects should share maps even when they have elements
+obj = { prop1: 1, prop2: 2, 75: 'foo' };
+obj2 = { prop1: 3, prop2: 4, 150: 'bar' };
+assertTrue(%HaveSameMap(obj, obj2));
+Object.seal(obj);
+Object.seal(obj2);
+assertTrue(Object.isSealed(obj));
+assertTrue(Object.isSealed(obj2));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isFrozen(obj2));
+assertTrue(%HaveSameMap(obj, obj2));
+
+// Setting elements after sealing should not be allowed
+obj = { prop: 'thing' };
+Object.seal(obj);
+assertTrue(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+obj[0] = 'hello';
+assertFalse(obj.hasOwnProperty(0));
+
+// Sealing an object in dictionary mode should work
+// Also testing that getter/setter properties work after sealing
+obj = { };
+for (var i = 0; i < 100; ++i) {
+  obj['x' + i] = i;
+}
+var accessorDidRun = false;
+Object.defineProperty(obj, 'accessor', {
+  get: function() { return 42 },
+  set: function() { accessorDidRun = true },
+  configurable: true,
+  enumerable: true
+});
+
+assertFalse(%HasFastProperties(obj));
+Object.seal(obj);
+assertFalse(%HasFastProperties(obj));
+assertTrue(Object.isSealed(obj));
+assertFalse(Object.isFrozen(obj));
+assertFalse(Object.isExtensible(obj));
+for (var i = 0; i < 100; ++i) {
+  desc = Object.getOwnPropertyDescriptor(obj, 'x' + i);
+  assertFalse(desc.configurable);
+}
+assertEquals(42, obj.accessor);
+assertFalse(accessorDidRun);
+obj.accessor = 'ignored value';
+assertTrue(accessorDidRun);
+
+// Sealing arguments should work
+var func = function(arg) {
+  Object.seal(arguments);
+  assertTrue(Object.isSealed(arguments));
+};
+func('hello', 'world');
+func('goodbye', 'world');
+
+// Sealing sparse arrays
+var sparseArr = [0, 1];
+sparseArr[10000] = 10000;
+Object.seal(sparseArr);
+assertTrue(Object.isSealed(sparseArr));
+
+// Accessors on a fast object should behave properly after sealing
+obj = {};
+Object.defineProperty(obj, 'accessor', {
+  get: function() { return 42 },
+  set: function() { accessorDidRun = true },
+  configurable: true,
+  enumerable: true
+});
+assertTrue(%HasFastProperties(obj));
+Object.seal(obj);
+assertTrue(Object.isSealed(obj));
+assertTrue(%HasFastProperties(obj));
+assertEquals(42, obj.accessor);
+accessorDidRun = false;
+obj.accessor = 'ignored value';
+assertTrue(accessorDidRun);
+
+// Test for regression in mixed accessor/data property objects.
+// The strict function is one such object.
+assertTrue(Object.isSealed(Object.seal(function(){"use strict";})));
+
+// Also test a simpler case
+obj = {};
+Object.defineProperty(obj, 'accessor2', {
+  get: function() { return 42 },
+  set: function() { accessorDidRun = true },
+  configurable: true,
+  enumerable: true
+});
+obj.data = 'foo';
+assertTrue(%HasFastProperties(obj));
+Object.seal(obj);
+assertTrue(%HasFastProperties(obj));
+assertTrue(Object.isSealed(obj));
index be7303b..5f4f437 100644 (file)
@@ -142,10 +142,16 @@ function test1() {
   assertTrue(%HaveSameMap(smis, doubles));
 }
 
+function clear_ic_state() {
+  %ClearFunctionTypeFeedback(construct_smis);
+  %ClearFunctionTypeFeedback(construct_doubles);
+  %ClearFunctionTypeFeedback(convert_mixed);
+}
+
 test1();
-gc(); // clear IC state
+clear_ic_state();
 test1();
-gc(); // clear IC state
+clear_ic_state();
 %OptimizeFunctionOnNextCall(test1);
 test1();
-gc(); // clear IC state
+clear_ic_state();
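
The gc() calls were presumably only a roundabout way to drop accumulated type
feedback; %ClearFunctionTypeFeedback states that intent directly. The pattern,
as a sketch (requires --allow-natives-syntax):

  function f(a) { return a.x; }
  f({x: 1});                        // f records type feedback
  %ClearFunctionTypeFeedback(f);    // reset it without forcing a GC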
diff --git a/deps/v8/test/mjsunit/regress-ntl.js b/deps/v8/test/mjsunit/regress-ntl.js
new file mode 100644 (file)
index 0000000..993599e
--- /dev/null
@@ -0,0 +1,41 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function mod1() {
+  var v_1 = 1;
+  var v_2 = 1;
+  v_1++;
+  v_2 = {valueOf: function() { throw "gagh"; }};
+
+  function bug1() {
+    for (var i = 0; i < 1; v_2++) {
+      if (v_1 == 1) ;
+    }
+  }
+
+  return bug1;
+}
+
+var f = mod1();
+assertThrows(f);
+%OptimizeFunctionOnNextCall(f);
+assertThrows(f);
+
+
+var v_3 = 1;
+var v_4 = 1;
+v_3++;
+v_4 = {valueOf: function() { throw "gagh"; }};
+
+function bug2() {
+  for (var i = 0; i < 1; v_4++) {
+    if (v_3 == 1) ;
+  }
+}
+
+assertThrows(bug2);
+%OptimizeFunctionOnNextCall(bug2);
+assertThrows(bug2);
index c9972e9..21ae622 100644 (file)
@@ -26,9 +26,9 @@
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 try {
-  /foo/\u0069
+  eval("/foo/\\u0069")
 } catch (e) {
   assertEquals(
-      "SyntaxError: Invalid flags supplied to RegExp constructor '\\u0069'",
+      "SyntaxError: Invalid regular expression flags",
       e.toString());
 }
index 35e7355..a850f70 100644 (file)
@@ -27,6 +27,7 @@
 
 // Flags: --string-slices --expose-externalize-string
 
-var a = "abcdefghijklmnopqrstuvqxy"+"z";
+var a = "internalized dummy";
+a = "abcdefghijklmnopqrstuvqxy"+"z";
 externalizeString(a, true);
 assertEquals('b', a.substring(1).charAt(0));
index e6b37d3..0eb2770 100644 (file)
@@ -46,7 +46,7 @@ for (const x in [1,2,3]) {
 }
 assertEquals("012", s);
 
-assertThrows(function() { for(const x in [1,2,3]) { x++ } }, SyntaxError);
+assertThrows("'use strict'; for (const x in [1,2,3]) { x++ }", TypeError);
 
 // Function scope
 (function() {
diff --git a/deps/v8/test/mjsunit/regress/regress-3229.js b/deps/v8/test/mjsunit/regress/regress-3229.js
new file mode 100644 (file)
index 0000000..1a0ed64
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Escape '/'.
+function testEscapes(expected, regexp) {
+  assertEquals(expected, regexp.source);
+  assertEquals("/" + expected + "/", regexp.toString());
+}
+
+testEscapes("\\/", /\//);
+testEscapes("\\/\\/", /\/\//);
+testEscapes("\\/", new RegExp("/"));
+testEscapes("\\/", new RegExp("\\/"));
+testEscapes("\\\\/", new RegExp("\\\\/"));
+testEscapes("\\/\\/", new RegExp("\\/\\/"));
+testEscapes("\\/\\/\\/\\/", new RegExp("////"));
+testEscapes("\\/\\/\\/\\/", new RegExp("\\//\\//"));
+testEscapes("(?:)", new RegExp(""));
+testEscapes("(?:)", RegExp.prototype);
+
+// Read-only property.
+var r = /\/\//;
+testEscapes("\\/\\/", r);
+r.source = "garbage";
+testEscapes("\\/\\/", r);
diff --git a/deps/v8/test/mjsunit/regress/regress-3687.js b/deps/v8/test/mjsunit/regress/regress-3687.js
new file mode 100644 (file)
index 0000000..e1df1b4
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+var t1 = { f1: 0 };
+var t2 = { f2: 0 };
+
+var z = {
+  x: {
+    x: t1,
+    y: {
+      x: {},
+      z1: {
+        x: t2,
+        y: 1
+      }
+    }
+  },
+  z2: 0
+};
diff --git a/deps/v8/test/mjsunit/regress/regress-3709.js b/deps/v8/test/mjsunit/regress/regress-3709.js
new file mode 100644 (file)
index 0000000..d2de711
--- /dev/null
@@ -0,0 +1,28 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function getobj() {
+  return { bar : function() { return 0}};
+}
+
+function foo() {
+  var obj = getobj();
+  var length = arguments.length;
+  if (length == 0) {
+     obj.bar();
+  } else {
+     obj.bar.apply(obj, arguments);
+  }
+}
+
+foo();
+foo();
+%OptimizeFunctionOnNextCall(foo);
+foo();
+assertOptimized(foo);
+foo(10);
+assertUnoptimized(foo);
+%ClearFunctionTypeFeedback(foo);
diff --git a/deps/v8/test/mjsunit/regress/regress-3717.js b/deps/v8/test/mjsunit/regress/regress-3717.js
new file mode 100644 (file)
index 0000000..1f7bc7d
--- /dev/null
@@ -0,0 +1,33 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug --no-lazy
+
+Debug = debug.Debug;
+var exception = null;
+var break_count = 0;
+
+function f() {
+  function g(p) {
+    return 1;
+  }
+  g(1);
+};
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) break_count++;
+  } catch (e) {
+    exception = e;
+  }
+}
+
+Debug.setListener(listener);
+var bp = Debug.setBreakPoint(f, 2);
+f();
+Debug.clearBreakPoint(bp);
+Debug.setListener(null);
+
+assertEquals(1, break_count);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-3756.js b/deps/v8/test/mjsunit/regress/regress-3756.js
new file mode 100644 (file)
index 0000000..6b1f029
--- /dev/null
@@ -0,0 +1,74 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+(function TestIdentityEscapes() {
+  // \u not followed by 4 hex digits is treated as an identity escape.
+  var r0 = /\u/;
+  assertTrue(r0.test("u"));
+
+  r0 = RegExp("\\u");
+  assertTrue(r0.test("u"));
+
+  var r1 = /\usecond/;
+  assertTrue(r1.test("usecond"));
+
+  r1 = RegExp("\\usecond");
+  assertTrue(r1.test("usecond"));
+
+  var r2 = /first\u/;
+  assertTrue(r2.test("firstu"));
+  // This used to return true (which was a bug).
+  assertFalse(r2.test("first\\u"));
+
+  r2 = RegExp("first\\u");
+  assertTrue(r2.test("firstu"));
+  // This used to return true (which was a bug).
+  assertFalse(r2.test("first\\u"));
+
+  var r3 = /first\usecond/;
+  assertTrue(r3.test("firstusecond"));
+  assertFalse(r3.test("first\\usecond"));
+
+  r3 = RegExp("first\\usecond");
+  assertTrue(r3.test("firstusecond"));
+  assertFalse(r3.test("first\\usecond"));
+
+  var r4 = /first\u123second/;
+  assertTrue(r4.test("firstu123second"));
+  assertFalse(r4.test("first\\u123second"));
+
+  r4 = RegExp("first\\u123second");
+  assertTrue(r4.test("firstu123second"));
+  assertFalse(r4.test("first\\u123second"));
+
+  // \X where X is not a legal escape character is treated as identity escape
+  // too.
+  var r5 = /\a/;
+  assertTrue(r5.test("a"));
+
+  r5 = RegExp("\\a");
+  assertTrue(r5.test("a"));
+
+  var r6 = /\asecond/;
+  assertTrue(r6.test("asecond"));
+
+  r6 = RegExp("\\asecond");
+  assertTrue(r6.test("asecond"));
+
+  var r7 = /first\a/;
+  assertTrue(r7.test("firsta"));
+  assertFalse(r7.test("first\\a"));
+
+  r7 = RegExp("first\\a");
+  assertTrue(r7.test("firsta"));
+  assertFalse(r7.test("first\\a"));
+
+  var r8 = /first\asecond/;
+  assertTrue(r8.test("firstasecond"));
+  assertFalse(r8.test("first\\asecond"));
+
+  r8 = RegExp("first\\asecond");
+  assertTrue(r8.test("firstasecond"));
+  assertFalse(r8.test("first\\asecond"));
+})();
diff --git a/deps/v8/test/mjsunit/regress/regress-410030.js b/deps/v8/test/mjsunit/regress/regress-410030.js
new file mode 100644 (file)
index 0000000..efd4b1e
--- /dev/null
@@ -0,0 +1,43 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+try {
+  throw 0;
+} catch(e) {
+  assertSame(3, eval("delete x; const x=3; x"));
+}
+
+
+try {
+  throw 0;
+} catch(e) {
+  assertSame(3, (1,eval)("delete x1; const x1=3; x1"));
+}
+
+
+try {
+  throw 0;
+} catch(e) {
+  with({}) {
+    assertSame(3, eval("delete x2; const x2=3; x2"));
+  }
+}
+
+
+(function f() {
+  try {
+    throw 0;
+  } catch(e) {
+    assertSame(3, eval("delete x; const x=3; x"));
+  }
+}());
+
+
+(function f() {
+  try {
+    throw 0;
+  } catch(e) {
+    assertSame(3, (1,eval)("delete x4; const x4=3; x4"));
+  }
+}());
diff --git a/deps/v8/test/mjsunit/regress/regress-435073.js b/deps/v8/test/mjsunit/regress/regress-435073.js
new file mode 100644 (file)
index 0000000..dbaa612
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --verify-heap
+
+function test(x) { [x,,]; }
+
+test(0);
+test(0);
+%OptimizeFunctionOnNextCall(test);
+test(0);
diff --git a/deps/v8/test/mjsunit/regress/regress-435477.js b/deps/v8/test/mjsunit/regress/regress-435477.js
new file mode 100644 (file)
index 0000000..0a15000
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+var a = new Array(128);
+
+function f(a, base) {
+  a[base] = 2;
+}
+
+f(a, undefined);
+f("r12", undefined);
+f(a, 0);
+%OptimizeFunctionOnNextCall(f);
+f(a, 0);
diff --git a/deps/v8/test/mjsunit/regress/regress-436893.js b/deps/v8/test/mjsunit/regress/regress-436893.js
new file mode 100644 (file)
index 0000000..38e7b5f
--- /dev/null
@@ -0,0 +1,37 @@
+// Copyright 2010 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Flags: --allow-natives-syntax
+
+var x = 11;
+function foo() {
+  return 42;
+}
+// Test passing null or undefined as receiver.
+function g() { return foo.apply(null, x()++); }
+%OptimizeFunctionOnNextCall(g);
+assertThrows(g);
diff --git a/deps/v8/test/mjsunit/regress/regress-436896.js b/deps/v8/test/mjsunit/regress/regress-436896.js
new file mode 100644 (file)
index 0000000..344a7a3
--- /dev/null
@@ -0,0 +1,17 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(x) {
+  const x = 0;
+  return x;
+}
+
+function g(x) {
+  return f(x);
+}
+
+%OptimizeFunctionOnNextCall(g);
+assertThrows(function() { g(42); }, TypeError);
diff --git a/deps/v8/test/mjsunit/regress/regress-437765.js b/deps/v8/test/mjsunit/regress/regress-437765.js
new file mode 100644 (file)
index 0000000..88d5388
--- /dev/null
@@ -0,0 +1,22 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax --no-fold-constants
+
+function foo(x, y) {
+  return Math.floor(x / y);
+}
+
+function bar(x, y) {
+  return foo(x + 1, y + 1);
+}
+
+function baz() {
+  bar(64, 2);
+}
+
+baz();
+baz();
+%OptimizeFunctionOnNextCall(baz);
+baz();
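
Worked through, the call chain above is baz() -> bar(64, 2) -> foo(65, 3) ->
Math.floor(65 / 3) == 21, presumably exercising the optimized
flooring-division path that --no-fold-constants keeps from being folded away:

  assertEquals(21, Math.floor(65 / 3));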
diff --git a/deps/v8/test/mjsunit/regress/regress-441099.js b/deps/v8/test/mjsunit/regress/regress-441099.js
new file mode 100644 (file)
index 0000000..63aecfd
--- /dev/null
@@ -0,0 +1,53 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var Module;
+if (!Module) Module = eval('(function() { try { return Module || {} } catch(e) { return {} } })()');
+else if (ENVIRONMENT_IS_SHELL) {
+}
+var Runtime = {
+  stackSave: function () {
+  },
+  alignMemory: function (quantum) { var ret = size = Math.ceil()*(quantum ? quantum : 8); return ret; }}
+function allocate() {
+}
+function callRuntimeCallbacks(callbacks) {
+    var callback = callbacks.shift();
+    var func = callback.func;
+    if (typeof func === 'number') {
+    } else {
+      func();
+    }
+}
+var __ATINIT__    = []; // functions called during startup
+function ensureInitRuntime() {
+  callRuntimeCallbacks(__ATINIT__);
+}
+/* global initializers */ __ATINIT__.push({ func: function() { runPostSets() } });
+    function __formatString() {
+            switch (next) {
+            }
+    }
+  var Browser={mainLoop:{queue:[],pause:function () {
+        }},moduleContextCreatedCallbacks:[],workers:[],init:function () {
+      }};
+var asm = (function() {
+  'use asm';
+function setThrew() {
+}
+function runPostSets() {
+}
+function _main() {
+}
+function _free() {
+}
+  return { runPostSets: runPostSets};
+})
+();
+var runPostSets = Module["runPostSets"] = asm["runPostSets"];
+var i64Math = (function() { // Emscripten wrapper
+  /**
+   */
+})();
+    ensureInitRuntime();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-109362.js b/deps/v8/test/mjsunit/regress/regress-crbug-109362.js
new file mode 100644 (file)
index 0000000..b156013
--- /dev/null
@@ -0,0 +1,26 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+
+function test(expectation, f) {
+  var stack;
+  try {
+    f();
+  } catch (e) {
+    stack = e.stack;
+  }
+  print(stack);
+  assertTrue(stack.indexOf("at eval (evaltest:" + expectation + ")") > 0);
+}
+
+test("1:5", new Function(
+    '1 + reference_error //@ sourceURL=evaltest'));
+test("2:6", new Function(
+    'x', '\n 1 + reference_error //@ sourceURL=evaltest'));
+test("2:6", new Function(
+    'x\n\n', "z//\n", "y", '\n 1 + reference_error //@ sourceURL=evaltest'));
+test("1:5", new Function(
+    'x/*', "z//\n", "y*/", '1 + reference_error //@ sourceURL=evaltest'));
+test("2:6", eval(
+    '(function () {\n 1 + reference_error //@ sourceURL=evaltest\n})'));
index 0ff0c4e..ef79d24 100644 (file)
@@ -44,5 +44,4 @@ assertTrue(%HaveSameMap(o, o2));
 
 Object.defineProperty(o, "foo", { set: setter, configurable: true });
 Object.defineProperty(o2, "foo", { set: setter, configurable: true });
-// TODO(ishell): this should eventually become assertTrue().
-assertFalse(%HaveSameMap(o, o2));
+assertTrue(%HaveSameMap(o, o2));
index 9ba759a..f199628 100644 (file)
 
 // Flags: --allow-natives-syntax
 
-var string = "hello world";
-var expected = "Hello " + "world";
+var string = "internalized dummy";
+var expected = "internalized dummy";
+string = "hello world";
+expected = "Hello " + "world";
 function Capitalize() {
   %_OneByteSeqStringSetChar(0, 0x48, string);
 }
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-431602.js b/deps/v8/test/mjsunit/regress/regress-crbug-431602.js
new file mode 100644 (file)
index 0000000..2467aaf
--- /dev/null
@@ -0,0 +1,23 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --always-opt
+
+var heap_number_producer = {y:1.5};
+heap_number_producer.y = 0;
+var heap_number_zero = heap_number_producer.y;
+var non_constant_eight = {};
+non_constant_eight = 8;
+
+function BreakIt() {
+  return heap_number_zero | (1 | non_constant_eight);
+}
+
+function expose(a, b, c) {
+  return b;
+}
+
+assertEquals(9, expose(8, 9, 10));
+assertEquals(9, expose(8, BreakIt(), 10));
+assertEquals(9, BreakIt());
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-432493.js b/deps/v8/test/mjsunit/regress/regress-crbug-432493.js
new file mode 100644 (file)
index 0000000..87c4f83
--- /dev/null
@@ -0,0 +1,57 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --expose-debug-as debug
+
+function f() {
+  var a = 1;
+  var b = 2;
+  return a + b;
+}
+
+var exception = null;
+var break_count = 0;
+var throw_count = 0;
+
+function listener(event, exec_state, event_data, data) {
+  try {
+    if (event == Debug.DebugEvent.Break) {
+      break_count++;
+      // Disable all breakpoints from within the debug event callback.
+      Debug.debuggerFlags().breakPointsActive.setValue(false);
+    } else if (event == Debug.DebugEvent.Exception) {
+      throw_count++;
+      // Enable all breakpoints from within the debug event callback.
+      Debug.debuggerFlags().breakPointsActive.setValue(true);
+    }
+  } catch (e) {
+    exception = e;
+  }
+}
+
+Debug = debug.Debug;
+
+Debug.setListener(listener);
+Debug.setBreakOnException();
+Debug.setBreakPoint(f, 2);
+
+f();
+f();
+
+assertEquals(1, break_count);
+assertEquals(0, throw_count);
+
+// Trigger exception event.
+try { throw 1; } catch (e) {}
+
+f();
+f();
+
+Debug.setListener(null);
+Debug.clearBreakOnException();
+Debug.debuggerFlags().breakPointsActive.setValue(true);
+
+assertEquals(2, break_count);
+assertEquals(1, throw_count);
+assertNull(exception);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-433332.js b/deps/v8/test/mjsunit/regress/regress-crbug-433332.js
new file mode 100644 (file)
index 0000000..d763243
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function f(foo) {
+  var g;
+  true ? (g = 0.1) : g |=  null;
+  if (null != g) {}
+};
+
+f(1.4);
+f(1.4);
+%OptimizeFunctionOnNextCall(f);
+f(1.4);
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-433766.js b/deps/v8/test/mjsunit/regress/regress-crbug-433766.js
new file mode 100644 (file)
index 0000000..fae9483
--- /dev/null
@@ -0,0 +1,16 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+var filler = "//" + new Array(('@')).join('x');
+
+// Test strict eval in global context.
+eval(
+  "'use strict';" +
+  "var x = 23;" +
+  "var f = function bozo1() {" +
+  "  return x;" +
+  "};" +
+  "f;" +
+  filler
+)();
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-435825.js b/deps/v8/test/mjsunit/regress/regress-crbug-435825.js
new file mode 100644 (file)
index 0000000..e10b812
--- /dev/null
@@ -0,0 +1,11 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+Error.prepareStackTrace = function (a,b) { return b; };
+
+try {
+  /(invalid regexp/;
+} catch (e) {
+  e.stack[0].getThis().toString();
+}
diff --git a/deps/v8/test/mjsunit/regress/regress-crbug-436820.js b/deps/v8/test/mjsunit/regress/regress-crbug-436820.js
new file mode 100644 (file)
index 0000000..eea386c
--- /dev/null
@@ -0,0 +1,13 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Flags: --allow-natives-syntax
+
+function c(p) {
+  return {__proto__: p};
+}
+var p = {};
+var o = c(p);
+p.x = 0.6;
+Object.defineProperty(p, "x", { writable: false });
diff --git a/deps/v8/test/mjsunit/regress/regress-lea-matching.js b/deps/v8/test/mjsunit/regress/regress-lea-matching.js
new file mode 100644 (file)
index 0000000..988368a
--- /dev/null
@@ -0,0 +1,14 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(a, b, c) {
+  a = a|0;
+  b = b|0;
+  c = c|0;
+  var r = 0;
+  r = a + ((b << 1) + c) | 0;
+  return r|0;
+}
+
+assertEquals(8, f(1, 2, 3));
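
The `|0` annotations mark asm.js-style int32 arithmetic, and the shape
a + ((b << 1) + c) is an add-with-scaled-index pattern that an instruction
selector can match to a single lea on x64 (hence the test name, presumably).
Worked through:

  assertEquals(8, 1 + ((2 << 1) + 3));   // f(1, 2, 3): 1 + (4 + 3)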
index 96d63c2..93725eb 100644 (file)
@@ -24,6 +24,8 @@
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --noharmony-classes --noharmony-object-literals
 
 // Should throw, not crash.
 assertThrows("var o = { get /*space*/ () {} }");
diff --git a/deps/v8/test/mjsunit/regress/regress-unsigned-mul-add.js b/deps/v8/test/mjsunit/regress/regress-unsigned-mul-add.js
new file mode 100644 (file)
index 0000000..0a2fc65
--- /dev/null
@@ -0,0 +1,10 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f(a) {
+  var x = a >>> 0;
+  return (x * 1.0 + x * 1.0) << 0;
+}
+
+assertEquals(-2, f(-1));
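
Worked through for f(-1): -1 >>> 0 is 4294967295; the double sum
4294967295.0 + 4294967295.0 is 8589934590; `<< 0` truncates to int32, and
8589934590 mod 2^32 is 4294967294, which reads back as -2, matching the
assertion:

  var x = (-1) >>> 0;
  assertEquals(8589934590, x * 1.0 + x * 1.0);
  assertEquals(-2, 8589934590 << 0);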
diff --git a/deps/v8/test/mjsunit/regress/regress-weakening-multiplication.js b/deps/v8/test/mjsunit/regress/regress-weakening-multiplication.js
new file mode 100644 (file)
index 0000000..dcf0011
--- /dev/null
@@ -0,0 +1,12 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+function f() {
+  for (var j = 1; j < 1; j *= -8) {
+  }
+  for (var i = 1; i < 1; j += 2) {
+    j * -1;
+  }
+}
+f();
diff --git a/deps/v8/test/mjsunit/runtime-gen/loadfromsuper.js b/deps/v8/test/mjsunit/runtime-gen/loadfromsuper.js
deleted file mode 100644 (file)
index 25f4ff9..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
-// AUTO-GENERATED BY tools/generate-runtime-tests.py, DO NOT MODIFY
-// Flags: --allow-natives-syntax --harmony --harmony-proxies
-var _home_object = new Object();
-var _receiver = new Object();
-var _name = "name";
-%LoadFromSuper(_home_object, _receiver, _name);
index 0c27a1c..62d003f 100644 (file)
@@ -25,7 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// Flags: --turbo-deoptimization
+// Flags: --turbo-deoptimization --noharmony-scoping
+// Flags: --noharmony-classes --noharmony-object-literals
 
 function CheckStrictMode(code, exception) {
   assertDoesNotThrow(code);
index c3f889b..52f1506 100644 (file)
@@ -193,7 +193,8 @@ assertEquals("\u03B2\u03B3\u03B4\u03B5\u03B4\u03B5\u03B6\u03B7",
     utf.substring(5,1) + utf.substring(3,7));
 
 // Externalizing strings.
-var a = "123456789" + "qwertyuiopasdfghjklzxcvbnm";
+var a = "internalized dummy";
+a = "123456789" + "qwertyuiopasdfghjklzxcvbnm";
 var b = "23456789qwertyuiopasdfghjklzxcvbn"
 assertEquals(a.slice(1,-1), b);
 
index 077662e..9ba07f7 100644 (file)
   # RegExp flags.
   'ecma_3/RegExp/15.10.4.1-6': [FAIL_OK],
 
-  # PCRE doesn't allow subpattern nesting deeper than 200, this tests
-  # depth 500.  JSC detects the case, and return null from the match,
-  # and passes this test (the test doesn't check for a correct return
-  # value).
-  'ecma_3/RegExp/regress-119909': [PASS, FAIL_OK],
-
-
-  # Difference in the way capturing subpatterns work.  In JS, when the
-  # 'minimum repeat count' is reached, the empty string must not match.
-  # In this case, we are similar but not identical to JSC.  Hard to
-  # support the JS behavior with PCRE, so maybe emulate JSC?
-  'ecma_3/RegExp/regress-209919': [PASS, FAIL_OK],
-  'js1_5/extensions/regress-459606': [PASS, FAIL_OK],
-
-
   # PCRE's match limit is reached.  SpiderMonkey hangs on the first one,
   # JSC returns true somehow.  Maybe they up the match limit?  There is
   # an open V8 bug 676063 about this.
   'js1_5/Regress/regress-230216-2': [FAIL_OK],
 
 
-  # Regexp too long for PCRE.
-  'js1_5/Regress/regress-280769': [PASS, FAIL],
-  'js1_5/Regress/regress-280769-1': [PASS, FAIL],
-  'js1_5/Regress/regress-280769-2': [PASS, FAIL],
-  'js1_5/Regress/regress-280769-4': [PASS, FAIL],
-  'js1_5/Regress/regress-280769-5': [PASS, FAIL],
+  # BUG(v8:3767)
+  'js1_5/Regress/regress-280769-2': [PASS, ['arch == arm64', SKIP]],
+
+  # Regexps too big.
+  'js1_5/Regress/regress-280769-1': [SKIP],
+  'js1_5/Regress/regress-280769-5': [SKIP],
 
 
   # We do not support static RegExp.multiline - should we?
 
 
   # No support for toSource().
-  'js1_5/Regress/regress-248444': [FAIL_OK],
   'js1_5/Regress/regress-313967-01': [FAIL_OK],
   'js1_5/Regress/regress-313967-02': [FAIL_OK],
+  'js1_5/extensions/regress-459606': [FAIL_OK],
 
   # This fails because we don't have stack space for Function.prototype.apply
   # with very large numbers of arguments.  The test uses 2^24 arguments.
   'js1_5/Regress/regress-336100': [FAIL_OK],
 
 
-  # Regular expression test failures due to PCRE. We match JSC (ie, perl)
-  # behavior and not the ECMA spec.
-  'ecma_3/RegExp/perlstress-001': [PASS, FAIL_OK],
-  'ecma_3/RegExp/regress-334158': [PASS, FAIL],
-
   # This test fails due to http://code.google.com/p/v8/issues/detail?id=187
   # Failure to clear captures when a lookahead is unwound.
   'ecma_3/RegExp/15.10.2-1': [PASS, FAIL_OK],
 
 
 ['arch ==  arm64', {
-  # BUG(v8:3152): Runs out of stack in debug mode.
-  'js1_5/extensions/regress-355497': [FAIL_OK, ['mode == debug', SKIP]],
-
   # BUG(v8:3503): Times out in debug mode.
   'js1_5/Regress/regress-280769-2': [PASS, FAIL, ['mode == debug', SKIP]],
+  # BUG(v8:3716): Flaky failure.
+  'ecma/Date/15.9.5.26-1': [PASS, FAIL],
 }],  # 'arch ==  arm64'
 
 
   'js1_5/GC/regress-203278-2': [PASS, TIMEOUT, NO_VARIANTS],
 }],  # 'arch == mipsel or arch == mips64el'
 
+['arch == mips64el and simulator_run == True', {
+  'js1_5/extensions/regress-355497': [FAIL_OK, 'Flags: --sim-stack-size=512'],
+}],
+
 ['arch == mips', {
 
   # BUG(3251229): Times out when running new crankshaft test script.
   'js1_5/extensions/regress-330569': [SKIP],
   'js1_5/extensions/regress-351448': [SKIP],
   'js1_5/extensions/regress-336410-1': [SKIP],
+
+  # BUG(3152): Avoid C stack overflow.
+  'js1_5/extensions/regress-355497': [FAIL_OK, 'Flags: --sim-stack-size=512'],
 }],  # 'arch == arm64 and simulator_run == True'
 ]
index 2b9230c..97b9081 100644 (file)
@@ -24,6 +24,8 @@
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Flags: --noharmony-scoping
 
 "use strict";
 const x = 42;
index 08c4288..cc3d7bb 100644 (file)
 
 # A template that performs the same strict-mode test in different
 # scopes (global scope, function scope, and nested function scope).
-def StrictTest(name, source):
-  Test(name, '"use strict";\n' + source, "strict_function")
+def StrictTest(name, source, legacy):
+  if legacy:
+    extra_flags = [
+      "--noharmony-scoping",
+      "--noharmony-classes",
+      "--noharmony-object-literals"]
+  else:
+    extra_flags = []
+  Test(name, '"use strict";\n' + source, "strict_function",
+       extra_flags)
   Test(name + '-infunc',
        'function foo() {\n "use strict";\n' + source +'\n}\n',
-       "strict_function")
+       "strict_function", 
+       extra_flags)
   Test(name + '-infunc2',
        'function foo() {\n "use strict";\n  function bar() {\n' +
        source +'\n }\n}\n',
-       "strict_function")
+       "strict_function",
+       extra_flags)
 
 # Not testing with-scope, since with is not allowed in strict mode at all.
 
 StrictTest("block", """
   { function foo() { } }
-""")
+""", True)
 
 StrictTest("try-w-catch", """
   try { function foo() { } } catch (e) { }
-""")
+""", True)
 
 StrictTest("try-w-finally", """
   try { function foo() { } } finally { }
-""")
+""", True)
 
 StrictTest("catch", """
   try { } catch (e) { function foo() { } }
-""")
+""", True)
 
 StrictTest("finally", """
   try { } finally { function foo() { } }
-""")
+""", True)
 
 StrictTest("for", """
   for (;;) { function foo() { } }
-""")
+""", True)
 
 StrictTest("while", """
   while (true) { function foo() { } }
-""")
+""", True)
 
 StrictTest("do", """
   do { function foo() { } } while (true);
-""")
+""", True)
 
 StrictTest("then", """
   if (true) { function foo() { } }
-""")
+""", True)
 
 
 StrictTest("then-w-else", """
   if (true) { function foo() { } } else { }
-""")
+""", True)
 
 
 StrictTest("else", """
   if (true) { } else { function foo() { } }
-""")
+""", True)
 
 StrictTest("switch-case", """
   switch (true) { case true: function foo() { } }
-""")
+""", False)
 
 StrictTest("labeled", """
   label: function foo() { }
-""")
+""", False)
 
 
 
index 850c0a4..ddd311c 100644 (file)
@@ -34,6 +34,10 @@ from testrunner.local import utils
 from testrunner.objects import testcase
 
 
+FLAGS_PATTERN = re.compile(r"//\s+Flags:(.*)")
+INVALID_FLAGS = ["--enable-slow-asserts"]
+
+
 class PreparserTestSuite(testsuite.TestSuite):
   def __init__(self, name, root):
     super(PreparserTestSuite, self).__init__(name, root)
@@ -59,12 +63,13 @@ class PreparserTestSuite(testsuite.TestSuite):
 
   def _ParsePythonTestTemplates(self, result, filename):
     pathname = os.path.join(self.root, filename + ".pyt")
-    def Test(name, source, expectation):
+    def Test(name, source, expectation, extra_flags=[]):
       source = source.replace("\n", " ")
       testname = os.path.join(filename, name)
       flags = ["-e", source]
       if expectation:
         flags += ["--throws"]
+      flags += extra_flags
       test = testcase.TestCase(self, testname, flags=flags)
       result.append(test)
     def Template(name, source):
@@ -104,6 +109,15 @@ class PreparserTestSuite(testsuite.TestSuite):
     first = testcase.flags[0]
     if first != "-e":
       testcase.flags[0] = os.path.join(self.root, first)
+      source = self.GetSourceForTest(testcase)
+      result = []
+      flags_match = re.findall(FLAGS_PATTERN, source)
+      for match in flags_match:
+        result += match.strip().split()
+      result += context.mode_flags
+      result = [x for x in result if x not in INVALID_FLAGS]
+      result.append(os.path.join(self.root, testcase.path + ".js"))
+      return testcase.flags + result
     return testcase.flags
 
   def GetSourceForTest(self, testcase):
index 3791dfd..8662159 100644 (file)
   '15.2.3.14-1-2': [PASS, FAIL_OK],
   '15.2.3.14-1-3': [PASS, FAIL_OK],
 
+  # String.prototype.contains renamed to 'S.p.includes'
+  'String.prototype.contains_FailBadLocation' : [FAIL_OK],
+  'String.prototype.contains_FailLocation' : [FAIL_OK],
+  'String.prototype.contains_FailMissingLetter' : [FAIL_OK],
+  'String.prototype.contains_lengthProp' : [FAIL_OK],
+  'String.prototype.contains_Success' : [FAIL_OK],
+  'String.prototype.contains_SuccessNoLocation' : [FAIL_OK],
+
+
   ############################ SKIPPED TESTS #############################
 
   # These tests take a looong time to run in debug mode.
index 87fec5a..d32f8f3 100644 (file)
   '11.2.3_b': [FAIL],
   '12.2.3_b': [FAIL],
 
+  ############################### ES6 ###################################
+  # ES6 allows block-local functions.
+  'Sbp_A1_T1': [PASS, FAIL_OK],
+  'Sbp_A2_T1': [PASS, FAIL_OK],
+  'Sbp_A2_T2': [PASS, FAIL_OK],
+  'Sbp_A3_T1': [PASS, FAIL_OK],
+  'Sbp_A3_T2': [PASS, FAIL_OK],
+  'Sbp_A4_T1': [PASS, FAIL_OK],
+  'Sbp_A4_T2': [PASS, FAIL_OK],
+  'Sbp_A5_T1': [PASS], # Test is broken (strict reference to unbound variable)
+  'Sbp_A5_T2': [PASS, FAIL_OK],
+
+  # Passes in ES6 since {__arr} syntax is parsed as object literal.
+  'S12.1_A4_T2': [PASS, FAIL_OK],
+  'S12.6.4_A15': [PASS, FAIL_OK],
+
   ######################## NEEDS INVESTIGATION ###########################
 
   # These test failures are specific to the intl402 suite and need investigation
diff --git a/deps/v8/test/unittests/base/iterator-unittest.cc b/deps/v8/test/unittests/base/iterator-unittest.cc
new file mode 100644 (file)
index 0000000..8da26ce
--- /dev/null
@@ -0,0 +1,61 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/iterator.h"
+
+#include <deque>
+
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace base {
+
+TEST(IteratorTest, IteratorRangeEmpty) {
+  base::iterator_range<char*> r;
+  EXPECT_EQ(r.begin(), r.end());
+  EXPECT_EQ(r.end(), r.cend());
+  EXPECT_EQ(r.begin(), r.cbegin());
+  EXPECT_TRUE(r.empty());
+  EXPECT_EQ(0, r.size());
+}
+
+
+TEST(IteratorTest, IteratorRangeArray) {
+  int array[10] = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9};
+  base::iterator_range<int*> r1(&array[0], &array[10]);
+  for (auto i : r1) {
+    EXPECT_EQ(array[i], i);
+  }
+  EXPECT_EQ(10, r1.size());
+  EXPECT_FALSE(r1.empty());
+  for (size_t i = 0; i < arraysize(array); ++i) {
+    EXPECT_EQ(r1[i], array[i]);
+  }
+  base::iterator_range<int*> r2(&array[0], &array[0]);
+  EXPECT_EQ(0, r2.size());
+  EXPECT_TRUE(r2.empty());
+  for (auto i : array) {
+    EXPECT_EQ(r2.end(), std::find(r2.begin(), r2.end(), i));
+  }
+}
+
+
+TEST(IteratorTest, IteratorRangeDeque) {
+  typedef std::deque<unsigned> C;
+  C c;
+  c.push_back(1);
+  c.push_back(2);
+  c.push_back(2);
+  base::iterator_range<typename C::iterator> r(c.begin(), c.end());
+  EXPECT_EQ(3, r.size());
+  EXPECT_FALSE(r.empty());
+  EXPECT_TRUE(c.begin() == r.begin());
+  EXPECT_TRUE(c.end() == r.end());
+  EXPECT_EQ(0, std::count(r.begin(), r.end(), 0));
+  EXPECT_EQ(1, std::count(r.begin(), r.end(), 1));
+  EXPECT_EQ(2, std::count(r.begin(), r.end(), 2));
+}
+
+}  // namespace base
+}  // namespace v8
index 06fbee0..b17a9b9 100644 (file)
 #endif
 #include "testing/gtest/include/gtest/gtest.h"
 
+#if V8_OS_ANDROID
+#define DISABLE_ON_ANDROID(Name) DISABLED_##Name
+#else
+#define DISABLE_ON_ANDROID(Name) Name
+#endif
+
 namespace v8 {
 namespace base {
 
@@ -33,13 +39,13 @@ namespace {
 class SelfJoinThread FINAL : public Thread {
  public:
   SelfJoinThread() : Thread(Options("SelfJoinThread")) {}
-  virtual void Run() OVERRIDE { Join(); }
+  void Run() FINAL { Join(); }
 };
 
 }  // namespace
 
 
-TEST(Thread, SelfJoin) {
+TEST(Thread, DISABLE_ON_ANDROID(SelfJoin)) {
   SelfJoinThread thread;
   thread.Start();
   thread.Join();
@@ -61,7 +67,7 @@ class ThreadLocalStorageTest : public Thread, public ::testing::Test {
     }
   }
 
-  virtual void Run() FINAL OVERRIDE {
+  void Run() FINAL {
     for (size_t i = 0; i < arraysize(keys_); i++) {
       CHECK(!Thread::HasThreadLocal(keys_[i]));
     }
@@ -91,10 +97,12 @@ class ThreadLocalStorageTest : public Thread, public ::testing::Test {
 
  private:
   static void* GetValue(size_t x) {
-    return reinterpret_cast<void*>(static_cast<uintptr_t>(x + 1));
+    return bit_cast<void*>(static_cast<uintptr_t>(x + 1));
   }
 
-  Thread::LocalStorageKey keys_[256];
+  // Older versions of Android have fewer TLS slots (nominally 64, but the
+  // system uses "about 5 of them" itself).
+  Thread::LocalStorageKey keys_[32];
 };
 
 }  // namespace
index 6e4306d..fbdf87a 100644 (file)
@@ -1482,6 +1482,21 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
 // Miscellaneous.
 
 
+TEST_F(InstructionSelectorTest, Float64SubWithMinusZero) {
+  StreamBuilder m(this, kMachFloat64, kMachFloat64);
+  Node* const p0 = m.Parameter(0);
+  Node* const n = m.Float64Sub(m.Float64Constant(-0.0), p0);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArmVnegF64, s[0]->arch_opcode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+}
+
+
 TEST_F(InstructionSelectorTest, Int32AddWithInt32Mul) {
   {
     StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
@@ -1558,6 +1573,150 @@ TEST_F(InstructionSelectorTest, Int32AddWithInt32MulHigh) {
 }
 
 
+TEST_F(InstructionSelectorTest, Int32AddWithWord32And) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xff)), p1);
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxtab, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xff)));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxtab, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(m.Word32And(p0, m.Int32Constant(0xffff)), p1);
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxtah, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(p1, m.Word32And(p0, m.Int32Constant(0xffff)));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxtah, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddWithWord32SarWithWord32Shl) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24)),
+        p1);
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxtab, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(
+        p1,
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24)));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxtab, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16)),
+        p1);
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxtah, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const p1 = m.Parameter(1);
+    Node* const r = m.Int32Add(
+        p1,
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16)));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxtah, s[0]->arch_opcode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(2)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
 TEST_F(InstructionSelectorTest, Int32SubWithInt32Mul) {
   StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
   m.Return(
@@ -1943,6 +2102,72 @@ TEST_F(InstructionSelectorTest, Word32AndWithBfcImmediateForARMv7) {
 }
 
 
+TEST_F(InstructionSelectorTest, Word32AndWith0xffff) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const r = m.Word32And(p0, m.Int32Constant(0xffff));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxth, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const r = m.Word32And(m.Int32Constant(0xffff), p0);
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmUxth, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const r =
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxtb, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const r =
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArmSxth, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(0, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
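
A note on the patterns these selector tests expect: x & 0xff and x & 0xffff
zero-extend the low byte or halfword (ARM uxtb/uxth, or uxtab/uxtah when
fused into an add), while (x << 24) >> 24 and (x << 16) >> 16 sign-extend
them (sxtb/sxth, sxtab/sxtah). The JavaScript-level equivalence, sketched
with the mjsunit helpers:

  var x = 0x1ff;
  assertEquals(0xff, x & 0xff);          // zero-extend low byte
  assertEquals(-1, (x << 24) >> 24);     // sign-extend low byte: 0xff -> -1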
 TEST_F(InstructionSelectorTest, Word32ShrWithWord32AndWithImmediateForARMv7) {
   TRACED_FORRANGE(int32_t, lsb, 0, 31) {
     TRACED_FORRANGE(int32_t, width, 1, 32 - lsb) {
index 96b8a83..cd3ce09 100644 (file)
@@ -808,7 +808,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
     m.Return(m.Int32Constant(0));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tbnz32, s[0]->arch_opcode());
+    EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
     EXPECT_EQ(4U, s[0]->InputCount());
     EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
     EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
@@ -827,7 +828,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnRight) {
     m.Return(m.Int32Constant(0));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tbz32, s[0]->arch_opcode());
+    EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
     EXPECT_EQ(4U, s[0]->InputCount());
     EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
     EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
@@ -847,7 +849,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
     m.Return(m.Int32Constant(0));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tbnz32, s[0]->arch_opcode());
+    EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
     EXPECT_EQ(4U, s[0]->InputCount());
     EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
     EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
@@ -866,7 +869,8 @@ TEST_F(InstructionSelectorTest, Word32AndBranchWithOneBitMaskOnLeft) {
     m.Return(m.Int32Constant(0));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tbz32, s[0]->arch_opcode());
+    EXPECT_EQ(kArm64TestAndBranch32, s[0]->arch_opcode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
     EXPECT_EQ(4U, s[0]->InputCount());
     EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
     EXPECT_EQ(bit, s.ToInt32(s[0]->InputAt(1)));
@@ -886,7 +890,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
     m.Return(m.Int32Constant(0));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tbnz, s[0]->arch_opcode());
+    EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
     EXPECT_EQ(4U, s[0]->InputCount());
     EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
     EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
@@ -905,7 +910,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnRight) {
     m.Return(m.Int32Constant(0));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tbz, s[0]->arch_opcode());
+    EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
     EXPECT_EQ(4U, s[0]->InputCount());
     EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
     EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
@@ -925,7 +931,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
     m.Return(m.Int32Constant(0));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tbnz, s[0]->arch_opcode());
+    EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
     EXPECT_EQ(4U, s[0]->InputCount());
     EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
     EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
@@ -944,7 +951,8 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
     m.Return(m.Int32Constant(0));
     Stream s = m.Build();
     ASSERT_EQ(1U, s.size());
-    EXPECT_EQ(kArm64Tbz, s[0]->arch_opcode());
+    EXPECT_EQ(kArm64TestAndBranch, s[0]->arch_opcode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
     EXPECT_EQ(4U, s[0]->InputCount());
     EXPECT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
     EXPECT_EQ(bit, s.ToInt64(s[0]->InputAt(1)));
@@ -952,6 +960,43 @@ TEST_F(InstructionSelectorTest, Word64AndBranchWithOneBitMaskOnLeft) {
 }
 
 
+TEST_F(InstructionSelectorTest, CompareAgainstZeroAndBranch) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* p0 = m.Parameter(0);
+    m.Branch(p0, &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+    EXPECT_EQ(kNotEqual, s[0]->flags_condition());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  }
+
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    MLabel a, b;
+    Node* p0 = m.Parameter(0);
+    m.Branch(m.Word32BinaryNot(p0), &a, &b);
+    m.Bind(&a);
+    m.Return(m.Int32Constant(1));
+    m.Bind(&b);
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64CompareAndBranch32, s[0]->arch_opcode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+    EXPECT_EQ(3U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  }
+}
+
+
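CompareAgainstZeroAndBranch documents that a branch on a bare 32-bit value needs no explicit comparison: ARM64 folds the zero test into the branch (cbnz/cbz), and Word32BinaryNot on the operand merely flips the expected condition. A condensed model of the rule (hypothetical helper, not the V8 selector API):

    #include <cassert>
    #include <cstring>

    // Pick the ARM64 mnemonic for "branch when p is true".
    static const char* SelectBranch(bool operand_is_negated) {
      // Branch(x)                  => cbnz: taken when x != 0 (kNotEqual).
      // Branch(Word32BinaryNot(x)) => cbz:  taken when x == 0 (kEqual).
      return operand_is_negated ? "cbz" : "cbnz";
    }

    int main() {
      assert(std::strcmp(SelectBranch(false), "cbnz") == 0);
      assert(std::strcmp(SelectBranch(true), "cbz") == 0);
      return 0;
    }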
 // -----------------------------------------------------------------------------
 // Add and subtract instructions with overflow.
 
@@ -1229,6 +1274,76 @@ INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
                         ::testing::ValuesIn(kShiftInstructions));
 
 
+TEST_F(InstructionSelectorTest, Word64ShlWithChangeInt32ToInt64) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt64, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const n = m.Word64Shl(m.ChangeInt32ToInt64(p0), m.Int64Constant(x));
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsl, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt64, kMachUint32);
+    Node* const p0 = m.Parameter(0);
+    Node* const n = m.Word64Shl(m.ChangeUint32ToUint64(p0), m.Int64Constant(x));
+    m.Return(n);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsl, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[0]->Output()));
+  }
+}
+
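Both Word64Shl tests rely on the same arithmetic fact: a left shift by 32 or more discards every bit in which sign- and zero-extension of a 32-bit value differ, so the Change node can be elided and a single lsl emitted. A standalone check (not V8 code; two's-complement target assumed):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t v = -7;
      for (int x = 32; x < 64; ++x) {
        uint64_t sign_ext = static_cast<uint64_t>(static_cast<int64_t>(v)) << x;
        uint64_t zero_ext = static_cast<uint64_t>(static_cast<uint32_t>(v)) << x;
        assert(sign_ext == zero_ext);  // the differing bits were shifted out
      }
      return 0;
    }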
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Sar) {
+  StreamBuilder m(this, kMachInt32, kMachInt64);
+  Node* const p = m.Parameter(0);
+  Node* const t = m.TruncateInt64ToInt32(m.Word64Sar(p, m.Int64Constant(32)));
+  m.Return(t);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kArm64Lsr, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(32, s.ToInt64(s[0]->InputAt(1)));
+  ASSERT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
+  TRACED_FORRANGE(int64_t, x, 32, 63) {
+    StreamBuilder m(this, kMachInt32, kMachInt64);
+    Node* const p = m.Parameter(0);
+    Node* const t = m.TruncateInt64ToInt32(m.Word64Shr(p, m.Int64Constant(x)));
+    m.Return(t);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Lsr, s[0]->arch_opcode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p), s.ToVreg(s[0]->InputAt(0)));
+    EXPECT_EQ(x, s.ToInt64(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(t), s.ToVreg(s[0]->OutputAt(0)));
+  }
+}
+
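Note the asymmetry between the two truncation tests: the Shr variant ranges over shifts 32..63, while the Sar variant pins the shift to exactly 32. After truncation to 32 bits, arithmetic and logical right shifts agree only at 32; for larger shifts the sign fill reaches bit 31 of the low word. A standalone check (not V8 code; two's-complement target assumed):

    #include <cassert>
    #include <cstdint>

    int main() {
      int64_t x = -(INT64_C(1) << 40);  // negative, non-trivial bit pattern
      // At a shift of 32 the low words of sar and lsr coincide...
      assert(static_cast<uint32_t>(x >> 32) ==
             static_cast<uint32_t>(static_cast<uint64_t>(x) >> 32));
      // ...but at 33 the sign fill already differs in bit 31.
      assert(static_cast<uint32_t>(x >> 33) !=
             static_cast<uint32_t>(static_cast<uint64_t>(x) >> 33));
      return 0;
    }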
+
 // -----------------------------------------------------------------------------
 // Mul and Div instructions.
 
@@ -2024,6 +2139,38 @@ TEST_F(InstructionSelectorTest, Int32MulHighWithParameters) {
   EXPECT_EQ(s.ToVreg(n), s.ToVreg(s[1]->Output()));
 }
 
+
+TEST_F(InstructionSelectorTest, Word32SarWithWord32Shl) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const r =
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(24)), m.Int32Constant(24));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Sxtb32, s[0]->arch_opcode());
+    ASSERT_EQ(1U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    Node* const p0 = m.Parameter(0);
+    Node* const r =
+        m.Word32Sar(m.Word32Shl(p0, m.Int32Constant(16)), m.Int32Constant(16));
+    m.Return(r);
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kArm64Sxth32, s[0]->arch_opcode());
+    ASSERT_EQ(1U, s[0]->InputCount());
+    EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(s.ToVreg(r), s.ToVreg(s[0]->Output()));
+  }
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index 17d6513..763a443 100644
@@ -5,6 +5,7 @@
 #include "src/code-stubs.h"
 #include "src/compiler/change-lowering.h"
 #include "src/compiler/js-graph.h"
+#include "src/compiler/linkage.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/simplified-operator.h"
 #include "test/unittests/compiler/compiler-test-utils.h"
@@ -24,7 +25,7 @@ namespace compiler {
 class ChangeLoweringTest : public GraphTest {
  public:
   ChangeLoweringTest() : simplified_(zone()) {}
-  virtual ~ChangeLoweringTest() {}
+  ~ChangeLoweringTest() OVERRIDE {}
 
   virtual MachineType WordRepresentation() const = 0;
 
@@ -61,12 +62,8 @@ class ChangeLoweringTest : public GraphTest {
                   : SmiTagging<8>::SmiValueSize();
   }
 
-  Node* Parameter(int32_t index = 0) {
-    return graph()->NewNode(common()->Parameter(index), graph()->start());
-  }
-
   Reduction Reduce(Node* node) {
-    MachineOperatorBuilder machine(WordRepresentation());
+    MachineOperatorBuilder machine(zone(), WordRepresentation());
     JSOperatorBuilder javascript(zone());
     JSGraph jsgraph(graph(), common(), &javascript, &machine);
     CompilationInfo info(isolate(), zone());
@@ -111,11 +108,9 @@ class ChangeLoweringCommonTest
     : public ChangeLoweringTest,
       public ::testing::WithParamInterface<MachineType> {
  public:
-  virtual ~ChangeLoweringCommonTest() {}
+  ~ChangeLoweringCommonTest() OVERRIDE {}
 
-  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
-    return GetParam();
-  }
+  MachineType WordRepresentation() const FINAL { return GetParam(); }
 };
 
 
@@ -178,16 +173,15 @@ INSTANTIATE_TEST_CASE_P(ChangeLoweringTest, ChangeLoweringCommonTest,
 
 class ChangeLowering32Test : public ChangeLoweringTest {
  public:
-  virtual ~ChangeLowering32Test() {}
-  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
-    return kRepWord32;
-  }
+  ~ChangeLowering32Test() OVERRIDE {}
+  MachineType WordRepresentation() const FINAL { return kRepWord32; }
 };
 
 
 TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
   Node* val = Parameter(0);
   Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
+  NodeProperties::SetBounds(val, Bounds(Type::None(), Type::Signed32()));
   Reduction reduction = Reduce(node);
   ASSERT_TRUE(reduction.Changed());
 
@@ -212,6 +206,19 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTagged) {
 }
 
 
+TARGET_TEST_F(ChangeLowering32Test, ChangeInt32ToTaggedSmall) {
+  Node* val = Parameter(0);
+  Node* node = graph()->NewNode(simplified()->ChangeInt32ToTagged(), val);
+  NodeProperties::SetBounds(val, Bounds(Type::None(), Type::SignedSmall()));
+  Reduction reduction = Reduce(node);
+  ASSERT_TRUE(reduction.Changed());
+
+  Node* change = reduction.replacement();
+  Capture<Node*> add, branch, heap_number, if_true;
+  EXPECT_THAT(change, IsWord32Shl(val, IsInt32Constant(SmiShiftAmount())));
+}
+
+
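ChangeInt32ToTaggedSmall captures the fast path enabled by the new bounds annotation: with 32-bit smis (kSmiTag == 0, kSmiTagSize == 1), a value already typed as SignedSmall tags losslessly with one left shift, so the overflow branch and heap-number allocation of the general lowering disappear. A sketch of the encoding (not V8 code; two's-complement target assumed for the arithmetic untag):

    #include <cassert>
    #include <cstdint>

    int main() {
      int32_t v = -12345;  // provably in signed-small range
      // Tag: shift left by kSmiTagSize; a clear low bit marks a smi.
      int32_t smi = static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
      assert((smi & 1) == 0);
      // Untag: an arithmetic right shift recovers the original value.
      assert((smi >> 1) == v);
      return 0;
    }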
 TARGET_TEST_F(ChangeLowering32Test, ChangeTaggedToFloat64) {
   STATIC_ASSERT(kSmiTag == 0);
   STATIC_ASSERT(kSmiTagSize == 1);
@@ -324,10 +331,8 @@ TARGET_TEST_F(ChangeLowering32Test, ChangeUint32ToTagged) {
 
 class ChangeLowering64Test : public ChangeLoweringTest {
  public:
-  virtual ~ChangeLowering64Test() {}
-  virtual MachineType WordRepresentation() const FINAL OVERRIDE {
-    return kRepWord64;
-  }
+  ~ChangeLowering64Test() OVERRIDE {}
+  MachineType WordRepresentation() const FINAL { return kRepWord64; }
 };
 
 
diff --git a/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
new file mode 100644
index 0000000..c713815
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/common-operator-reducer-unittest.cc
@@ -0,0 +1,110 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/common-operator-reducer.h"
+#include "src/compiler/machine-type.h"
+#include "test/unittests/compiler/graph-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class CommonOperatorReducerTest : public GraphTest {
+ public:
+  explicit CommonOperatorReducerTest(int num_parameters = 1)
+      : GraphTest(num_parameters) {}
+  ~CommonOperatorReducerTest() OVERRIDE {}
+
+ protected:
+  Reduction Reduce(Node* node) {
+    CommonOperatorReducer reducer;
+    return reducer.Reduce(node);
+  }
+};
+
+
+namespace {
+
+const BranchHint kBranchHints[] = {BranchHint::kNone, BranchHint::kFalse,
+                                   BranchHint::kTrue};
+
+
+const MachineType kMachineTypes[] = {
+    kMachFloat32, kMachFloat64,   kMachInt8,   kMachUint8,  kMachInt16,
+    kMachUint16,  kMachInt32,     kMachUint32, kMachInt64,  kMachUint64,
+    kMachPtr,     kMachAnyTagged, kRepBit,     kRepWord8,   kRepWord16,
+    kRepWord32,   kRepWord64,     kRepFloat32, kRepFloat64, kRepTagged};
+
+
+const Operator kOp0(0, Operator::kNoProperties, "Op0", 0, 0, 0, 1, 1, 0);
+
+}  // namespace
+
+
+// -----------------------------------------------------------------------------
+// EffectPhi
+
+
+TEST_F(CommonOperatorReducerTest, RedundantEffectPhi) {
+  const int kMaxInputs = 64;
+  Node* inputs[kMaxInputs];
+  Node* const input = graph()->NewNode(&kOp0);
+  TRACED_FORRANGE(int, input_count, 2, kMaxInputs - 1) {
+    int const value_input_count = input_count - 1;
+    for (int i = 0; i < value_input_count; ++i) {
+      inputs[i] = input;
+    }
+    inputs[value_input_count] = graph()->start();
+    Reduction r = Reduce(graph()->NewNode(
+        common()->EffectPhi(value_input_count), input_count, inputs));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(input, r.replacement());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Phi
+
+
+TEST_F(CommonOperatorReducerTest, RedundantPhi) {
+  const int kMaxInputs = 64;
+  Node* inputs[kMaxInputs];
+  Node* const input = graph()->NewNode(&kOp0);
+  TRACED_FORRANGE(int, input_count, 2, kMaxInputs - 1) {
+    int const value_input_count = input_count - 1;
+    TRACED_FOREACH(MachineType, type, kMachineTypes) {
+      for (int i = 0; i < value_input_count; ++i) {
+        inputs[i] = input;
+      }
+      inputs[value_input_count] = graph()->start();
+      Reduction r = Reduce(graph()->NewNode(
+          common()->Phi(type, value_input_count), input_count, inputs));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_EQ(input, r.replacement());
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// Select
+
+
+TEST_F(CommonOperatorReducerTest, RedundantSelect) {
+  Node* const input = graph()->NewNode(&kOp0);
+  TRACED_FOREACH(BranchHint, hint, kBranchHints) {
+    TRACED_FOREACH(MachineType, type, kMachineTypes) {
+      Reduction r = Reduce(
+          graph()->NewNode(common()->Select(type, hint), input, input, input));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_EQ(input, r.replacement());
+    }
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
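All three tests in this new file exercise one reduction rule: an EffectPhi, Phi, or Select whose value inputs are all the same node contributes nothing and reduces to that node. A minimal standalone sketch of the rule (hypothetical Node type, not the V8 reducer API):

    #include <cassert>
    #include <vector>

    struct Node {
      std::vector<Node*> value_inputs;
    };

    // Phi(x, x, ..., x; control) => x; anything else is left alone.
    static Node* ReduceRedundantPhi(Node* phi) {
      Node* first = phi->value_inputs.front();
      for (Node* input : phi->value_inputs) {
        if (input != first) return phi;
      }
      return first;
    }

    int main() {
      Node x, y;
      Node redundant{{&x, &x, &x}};
      Node mixed{{&x, &y}};
      assert(ReduceRedundantPhi(&redundant) == &x);
      assert(ReduceRedundantPhi(&mixed) == &mixed);
      return 0;
    }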
index 2c88c4b..d0ac145 100644
@@ -2,11 +2,12 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/compiler/common-operator.h"
-
 #include <limits>
 
-#include "src/compiler/operator-properties-inl.h"
+#include "src/compiler/common-operator.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties.h"
 #include "test/unittests/test-utils.h"
 
 namespace v8 {
@@ -117,7 +118,7 @@ namespace {
 class CommonOperatorTest : public TestWithZone {
  public:
   CommonOperatorTest() : common_(zone()) {}
-  virtual ~CommonOperatorTest() {}
+  ~CommonOperatorTest() OVERRIDE {}
 
   CommonOperatorBuilder* common() { return &common_; }
 
diff --git a/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
new file mode 100644
index 0000000..56b4a2b
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/control-equivalence-unittest.cc
@@ -0,0 +1,255 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/bit-vector.h"
+#include "src/compiler/control-equivalence.h"
+#include "src/compiler/graph-visualizer.h"
+#include "src/compiler/node-properties-inl.h"
+#include "src/zone-containers.h"
+#include "test/unittests/compiler/graph-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+#define ASSERT_EQUIVALENCE(...)                           \
+  do {                                                    \
+    Node* __n[] = {__VA_ARGS__};                          \
+    ASSERT_TRUE(IsEquivalenceClass(arraysize(__n), __n)); \
+  } while (false);
+
+class ControlEquivalenceTest : public GraphTest {
+ public:
+  ControlEquivalenceTest() : all_nodes_(zone()), classes_(zone()) {
+    Store(graph()->start());
+  }
+
+ protected:
+  void ComputeEquivalence(Node* node) {
+    graph()->SetEnd(graph()->NewNode(common()->End(), node));
+    if (FLAG_trace_turbo) {
+      OFStream os(stdout);
+      os << AsDOT(*graph());
+    }
+    ControlEquivalence equivalence(zone(), graph());
+    equivalence.Run(node);
+    classes_.resize(graph()->NodeCount());
+    for (Node* node : all_nodes_) {
+      classes_[node->id()] = equivalence.ClassOf(node);
+    }
+  }
+
+  bool IsEquivalenceClass(size_t length, Node** nodes) {
+    BitVector in_class(graph()->NodeCount(), zone());
+    size_t expected_class = classes_[nodes[0]->id()];
+    for (size_t i = 0; i < length; ++i) {
+      in_class.Add(nodes[i]->id());
+    }
+    for (Node* node : all_nodes_) {
+      if (in_class.Contains(node->id())) {
+        if (classes_[node->id()] != expected_class) return false;
+      } else {
+        if (classes_[node->id()] == expected_class) return false;
+      }
+    }
+    return true;
+  }
+
+  Node* Value() { return NumberConstant(0.0); }
+
+  Node* Branch(Node* control) {
+    return Store(graph()->NewNode(common()->Branch(), Value(), control));
+  }
+
+  Node* IfTrue(Node* control) {
+    return Store(graph()->NewNode(common()->IfTrue(), control));
+  }
+
+  Node* IfFalse(Node* control) {
+    return Store(graph()->NewNode(common()->IfFalse(), control));
+  }
+
+  Node* Merge2(Node* control1, Node* control2) {
+    return Store(graph()->NewNode(common()->Merge(2), control1, control2));
+  }
+
+  Node* Loop2(Node* control) {
+    return Store(graph()->NewNode(common()->Loop(2), control, control));
+  }
+
+  Node* End(Node* control) {
+    return Store(graph()->NewNode(common()->End(), control));
+  }
+
+ private:
+  Node* Store(Node* node) {
+    all_nodes_.push_back(node);
+    return node;
+  }
+
+  ZoneVector<Node*> all_nodes_;
+  ZoneVector<size_t> classes_;
+};
+
+
+// -----------------------------------------------------------------------------
+// Test cases.
+
+
+TEST_F(ControlEquivalenceTest, Empty1) {
+  Node* start = graph()->start();
+  ComputeEquivalence(start);
+
+  ASSERT_EQUIVALENCE(start);
+}
+
+
+TEST_F(ControlEquivalenceTest, Empty2) {
+  Node* start = graph()->start();
+  Node* end = End(start);
+  ComputeEquivalence(end);
+
+  ASSERT_EQUIVALENCE(start, end);
+}
+
+
+TEST_F(ControlEquivalenceTest, Diamond1) {
+  Node* start = graph()->start();
+  Node* b = Branch(start);
+  Node* t = IfTrue(b);
+  Node* f = IfFalse(b);
+  Node* m = Merge2(t, f);
+  ComputeEquivalence(m);
+
+  ASSERT_EQUIVALENCE(b, m, start);
+  ASSERT_EQUIVALENCE(f);
+  ASSERT_EQUIVALENCE(t);
+}
+
+
+TEST_F(ControlEquivalenceTest, Diamond2) {
+  Node* start = graph()->start();
+  Node* b1 = Branch(start);
+  Node* t1 = IfTrue(b1);
+  Node* f1 = IfFalse(b1);
+  Node* b2 = Branch(f1);
+  Node* t2 = IfTrue(b2);
+  Node* f2 = IfFalse(b2);
+  Node* m2 = Merge2(t2, f2);
+  Node* m1 = Merge2(t1, m2);
+  ComputeEquivalence(m1);
+
+  ASSERT_EQUIVALENCE(b1, m1, start);
+  ASSERT_EQUIVALENCE(t1);
+  ASSERT_EQUIVALENCE(f1, b2, m2);
+  ASSERT_EQUIVALENCE(t2);
+  ASSERT_EQUIVALENCE(f2);
+}
+
+
+TEST_F(ControlEquivalenceTest, Diamond3) {
+  Node* start = graph()->start();
+  Node* b1 = Branch(start);
+  Node* t1 = IfTrue(b1);
+  Node* f1 = IfFalse(b1);
+  Node* m1 = Merge2(t1, f1);
+  Node* b2 = Branch(m1);
+  Node* t2 = IfTrue(b2);
+  Node* f2 = IfFalse(b2);
+  Node* m2 = Merge2(t2, f2);
+  ComputeEquivalence(m2);
+
+  ASSERT_EQUIVALENCE(b1, m1, b2, m2, start);
+  ASSERT_EQUIVALENCE(t1);
+  ASSERT_EQUIVALENCE(f1);
+  ASSERT_EQUIVALENCE(t2);
+  ASSERT_EQUIVALENCE(f2);
+}
+
+
+TEST_F(ControlEquivalenceTest, Switch1) {
+  Node* start = graph()->start();
+  Node* b1 = Branch(start);
+  Node* t1 = IfTrue(b1);
+  Node* f1 = IfFalse(b1);
+  Node* b2 = Branch(f1);
+  Node* t2 = IfTrue(b2);
+  Node* f2 = IfFalse(b2);
+  Node* b3 = Branch(f2);
+  Node* t3 = IfTrue(b3);
+  Node* f3 = IfFalse(b3);
+  Node* m1 = Merge2(t1, t2);
+  Node* m2 = Merge2(m1, t3);
+  Node* m3 = Merge2(m2, f3);
+  ComputeEquivalence(m3);
+
+  ASSERT_EQUIVALENCE(b1, m3, start);
+  ASSERT_EQUIVALENCE(t1);
+  ASSERT_EQUIVALENCE(f1, b2);
+  ASSERT_EQUIVALENCE(t2);
+  ASSERT_EQUIVALENCE(f2, b3);
+  ASSERT_EQUIVALENCE(t3);
+  ASSERT_EQUIVALENCE(f3);
+  ASSERT_EQUIVALENCE(m1);
+  ASSERT_EQUIVALENCE(m2);
+}
+
+
+TEST_F(ControlEquivalenceTest, Loop1) {
+  Node* start = graph()->start();
+  Node* l = Loop2(start);
+  l->ReplaceInput(1, l);
+  ComputeEquivalence(l);
+
+  ASSERT_EQUIVALENCE(start);
+  ASSERT_EQUIVALENCE(l);
+}
+
+
+TEST_F(ControlEquivalenceTest, Loop2) {
+  Node* start = graph()->start();
+  Node* l = Loop2(start);
+  Node* b = Branch(l);
+  Node* t = IfTrue(b);
+  Node* f = IfFalse(b);
+  l->ReplaceInput(1, t);
+  ComputeEquivalence(f);
+
+  ASSERT_EQUIVALENCE(f, start);
+  ASSERT_EQUIVALENCE(t);
+  ASSERT_EQUIVALENCE(l, b);
+}
+
+
+TEST_F(ControlEquivalenceTest, Irreducible) {
+  Node* start = graph()->start();
+  Node* b1 = Branch(start);
+  Node* t1 = IfTrue(b1);
+  Node* f1 = IfFalse(b1);
+  Node* lp = Loop2(f1);
+  Node* m1 = Merge2(t1, lp);
+  Node* b2 = Branch(m1);
+  Node* t2 = IfTrue(b2);
+  Node* f2 = IfFalse(b2);
+  Node* m2 = Merge2(t2, f2);
+  Node* b3 = Branch(m2);
+  Node* t3 = IfTrue(b3);
+  Node* f3 = IfFalse(b3);
+  lp->ReplaceInput(1, f3);
+  ComputeEquivalence(t3);
+
+  ASSERT_EQUIVALENCE(b1, t3, start);
+  ASSERT_EQUIVALENCE(t1);
+  ASSERT_EQUIVALENCE(f1);
+  ASSERT_EQUIVALENCE(m1, b2, m2, b3);
+  ASSERT_EQUIVALENCE(t2);
+  ASSERT_EQUIVALENCE(f2);
+  ASSERT_EQUIVALENCE(f3);
+  ASSERT_EQUIVALENCE(lp);
+}
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
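The classes asserted above have a compact characterization: two control nodes are equivalent exactly when every control path from start to end passes through both of them or through neither (for the loop cases this is cycle equivalence on the underlying undirected graph). A brute-force check of Diamond1 under that definition (illustration only, not V8 code):

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      // Diamond1: the two start-to-end paths of the diamond.
      std::vector<std::vector<std::string>> paths = {
          {"start", "b", "t", "m"}, {"start", "b", "f", "m"}};
      auto on_path = [&](const std::string& n, size_t i) {
        for (const auto& v : paths[i]) {
          if (v == n) return true;
        }
        return false;
      };
      auto equivalent = [&](const std::string& a, const std::string& b) {
        for (size_t i = 0; i < paths.size(); ++i) {
          if (on_path(a, i) != on_path(b, i)) return false;
        }
        return true;
      };
      assert(equivalent("b", "m") && equivalent("start", "m"));
      assert(!equivalent("t", "m") && !equivalent("t", "f"));
      return 0;
    }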
index 88f25f7..dbdd4bb 100644
@@ -55,20 +55,20 @@ class GraphReducerTest : public TestWithZone {
 
  protected:
   void ReduceNode(Node* node, Reducer* r) {
-    GraphReducer reducer(graph());
+    GraphReducer reducer(graph(), zone());
     reducer.AddReducer(r);
     reducer.ReduceNode(node);
   }
 
   void ReduceNode(Node* node, Reducer* r1, Reducer* r2) {
-    GraphReducer reducer(graph());
+    GraphReducer reducer(graph(), zone());
     reducer.AddReducer(r1);
     reducer.AddReducer(r2);
     reducer.ReduceNode(node);
   }
 
   void ReduceNode(Node* node, Reducer* r1, Reducer* r2, Reducer* r3) {
-    GraphReducer reducer(graph());
+    GraphReducer reducer(graph(), zone());
     reducer.AddReducer(r1);
     reducer.AddReducer(r2);
     reducer.AddReducer(r3);
@@ -87,6 +87,7 @@ TEST_F(GraphReducerTest, NodeIsDeadAfterReplace) {
   Node* node0 = graph()->NewNode(&OP0);
   Node* node1 = graph()->NewNode(&OP1, node0);
   Node* node2 = graph()->NewNode(&OP1, node0);
+  EXPECT_CALL(r, Reduce(node0)).WillOnce(Return(Reducer::NoChange()));
   EXPECT_CALL(r, Reduce(node1)).WillOnce(Return(Reducer::Replace(node2)));
   ReduceNode(node1, &r);
   EXPECT_FALSE(node0->IsDead());
@@ -114,7 +115,6 @@ TEST_F(GraphReducerTest, ReduceAgainAfterChanged) {
       Return(Reducer::Changed(node0)));
   EXPECT_CALL(r1, Reduce(node0)).InSequence(s1);
   EXPECT_CALL(r2, Reduce(node0)).InSequence(s2);
-  EXPECT_CALL(r3, Reduce(node0)).InSequence(s3);
   ReduceNode(node0, &r1, &r2, &r3);
 }
 
index 2cfd23a..9543258 100644
@@ -4,8 +4,6 @@
 
 #include "test/unittests/compiler/graph-unittest.h"
 
-#include <ostream>  // NOLINT(readability/streams)
-
 #include "src/compiler/node-properties-inl.h"
 #include "test/unittests/compiler/node-test-utils.h"
 
@@ -93,6 +91,20 @@ Matcher<Node*> GraphTest::IsTrueConstant() {
       Unique<HeapObject>::CreateImmovable(factory()->true_value()));
 }
 
+
+TypedGraphTest::TypedGraphTest(int num_parameters)
+    : GraphTest(num_parameters), typer_(graph(), MaybeHandle<Context>()) {}
+
+
+TypedGraphTest::~TypedGraphTest() {}
+
+
+Node* TypedGraphTest::Parameter(Type* type, int32_t index) {
+  Node* node = GraphTest::Parameter(index);
+  NodeProperties::SetBounds(node, Bounds(type));
+  return node;
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index e63d948..7c75161 100644
@@ -7,7 +7,6 @@
 
 #include "src/compiler/common-operator.h"
 #include "src/compiler/graph.h"
-#include "src/compiler/machine-operator.h"
 #include "src/compiler/typer.h"
 #include "test/unittests/test-utils.h"
 #include "testing/gmock/include/gmock/gmock.h"
@@ -29,11 +28,11 @@ using ::testing::Matcher;
 
 class GraphTest : public TestWithContext, public TestWithZone {
  public:
-  explicit GraphTest(int parameters = 1);
-  virtual ~GraphTest();
+  explicit GraphTest(int num_parameters = 1);
+  ~GraphTest() OVERRIDE;
 
  protected:
-  Node* Parameter(int32_t index);
+  Node* Parameter(int32_t index = 0);
   Node* Float32Constant(volatile float value);
   Node* Float64Constant(volatile double value);
   Node* Int32Constant(int32_t value);
@@ -62,10 +61,13 @@ class GraphTest : public TestWithContext, public TestWithZone {
 
 class TypedGraphTest : public GraphTest {
  public:
-  explicit TypedGraphTest(int parameters = 1)
-      : GraphTest(parameters), typer_(graph(), MaybeHandle<Context>()) {}
+  explicit TypedGraphTest(int num_parameters = 1);
+  ~TypedGraphTest() OVERRIDE;
 
  protected:
+  Node* Parameter(int32_t index = 0) { return GraphTest::Parameter(index); }
+  Node* Parameter(Type* type, int32_t index = 0);
+
   Typer* typer() { return &typer_; }
 
  private:
index d1a1726..afa1e94 100644
@@ -23,7 +23,7 @@ TEST_F(InstructionSelectorTest, Int32AddWithParameter) {
   m.Return(m.Int32Add(m.Parameter(0), m.Parameter(1)));
   Stream s = m.Build();
   ASSERT_EQ(1U, s.size());
-  EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+  EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
 }
 
 
@@ -34,18 +34,26 @@ TEST_F(InstructionSelectorTest, Int32AddWithImmediate) {
       m.Return(m.Int32Add(m.Parameter(0), m.Int32Constant(imm)));
       Stream s = m.Build();
       ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
-      ASSERT_EQ(2U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
+      if (imm == 0) {
+        ASSERT_EQ(1U, s[0]->InputCount());
+      } else {
+        ASSERT_EQ(2U, s[0]->InputCount());
+        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      }
     }
     {
       StreamBuilder m(this, kMachInt32, kMachInt32);
       m.Return(m.Int32Add(m.Int32Constant(imm), m.Parameter(0)));
       Stream s = m.Build();
       ASSERT_EQ(1U, s.size());
-      EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
-      ASSERT_EQ(2U, s[0]->InputCount());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
+      if (imm == 0) {
+        ASSERT_EQ(1U, s[0]->InputCount());
+      } else {
+        ASSERT_EQ(2U, s[0]->InputCount());
+        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+      }
     }
   }
 }
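The switch from kIA32Add to kIA32Lea reflects that lea evaluates base + index*scale + disp in a single instruction, can write a register other than its inputs, and does not clobber EFLAGS; an addition with immediate 0 collapses to plain register addressing, which is why these tests expect a single input when imm == 0. A sketch of what the operand modes compute (illustration, not V8 code):

    #include <cassert>
    #include <cstdint>

    // What "lea r, [base + index*scale + disp]" computes.
    static int32_t Lea(int32_t base, int32_t index, int32_t scale,
                       int32_t disp) {
      return base + index * scale + disp;
    }

    int main() {
      assert(Lea(100, 7, 4, 12) == 140);  // kMode_MR4I-style operand
      assert(Lea(100, 0, 1, 0) == 100);   // imm == 0: collapses to kMode_MR
      return 0;
    }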
@@ -112,10 +120,13 @@ TEST_F(InstructionSelectorTest, BetterLeftOperandTestAddBinop) {
   m.Return(m.Int32Add(add, param1));
   Stream s = m.Build();
   ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kIA32Add, s[0]->arch_opcode());
+  EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
   ASSERT_EQ(2U, s[0]->InputCount());
   ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
-  EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(1)));
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[1]->InputAt(0)));
 }
 
 
@@ -131,6 +142,7 @@ TEST_F(InstructionSelectorTest, BetterLeftOperandTestMulBinop) {
   ASSERT_EQ(2U, s[0]->InputCount());
   ASSERT_TRUE(s[0]->InputAt(0)->IsUnallocated());
   EXPECT_EQ(s.ToVreg(param2), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(param1), s.ToVreg(s[0]->InputAt(1)));
 }
 
 
@@ -304,9 +316,10 @@ class AddressingModeUnitTest : public InstructionSelectorTest {
   AddressingModeUnitTest() : m(NULL) { Reset(); }
   ~AddressingModeUnitTest() { delete m; }
 
-  void Run(Node* base, Node* index, AddressingMode mode) {
-    Node* load = m->Load(kMachInt32, base, index);
-    m->Store(kMachInt32, base, index, load);
+  void Run(Node* base, Node* load_index, Node* store_index,
+           AddressingMode mode) {
+    Node* load = m->Load(kMachInt32, base, load_index);
+    m->Store(kMachInt32, base, store_index, load);
     m->Return(m->Int32Constant(0));
     Stream s = m->Build();
     ASSERT_EQ(2U, s.size());
@@ -342,21 +355,21 @@ class AddressingModeUnitTest : public InstructionSelectorTest {
 TEST_F(AddressingModeUnitTest, AddressingMode_MR) {
   Node* base = base_reg;
   Node* index = zero;
-  Run(base, index, kMode_MR);
+  Run(base, index, index, kMode_MR);
 }
 
 
 TEST_F(AddressingModeUnitTest, AddressingMode_MRI) {
   Node* base = base_reg;
   Node* index = non_zero;
-  Run(base, index, kMode_MRI);
+  Run(base, index, index, kMode_MRI);
 }
 
 
 TEST_F(AddressingModeUnitTest, AddressingMode_MR1) {
   Node* base = base_reg;
   Node* index = index_reg;
-  Run(base, index, kMode_MR1);
+  Run(base, index, index, kMode_MR1);
 }
 
 
@@ -365,16 +378,18 @@ TEST_F(AddressingModeUnitTest, AddressingMode_MRN) {
   for (size_t i = 0; i < arraysize(scales); ++i) {
     Reset();
     Node* base = base_reg;
-    Node* index = m->Int32Mul(index_reg, scales[i]);
-    Run(base, index, expected[i]);
+    Node* load_index = m->Int32Mul(index_reg, scales[i]);
+    Node* store_index = m->Int32Mul(index_reg, scales[i]);
+    Run(base, load_index, store_index, expected[i]);
   }
 }
 
 
 TEST_F(AddressingModeUnitTest, AddressingMode_MR1I) {
   Node* base = base_reg;
-  Node* index = m->Int32Add(index_reg, non_zero);
-  Run(base, index, kMode_MR1I);
+  Node* load_index = m->Int32Add(index_reg, non_zero);
+  Node* store_index = m->Int32Add(index_reg, non_zero);
+  Run(base, load_index, store_index, kMode_MR1I);
 }
 
 
@@ -383,44 +398,52 @@ TEST_F(AddressingModeUnitTest, AddressingMode_MRNI) {
   for (size_t i = 0; i < arraysize(scales); ++i) {
     Reset();
     Node* base = base_reg;
-    Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
-    Run(base, index, expected[i]);
+    Node* load_index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
+    Node* store_index =
+        m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
+    Run(base, load_index, store_index, expected[i]);
   }
 }
 
 
-TEST_F(AddressingModeUnitTest, AddressingMode_M1) {
+TEST_F(AddressingModeUnitTest, AddressingMode_M1ToMR) {
   Node* base = null_ptr;
   Node* index = index_reg;
-  Run(base, index, kMode_M1);
+  // M1 maps to MR
+  Run(base, index, index, kMode_MR);
 }
 
 
 TEST_F(AddressingModeUnitTest, AddressingMode_MN) {
-  AddressingMode expected[] = {kMode_M1, kMode_M2, kMode_M4, kMode_M8};
+  AddressingMode expected[] = {kMode_MR, kMode_M2, kMode_M4, kMode_M8};
   for (size_t i = 0; i < arraysize(scales); ++i) {
     Reset();
     Node* base = null_ptr;
-    Node* index = m->Int32Mul(index_reg, scales[i]);
-    Run(base, index, expected[i]);
+    Node* load_index = m->Int32Mul(index_reg, scales[i]);
+    Node* store_index = m->Int32Mul(index_reg, scales[i]);
+    Run(base, load_index, store_index, expected[i]);
   }
 }
 
 
-TEST_F(AddressingModeUnitTest, AddressingMode_M1I) {
+TEST_F(AddressingModeUnitTest, AddressingMode_M1IToMRI) {
   Node* base = null_ptr;
-  Node* index = m->Int32Add(index_reg, non_zero);
-  Run(base, index, kMode_M1I);
+  Node* load_index = m->Int32Add(index_reg, non_zero);
+  Node* store_index = m->Int32Add(index_reg, non_zero);
+  // M1I maps to MRI
+  Run(base, load_index, store_index, kMode_MRI);
 }
 
 
 TEST_F(AddressingModeUnitTest, AddressingMode_MNI) {
-  AddressingMode expected[] = {kMode_M1I, kMode_M2I, kMode_M4I, kMode_M8I};
+  AddressingMode expected[] = {kMode_MRI, kMode_M2I, kMode_M4I, kMode_M8I};
   for (size_t i = 0; i < arraysize(scales); ++i) {
     Reset();
     Node* base = null_ptr;
-    Node* index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
-    Run(base, index, expected[i]);
+    Node* load_index = m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
+    Node* store_index =
+        m->Int32Add(m->Int32Mul(index_reg, scales[i]), non_zero);
+    Run(base, load_index, store_index, expected[i]);
   }
 }
 
@@ -433,7 +456,7 @@ TEST_F(AddressingModeUnitTest, AddressingMode_MI) {
       Reset();
       Node* base = bases[i];
       Node* index = indices[j];
-      Run(base, index, kMode_MI);
+      Run(base, index, index, kMode_MI);
     }
   }
 }
@@ -459,7 +482,7 @@ std::ostream& operator<<(std::ostream& os, const MultParam& m) {
 
 const MultParam kMultParams[] = {{-1, false, kMode_None},
                                  {0, false, kMode_None},
-                                 {1, true, kMode_M1},
+                                 {1, true, kMode_MR},
                                  {2, true, kMode_M2},
                                  {3, true, kMode_MR2},
                                  {4, true, kMode_M4},
@@ -493,11 +516,14 @@ static unsigned InputCountForLea(AddressingMode mode) {
     case kMode_MR2:
     case kMode_MR4:
     case kMode_MR8:
+    case kMode_MRI:
       return 2U;
     case kMode_M1:
     case kMode_M2:
     case kMode_M4:
     case kMode_M8:
+    case kMode_MI:
+    case kMode_MR:
       return 1U;
     default:
       UNREACHABLE();
@@ -506,7 +532,9 @@ static unsigned InputCountForLea(AddressingMode mode) {
 }
 
 
-static AddressingMode AddressingModeForAddMult(const MultParam& m) {
+static AddressingMode AddressingModeForAddMult(int32_t imm,
+                                               const MultParam& m) {
+  if (imm == 0) return m.addressing_mode;
   switch (m.addressing_mode) {
     case kMode_MR1:
       return kMode_MR1I;
@@ -524,6 +552,8 @@ static AddressingMode AddressingModeForAddMult(const MultParam& m) {
       return kMode_M4I;
     case kMode_M8:
       return kMode_M8I;
+    case kMode_MR:
+      return kMode_MRI;
     default:
       UNREACHABLE();
       return kMode_None;
@@ -563,16 +593,19 @@ TEST_P(InstructionSelectorMultTest, MultAdd32) {
     if (m_param.lea_expected) {
       ASSERT_EQ(1U, s.size());
       EXPECT_EQ(kIA32Lea, s[0]->arch_opcode());
-      EXPECT_EQ(AddressingModeForAddMult(m_param), s[0]->addressing_mode());
+      EXPECT_EQ(AddressingModeForAddMult(imm, m_param),
+                s[0]->addressing_mode());
       unsigned input_count = InputCountForLea(s[0]->addressing_mode());
       ASSERT_EQ(input_count, s[0]->InputCount());
-      ASSERT_EQ(InstructionOperand::IMMEDIATE,
-                s[0]->InputAt(input_count - 1)->kind());
-      EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
+      if (imm != 0) {
+        ASSERT_EQ(InstructionOperand::IMMEDIATE,
+                  s[0]->InputAt(input_count - 1)->kind());
+        EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(input_count - 1)));
+      }
     } else {
       ASSERT_EQ(2U, s.size());
       EXPECT_EQ(kIA32Imul, s[0]->arch_opcode());
-      EXPECT_EQ(kIA32Add, s[1]->arch_opcode());
+      EXPECT_EQ(kIA32Lea, s[1]->arch_opcode());
     }
   }
 }
@@ -601,6 +634,38 @@ TEST_F(InstructionSelectorTest, Int32MulHigh) {
   EXPECT_TRUE(s.IsFixed(s[0]->OutputAt(0), edx));
 }
 
+
+TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
+  {
+    StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+    Node* mul = m.Float64Mul(add, m.Parameter(1));
+    Node* sub = m.Float64Sub(mul, add);
+    Node* ret = m.Float64Div(mul, sub);
+    m.Return(ret);
+    Stream s = m.Build(AVX);
+    ASSERT_EQ(4U, s.size());
+    EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
+  }
+  {
+    StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+    Node* mul = m.Float64Mul(add, m.Parameter(1));
+    Node* sub = m.Float64Sub(mul, add);
+    Node* ret = m.Float64Div(mul, sub);
+    m.Return(ret);
+    Stream s = m.Build();
+    ASSERT_EQ(4U, s.size());
+    EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
+  }
+}
+
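Float64BinopArithmetic checks the same graph twice: built with the AVX feature it selects kAVXFloat64* opcodes, without it the kSSEFloat64* fallbacks. The difference the selector cares about is operand shape, sketched below (illustration, not V8 code): SSE arithmetic is destructive, so the destination must double as the first source, while AVX's VEX encoding takes a separate destination.

    #include <cassert>

    static void SseAddsd(double* dst_and_src1, double src2) {
      *dst_and_src1 += src2;  // addsd xmm0, xmm1: clobbers the first source
    }

    static double AvxVaddsd(double src1, double src2) {
      return src1 + src2;  // vaddsd xmm2, xmm0, xmm1: sources preserved
    }

    int main() {
      double a = 1.5;
      double c = AvxVaddsd(a, 2.5);
      assert(a == 1.5 && c == 4.0);  // three-operand form keeps a intact
      SseAddsd(&a, 2.5);
      assert(a == 4.0);              // two-operand form overwrote a
      return 0;
    }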
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index 073af15..e65d68b 100644
@@ -21,7 +21,7 @@ namespace compiler {
 class InstructionSelectorTest : public TestWithContext, public TestWithZone {
  public:
   InstructionSelectorTest();
-  virtual ~InstructionSelectorTest();
+  ~InstructionSelectorTest() OVERRIDE;
 
   base::RandomNumberGenerator* rng() { return &rng_; }
 
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
new file mode 100644
index 0000000..9546376
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.cc
@@ -0,0 +1,475 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/base/utils/random-number-generator.h"
+#include "src/compiler/pipeline.h"
+#include "test/unittests/compiler/instruction-sequence-unittest.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+static const char*
+    general_register_names_[RegisterConfiguration::kMaxGeneralRegisters];
+static const char*
+    double_register_names_[RegisterConfiguration::kMaxDoubleRegisters];
+static char register_names_[10 * (RegisterConfiguration::kMaxGeneralRegisters +
+                                  RegisterConfiguration::kMaxDoubleRegisters)];
+
+
+static void InitializeRegisterNames() {
+  char* loc = register_names_;
+  for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
+    general_register_names_[i] = loc;
+    loc += base::OS::SNPrintF(loc, 100, "gp_%d", i);
+    *loc++ = 0;
+  }
+  for (int i = 0; i < RegisterConfiguration::kMaxDoubleRegisters; ++i) {
+    double_register_names_[i] = loc;
+    loc += base::OS::SNPrintF(loc, 100, "fp_%d", i);
+    *loc++ = 0;
+  }
+}
+
+
+InstructionSequenceTest::InstructionSequenceTest()
+    : sequence_(nullptr),
+      num_general_registers_(kDefaultNRegs),
+      num_double_registers_(kDefaultNRegs),
+      instruction_blocks_(zone()),
+      current_instruction_index_(-1),
+      current_block_(nullptr),
+      block_returns_(false) {
+  InitializeRegisterNames();
+}
+
+
+void InstructionSequenceTest::SetNumRegs(int num_general_registers,
+                                         int num_double_registers) {
+  CHECK(config_.is_empty());
+  CHECK(instructions_.empty());
+  CHECK(instruction_blocks_.empty());
+  num_general_registers_ = num_general_registers;
+  num_double_registers_ = num_double_registers;
+}
+
+
+RegisterConfiguration* InstructionSequenceTest::config() {
+  if (config_.is_empty()) {
+    config_.Reset(new RegisterConfiguration(
+        num_general_registers_, num_double_registers_, num_double_registers_,
+        general_register_names_, double_register_names_));
+  }
+  return config_.get();
+}
+
+
+InstructionSequence* InstructionSequenceTest::sequence() {
+  if (sequence_ == nullptr) {
+    sequence_ = new (zone()) InstructionSequence(zone(), &instruction_blocks_);
+  }
+  return sequence_;
+}
+
+
+void InstructionSequenceTest::StartLoop(int loop_blocks) {
+  CHECK(current_block_ == nullptr);
+  if (!loop_blocks_.empty()) {
+    CHECK(!loop_blocks_.back().loop_header_.IsValid());
+  }
+  LoopData loop_data = {Rpo::Invalid(), loop_blocks};
+  loop_blocks_.push_back(loop_data);
+}
+
+
+void InstructionSequenceTest::EndLoop() {
+  CHECK(current_block_ == nullptr);
+  CHECK(!loop_blocks_.empty());
+  CHECK_EQ(0, loop_blocks_.back().expected_blocks_);
+  loop_blocks_.pop_back();
+}
+
+
+void InstructionSequenceTest::StartBlock() {
+  block_returns_ = false;
+  NewBlock();
+}
+
+
+int InstructionSequenceTest::EndBlock(BlockCompletion completion) {
+  int instruction_index = kMinInt;
+  if (block_returns_) {
+    CHECK(completion.type_ == kBlockEnd || completion.type_ == kFallThrough);
+    completion.type_ = kBlockEnd;
+  }
+  switch (completion.type_) {
+    case kBlockEnd:
+      break;
+    case kFallThrough:
+      instruction_index = EmitFallThrough();
+      break;
+    case kJump:
+      CHECK(!block_returns_);
+      instruction_index = EmitJump();
+      break;
+    case kBranch:
+      CHECK(!block_returns_);
+      instruction_index = EmitBranch(completion.op_);
+      break;
+  }
+  completions_.push_back(completion);
+  CHECK(current_block_ != nullptr);
+  sequence()->EndBlock(current_block_->rpo_number());
+  current_block_ = nullptr;
+  return instruction_index;
+}
+
+
+InstructionSequenceTest::TestOperand InstructionSequenceTest::Imm(int32_t imm) {
+  int index = sequence()->AddImmediate(Constant(imm));
+  return TestOperand(kImmediate, index);
+}
+
+
+InstructionSequenceTest::VReg InstructionSequenceTest::Define(
+    TestOperand output_op) {
+  VReg vreg = NewReg();
+  InstructionOperand* outputs[1]{ConvertOutputOp(vreg, output_op)};
+  Emit(vreg.value_, kArchNop, 1, outputs);
+  return vreg;
+}
+
+
+int InstructionSequenceTest::Return(TestOperand input_op_0) {
+  block_returns_ = true;
+  InstructionOperand* inputs[1]{ConvertInputOp(input_op_0)};
+  return Emit(NewIndex(), kArchRet, 0, nullptr, 1, inputs);
+}
+
+
+PhiInstruction* InstructionSequenceTest::Phi(VReg incoming_vreg_0,
+                                             VReg incoming_vreg_1,
+                                             VReg incoming_vreg_2,
+                                             VReg incoming_vreg_3) {
+  auto phi = new (zone()) PhiInstruction(zone(), NewReg().value_, 10);
+  VReg inputs[] = {incoming_vreg_0, incoming_vreg_1, incoming_vreg_2,
+                   incoming_vreg_3};
+  for (size_t i = 0; i < arraysize(inputs); ++i) {
+    if (inputs[i].value_ == kNoValue) break;
+    Extend(phi, inputs[i]);
+  }
+  current_block_->AddPhi(phi);
+  return phi;
+}
+
+
+void InstructionSequenceTest::Extend(PhiInstruction* phi, VReg vreg) {
+  phi->Extend(zone(), vreg.value_);
+}
+
+
+InstructionSequenceTest::VReg InstructionSequenceTest::DefineConstant(
+    int32_t imm) {
+  VReg vreg = NewReg();
+  sequence()->AddConstant(vreg.value_, Constant(imm));
+  InstructionOperand* outputs[1]{ConstantOperand::Create(vreg.value_, zone())};
+  Emit(vreg.value_, kArchNop, 1, outputs);
+  return vreg;
+}
+
+
+int InstructionSequenceTest::EmitNop() { return Emit(NewIndex(), kArchNop); }
+
+
+static size_t CountInputs(size_t size,
+                          InstructionSequenceTest::TestOperand* inputs) {
+  size_t i = 0;
+  for (; i < size; ++i) {
+    if (inputs[i].type_ == InstructionSequenceTest::kInvalid) break;
+  }
+  return i;
+}
+
+
+int InstructionSequenceTest::EmitI(size_t input_size, TestOperand* inputs) {
+  InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
+  return Emit(NewIndex(), kArchNop, 0, nullptr, input_size, mapped_inputs);
+}
+
+
+int InstructionSequenceTest::EmitI(TestOperand input_op_0,
+                                   TestOperand input_op_1,
+                                   TestOperand input_op_2,
+                                   TestOperand input_op_3) {
+  TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
+  return EmitI(CountInputs(arraysize(inputs), inputs), inputs);
+}
+
+
+InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
+    TestOperand output_op, size_t input_size, TestOperand* inputs) {
+  VReg output_vreg = NewReg();
+  InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
+  InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
+  Emit(output_vreg.value_, kArchNop, 1, outputs, input_size, mapped_inputs);
+  return output_vreg;
+}
+
+
+InstructionSequenceTest::VReg InstructionSequenceTest::EmitOI(
+    TestOperand output_op, TestOperand input_op_0, TestOperand input_op_1,
+    TestOperand input_op_2, TestOperand input_op_3) {
+  TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
+  return EmitOI(output_op, CountInputs(arraysize(inputs), inputs), inputs);
+}
+
+
+InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
+    TestOperand output_op, size_t input_size, TestOperand* inputs) {
+  VReg output_vreg = NewReg();
+  InstructionOperand* outputs[1]{ConvertOutputOp(output_vreg, output_op)};
+  CHECK(UnallocatedOperand::cast(outputs[0])->HasFixedPolicy());
+  InstructionOperand** mapped_inputs = ConvertInputs(input_size, inputs);
+  Emit(output_vreg.value_, kArchCallCodeObject, 1, outputs, input_size,
+       mapped_inputs, 0, nullptr, true);
+  return output_vreg;
+}
+
+
+InstructionSequenceTest::VReg InstructionSequenceTest::EmitCall(
+    TestOperand output_op, TestOperand input_op_0, TestOperand input_op_1,
+    TestOperand input_op_2, TestOperand input_op_3) {
+  TestOperand inputs[] = {input_op_0, input_op_1, input_op_2, input_op_3};
+  return EmitCall(output_op, CountInputs(arraysize(inputs), inputs), inputs);
+}
+
+
+const Instruction* InstructionSequenceTest::GetInstruction(
+    int instruction_index) {
+  auto it = instructions_.find(instruction_index);
+  CHECK(it != instructions_.end());
+  return it->second;
+}
+
+
+int InstructionSequenceTest::EmitBranch(TestOperand input_op) {
+  InstructionOperand* inputs[4]{ConvertInputOp(input_op), ConvertInputOp(Imm()),
+                                ConvertInputOp(Imm()), ConvertInputOp(Imm())};
+  InstructionCode opcode = kArchJmp | FlagsModeField::encode(kFlags_branch) |
+                           FlagsConditionField::encode(kEqual);
+  auto instruction =
+      NewInstruction(opcode, 0, nullptr, 4, inputs)->MarkAsControl();
+  return AddInstruction(NewIndex(), instruction);
+}
+
+
+int InstructionSequenceTest::EmitFallThrough() {
+  auto instruction = NewInstruction(kArchNop, 0, nullptr)->MarkAsControl();
+  return AddInstruction(NewIndex(), instruction);
+}
+
+
+int InstructionSequenceTest::EmitJump() {
+  InstructionOperand* inputs[1]{ConvertInputOp(Imm())};
+  auto instruction =
+      NewInstruction(kArchJmp, 0, nullptr, 1, inputs)->MarkAsControl();
+  return AddInstruction(NewIndex(), instruction);
+}
+
+
+Instruction* InstructionSequenceTest::NewInstruction(
+    InstructionCode code, size_t outputs_size, InstructionOperand** outputs,
+    size_t inputs_size, InstructionOperand** inputs, size_t temps_size,
+    InstructionOperand** temps) {
+  CHECK_NE(nullptr, current_block_);
+  return Instruction::New(zone(), code, outputs_size, outputs, inputs_size,
+                          inputs, temps_size, temps);
+}
+
+
+InstructionOperand* InstructionSequenceTest::Unallocated(
+    TestOperand op, UnallocatedOperand::ExtendedPolicy policy) {
+  auto unallocated = new (zone()) UnallocatedOperand(policy);
+  unallocated->set_virtual_register(op.vreg_.value_);
+  return unallocated;
+}
+
+
+InstructionOperand* InstructionSequenceTest::Unallocated(
+    TestOperand op, UnallocatedOperand::ExtendedPolicy policy,
+    UnallocatedOperand::Lifetime lifetime) {
+  auto unallocated = new (zone()) UnallocatedOperand(policy, lifetime);
+  unallocated->set_virtual_register(op.vreg_.value_);
+  return unallocated;
+}
+
+
+InstructionOperand* InstructionSequenceTest::Unallocated(
+    TestOperand op, UnallocatedOperand::ExtendedPolicy policy, int index) {
+  auto unallocated = new (zone()) UnallocatedOperand(policy, index);
+  unallocated->set_virtual_register(op.vreg_.value_);
+  return unallocated;
+}
+
+
+InstructionOperand* InstructionSequenceTest::Unallocated(
+    TestOperand op, UnallocatedOperand::BasicPolicy policy, int index) {
+  auto unallocated = new (zone()) UnallocatedOperand(policy, index);
+  unallocated->set_virtual_register(op.vreg_.value_);
+  return unallocated;
+}
+
+
+InstructionOperand** InstructionSequenceTest::ConvertInputs(
+    size_t input_size, TestOperand* inputs) {
+  InstructionOperand** mapped_inputs =
+      zone()->NewArray<InstructionOperand*>(static_cast<int>(input_size));
+  for (size_t i = 0; i < input_size; ++i) {
+    mapped_inputs[i] = ConvertInputOp(inputs[i]);
+  }
+  return mapped_inputs;
+}
+
+
+InstructionOperand* InstructionSequenceTest::ConvertInputOp(TestOperand op) {
+  if (op.type_ == kImmediate) {
+    CHECK_EQ(op.vreg_.value_, kNoValue);
+    return ImmediateOperand::Create(op.value_, zone());
+  }
+  CHECK_NE(op.vreg_.value_, kNoValue);
+  switch (op.type_) {
+    case kNone:
+      return Unallocated(op, UnallocatedOperand::NONE,
+                         UnallocatedOperand::USED_AT_START);
+    case kUnique:
+      return Unallocated(op, UnallocatedOperand::NONE);
+    case kUniqueRegister:
+      return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER);
+    case kRegister:
+      return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER,
+                         UnallocatedOperand::USED_AT_START);
+    case kFixedRegister:
+      CHECK(0 <= op.value_ && op.value_ < num_general_registers_);
+      return Unallocated(op, UnallocatedOperand::FIXED_REGISTER, op.value_);
+    case kFixedSlot:
+      return Unallocated(op, UnallocatedOperand::FIXED_SLOT, op.value_);
+    default:
+      break;
+  }
+  CHECK(false);
+  return NULL;
+}
+
+
+InstructionOperand* InstructionSequenceTest::ConvertOutputOp(VReg vreg,
+                                                             TestOperand op) {
+  CHECK_EQ(op.vreg_.value_, kNoValue);
+  op.vreg_ = vreg;
+  switch (op.type_) {
+    case kSameAsFirst:
+      return Unallocated(op, UnallocatedOperand::SAME_AS_FIRST_INPUT);
+    case kRegister:
+      return Unallocated(op, UnallocatedOperand::MUST_HAVE_REGISTER);
+    case kFixedSlot:
+      return Unallocated(op, UnallocatedOperand::FIXED_SLOT, op.value_);
+    case kFixedRegister:
+      CHECK(0 <= op.value_ && op.value_ < num_general_registers_);
+      return Unallocated(op, UnallocatedOperand::FIXED_REGISTER, op.value_);
+    default:
+      break;
+  }
+  CHECK(false);
+  return NULL;
+}
+
+
+InstructionBlock* InstructionSequenceTest::NewBlock() {
+  CHECK(current_block_ == nullptr);
+  auto block_id = BasicBlock::Id::FromSize(instruction_blocks_.size());
+  Rpo rpo = Rpo::FromInt(block_id.ToInt());
+  Rpo loop_header = Rpo::Invalid();
+  Rpo loop_end = Rpo::Invalid();
+  if (!loop_blocks_.empty()) {
+    auto& loop_data = loop_blocks_.back();
+    // This is a loop header.
+    if (!loop_data.loop_header_.IsValid()) {
+      loop_end = Rpo::FromInt(block_id.ToInt() + loop_data.expected_blocks_);
+      loop_data.expected_blocks_--;
+      loop_data.loop_header_ = rpo;
+    } else {
+      // This is a loop body.
+      CHECK_NE(0, loop_data.expected_blocks_);
+      // TODO(dcarney): handle nested loops.
+      loop_data.expected_blocks_--;
+      loop_header = loop_data.loop_header_;
+    }
+  }
+  // Construct instruction block.
+  auto instruction_block = new (zone())
+      InstructionBlock(zone(), block_id, rpo, loop_header, loop_end, false);
+  instruction_blocks_.push_back(instruction_block);
+  current_block_ = instruction_block;
+  sequence()->StartBlock(rpo);
+  return instruction_block;
+}
+
+
+void InstructionSequenceTest::WireBlocks() {
+  CHECK_EQ(nullptr, current_block());
+  CHECK(instruction_blocks_.size() == completions_.size());
+  size_t offset = 0;
+  for (const auto& completion : completions_) {
+    switch (completion.type_) {
+      case kBlockEnd:
+        break;
+      case kFallThrough:  // Fallthrough.
+      case kJump:
+        WireBlock(offset, completion.offset_0_);
+        break;
+      case kBranch:
+        WireBlock(offset, completion.offset_0_);
+        WireBlock(offset, completion.offset_1_);
+        break;
+    }
+    ++offset;
+  }
+}
+
+
+void InstructionSequenceTest::WireBlock(size_t block_offset, int jump_offset) {
+  size_t target_block_offset = block_offset + static_cast<size_t>(jump_offset);
+  CHECK(block_offset < instruction_blocks_.size());
+  CHECK(target_block_offset < instruction_blocks_.size());
+  auto block = instruction_blocks_[block_offset];
+  auto target = instruction_blocks_[target_block_offset];
+  block->successors().push_back(target->rpo_number());
+  target->predecessors().push_back(block->rpo_number());
+}
+
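WireBlocks resolves control flow only after all blocks exist: each BlockCompletion stores its targets as offsets relative to the completed block, so block i with offset d is wired to block i + d. A condensed standalone model of the wiring (not the harness itself):

    #include <cassert>
    #include <cstddef>
    #include <vector>

    int main() {
      // Three blocks; block 0 falls through (+1) and block 1 jumps (+1).
      std::vector<std::vector<size_t>> successors(3);
      std::vector<int> offsets = {1, 1};
      for (size_t block = 0; block < offsets.size(); ++block) {
        successors[block].push_back(block + static_cast<size_t>(offsets[block]));
      }
      assert(successors[0][0] == 1);
      assert(successors[1][0] == 2);
      return 0;
    }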
+
+int InstructionSequenceTest::Emit(int instruction_index, InstructionCode code,
+                                  size_t outputs_size,
+                                  InstructionOperand** outputs,
+                                  size_t inputs_size,
+                                  InstructionOperand** inputs,
+                                  size_t temps_size, InstructionOperand** temps,
+                                  bool is_call) {
+  auto instruction = NewInstruction(code, outputs_size, outputs, inputs_size,
+                                    inputs, temps_size, temps);
+  if (is_call) instruction->MarkAsCall();
+  return AddInstruction(instruction_index, instruction);
+}
+
+
+int InstructionSequenceTest::AddInstruction(int instruction_index,
+                                            Instruction* instruction) {
+  sequence()->AddInstruction(instruction);
+  return instruction_index;
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
new file mode 100644
index 0000000..ce0a5b4
--- /dev/null
+++ b/deps/v8/test/unittests/compiler/instruction-sequence-unittest.h
@@ -0,0 +1,239 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef V8_UNITTESTS_COMPILER_INSTRUCTION_SEQUENCE_UNITTEST_H_
+#define V8_UNITTESTS_COMPILER_INSTRUCTION_SEQUENCE_UNITTEST_H_
+
+#include "src/compiler/instruction.h"
+#include "test/unittests/test-utils.h"
+#include "testing/gmock/include/gmock/gmock.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class InstructionSequenceTest : public TestWithZone {
+ public:
+  static const int kDefaultNRegs = 4;
+  static const int kNoValue = kMinInt;
+
+  typedef BasicBlock::RpoNumber Rpo;
+
+  struct VReg {
+    VReg() : value_(kNoValue) {}
+    VReg(PhiInstruction* phi) : value_(phi->virtual_register()) {}  // NOLINT
+    explicit VReg(int value) : value_(value) {}
+    int value_;
+  };
+
+  enum TestOperandType {
+    kInvalid,
+    kSameAsFirst,
+    kRegister,
+    kFixedRegister,
+    kSlot,
+    kFixedSlot,
+    kImmediate,
+    kNone,
+    kConstant,
+    kUnique,
+    kUniqueRegister
+  };
+
+  struct TestOperand {
+    TestOperand() : type_(kInvalid), vreg_(), value_(kNoValue) {}
+    TestOperand(TestOperandType type, int imm)
+        : type_(type), vreg_(), value_(imm) {}
+    TestOperand(TestOperandType type, VReg vreg, int value = kNoValue)
+        : type_(type), vreg_(vreg), value_(value) {}
+
+    TestOperandType type_;
+    VReg vreg_;
+    int value_;
+  };
+
+  static TestOperand Same() { return TestOperand(kSameAsFirst, VReg()); }
+
+  static TestOperand Reg(VReg vreg, int index = kNoValue) {
+    TestOperandType type = kRegister;
+    if (index != kNoValue) type = kFixedRegister;
+    return TestOperand(type, vreg, index);
+  }
+
+  static TestOperand Reg(int index = kNoValue) { return Reg(VReg(), index); }
+
+  static TestOperand Slot(VReg vreg, int index = kNoValue) {
+    TestOperandType type = kSlot;
+    if (index != kNoValue) type = kFixedSlot;
+    return TestOperand(type, vreg, index);
+  }
+
+  static TestOperand Slot(int index = kNoValue) { return Slot(VReg(), index); }
+
+  static TestOperand Const(int index) {
+    CHECK_NE(kNoValue, index);
+    return TestOperand(kConstant, VReg(), index);
+  }
+
+  static TestOperand Use(VReg vreg) { return TestOperand(kNone, vreg); }
+
+  static TestOperand Use() { return Use(VReg()); }
+
+  static TestOperand Unique(VReg vreg) { return TestOperand(kUnique, vreg); }
+
+  static TestOperand UniqueReg(VReg vreg) {
+    return TestOperand(kUniqueRegister, vreg);
+  }
+
+  enum BlockCompletionType { kBlockEnd, kFallThrough, kBranch, kJump };
+
+  struct BlockCompletion {
+    BlockCompletionType type_;
+    TestOperand op_;
+    int offset_0_;
+    int offset_1_;
+  };
+
+  static BlockCompletion FallThrough() {
+    BlockCompletion completion = {kFallThrough, TestOperand(), 1, kNoValue};
+    return completion;
+  }
+
+  static BlockCompletion Jump(int offset) {
+    BlockCompletion completion = {kJump, TestOperand(), offset, kNoValue};
+    return completion;
+  }
+
+  static BlockCompletion Branch(TestOperand op, int left_offset,
+                                int right_offset) {
+    BlockCompletion completion = {kBranch, op, left_offset, right_offset};
+    return completion;
+  }
+
+  static BlockCompletion Last() {
+    BlockCompletion completion = {kBlockEnd, TestOperand(), kNoValue, kNoValue};
+    return completion;
+  }
+
+  InstructionSequenceTest();
+
+  void SetNumRegs(int num_general_registers, int num_double_registers);
+  RegisterConfiguration* config();
+  InstructionSequence* sequence();
+
+  void StartLoop(int loop_blocks);
+  void EndLoop();
+  void StartBlock();
+  int EndBlock(BlockCompletion completion = FallThrough());
+
+  TestOperand Imm(int32_t imm = 0);
+  VReg Define(TestOperand output_op);
+  VReg Parameter(TestOperand output_op = Reg()) { return Define(output_op); }
+
+  int Return(TestOperand input_op_0);
+  int Return(VReg vreg) { return Return(Reg(vreg, 0)); }
+
+  PhiInstruction* Phi(VReg incoming_vreg_0 = VReg(),
+                      VReg incoming_vreg_1 = VReg(),
+                      VReg incoming_vreg_2 = VReg(),
+                      VReg incoming_vreg_3 = VReg());
+  void Extend(PhiInstruction* phi, VReg vreg);
+
+  VReg DefineConstant(int32_t imm = 0);
+  int EmitNop();
+  int EmitI(size_t input_size, TestOperand* inputs);
+  int EmitI(TestOperand input_op_0 = TestOperand(),
+            TestOperand input_op_1 = TestOperand(),
+            TestOperand input_op_2 = TestOperand(),
+            TestOperand input_op_3 = TestOperand());
+  VReg EmitOI(TestOperand output_op, size_t input_size, TestOperand* inputs);
+  VReg EmitOI(TestOperand output_op, TestOperand input_op_0 = TestOperand(),
+              TestOperand input_op_1 = TestOperand(),
+              TestOperand input_op_2 = TestOperand(),
+              TestOperand input_op_3 = TestOperand());
+  VReg EmitCall(TestOperand output_op, size_t input_size, TestOperand* inputs);
+  VReg EmitCall(TestOperand output_op, TestOperand input_op_0 = TestOperand(),
+                TestOperand input_op_1 = TestOperand(),
+                TestOperand input_op_2 = TestOperand(),
+                TestOperand input_op_3 = TestOperand());
+
+  // Looks up an instruction by the index that the Emit* helpers above return
+  // when the emitted instruction defines no value.
+  const Instruction* GetInstruction(int instruction_index);
+
+  InstructionBlock* current_block() const { return current_block_; }
+  int num_general_registers() const { return num_general_registers_; }
+  int num_double_registers() const { return num_double_registers_; }
+
+  // Called after all instructions have been inserted.
+  void WireBlocks();
+
+ private:
+  VReg NewReg() { return VReg(sequence()->NextVirtualRegister()); }
+  int NewIndex() { return current_instruction_index_--; }
+
+  static TestOperand Invalid() { return TestOperand(kInvalid, VReg()); }
+
+  int EmitBranch(TestOperand input_op);
+  int EmitFallThrough();
+  int EmitJump();
+  Instruction* NewInstruction(InstructionCode code, size_t outputs_size,
+                              InstructionOperand** outputs,
+                              size_t inputs_size = 0,
+                              InstructionOperand** inputs = nullptr,
+                              size_t temps_size = 0,
+                              InstructionOperand** temps = nullptr);
+  InstructionOperand* Unallocated(TestOperand op,
+                                  UnallocatedOperand::ExtendedPolicy policy);
+  InstructionOperand* Unallocated(TestOperand op,
+                                  UnallocatedOperand::ExtendedPolicy policy,
+                                  UnallocatedOperand::Lifetime lifetime);
+  InstructionOperand* Unallocated(TestOperand op,
+                                  UnallocatedOperand::ExtendedPolicy policy,
+                                  int index);
+  InstructionOperand* Unallocated(TestOperand op,
+                                  UnallocatedOperand::BasicPolicy policy,
+                                  int index);
+  InstructionOperand** ConvertInputs(size_t input_size, TestOperand* inputs);
+  InstructionOperand* ConvertInputOp(TestOperand op);
+  InstructionOperand* ConvertOutputOp(VReg vreg, TestOperand op);
+  InstructionBlock* NewBlock();
+  void WireBlock(size_t block_offset, int jump_offset);
+
+  int Emit(int instruction_index, InstructionCode code, size_t outputs_size = 0,
+           InstructionOperand** outputs = nullptr, size_t inputs_size = 0,
+           InstructionOperand** inputs = nullptr, size_t temps_size = 0,
+           InstructionOperand** temps = nullptr, bool is_call = false);
+
+  int AddInstruction(int instruction_index, Instruction* instruction);
+
+  struct LoopData {
+    Rpo loop_header_;
+    int expected_blocks_;
+  };
+
+  typedef std::vector<LoopData> LoopBlocks;
+  typedef std::map<int, const Instruction*> Instructions;
+  typedef std::vector<BlockCompletion> Completions;
+
+  SmartPointer<RegisterConfiguration> config_;
+  InstructionSequence* sequence_;
+  int num_general_registers_;
+  int num_double_registers_;
+
+  // Block building state.
+  InstructionBlocks instruction_blocks_;
+  Instructions instructions_;
+  int current_instruction_index_;
+  Completions completions_;
+  LoopBlocks loop_blocks_;
+  InstructionBlock* current_block_;
+  bool block_returns_;
+};
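+
+// A minimal usage sketch (editorial illustration; the test body is
+// hypothetical, but the calls match the declarations above):
+//
+//   StartBlock();
+//   VReg v = Parameter(Reg());  // define a value in block 0
+//   EndBlock(FallThrough());    // offset +1 targets the next block
+//   StartBlock();
+//   Return(v);                  // consume it in block 1 (fixed register 0)
+//   EndBlock(Last());
+//   WireBlocks();               // resolve the relative block offsets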
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
+
+#endif  // V8_UNITTESTS_COMPILER_INSTRUCTION_SEQUENCE_UNITTEST_H_
diff --git a/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc b/deps/v8/test/unittests/compiler/js-builtin-reducer-unittest.cc
index 0a0d8d6..87d1ad5 100644 (file)
@@ -23,18 +23,12 @@ class JSBuiltinReducerTest : public TypedGraphTest {
  protected:
   Reduction Reduce(Node* node, MachineOperatorBuilder::Flags flags =
                                    MachineOperatorBuilder::Flag::kNoFlags) {
-    MachineOperatorBuilder machine(kMachPtr, flags);
+    MachineOperatorBuilder machine(zone(), kMachPtr, flags);
     JSGraph jsgraph(graph(), common(), javascript(), &machine);
     JSBuiltinReducer reducer(&jsgraph);
     return reducer.Reduce(node);
   }
 
-  Node* Parameter(Type* t, int32_t index = 0) {
-    Node* n = graph()->NewNode(common()->Parameter(index), graph()->start());
-    NodeProperties::SetBounds(n, Bounds(Type::None(), t));
-    return n;
-  }
-
   Handle<JSFunction> MathFunction(const char* name) {
     Handle<Object> m =
         JSObject::GetProperty(isolate()->global_object(),
@@ -58,11 +52,12 @@ namespace {
 
 // TODO(mstarzinger): Find a common place and unify with test-js-typed-lowering.
 Type* const kNumberTypes[] = {
-    Type::UnsignedSmall(),   Type::OtherSignedSmall(), Type::OtherUnsigned31(),
-    Type::OtherUnsigned32(), Type::OtherSigned32(),    Type::SignedSmall(),
-    Type::Signed32(),        Type::Unsigned32(),       Type::Integral32(),
-    Type::MinusZero(),       Type::NaN(),              Type::OtherNumber(),
-    Type::OrderedNumber(),   Type::Number()};
+    Type::UnsignedSmall(),       Type::NegativeSigned32(),
+    Type::NonNegativeSigned32(), Type::SignedSmall(),
+    Type::Signed32(),            Type::Unsigned32(),
+    Type::Integral32(),          Type::MinusZero(),
+    Type::NaN(),                 Type::OrderedNumber(),
+    Type::PlainNumber(),         Type::Number()};
 
 }  // namespace
 
index 31d5a03..7aa0c64 100644 (file)
@@ -3,7 +3,9 @@
 // found in the LICENSE file.
 
 #include "src/compiler/js-operator.h"
-#include "src/compiler/operator-properties-inl.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties.h"
 #include "test/unittests/test-utils.h"
 
 namespace v8 {
@@ -13,6 +15,7 @@ namespace compiler {
 // -----------------------------------------------------------------------------
 // Shared operators.
 
+
 namespace {
 
 struct SharedOperator {
@@ -61,8 +64,8 @@ const SharedOperator kSharedOperators[] = {
     SHARED(Multiply, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
     SHARED(Divide, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
     SHARED(Modulus, Operator::kNoProperties, 2, 1, 1, 1, 1, 1),
-    SHARED(UnaryNot, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
-    SHARED(ToBoolean, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
+    SHARED(UnaryNot, Operator::kPure, 1, 0, 0, 0, 1, 0),
+    SHARED(ToBoolean, Operator::kPure, 1, 0, 0, 0, 1, 0),
     SHARED(ToNumber, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
     SHARED(ToString, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
     SHARED(ToName, Operator::kNoProperties, 1, 0, 1, 1, 1, 1),
@@ -77,7 +80,7 @@ const SharedOperator kSharedOperators[] = {
     SHARED(CreateWithContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
     SHARED(CreateBlockContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
     SHARED(CreateModuleContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1),
-    SHARED(CreateGlobalContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1)
+    SHARED(CreateScriptContext, Operator::kNoProperties, 2, 0, 1, 1, 1, 1)
 #undef SHARED
 };
 
@@ -142,6 +145,73 @@ TEST_P(JSSharedOperatorTest, Properties) {
 INSTANTIATE_TEST_CASE_P(JSOperatorTest, JSSharedOperatorTest,
                         ::testing::ValuesIn(kSharedOperators));
 
+
+// -----------------------------------------------------------------------------
+// JSStoreProperty.
+
+
+class JSStorePropertyOperatorTest
+    : public TestWithZone,
+      public ::testing::WithParamInterface<StrictMode> {};
+
+
+TEST_P(JSStorePropertyOperatorTest, InstancesAreGloballyShared) {
+  const StrictMode mode = GetParam();
+  JSOperatorBuilder javascript1(zone());
+  JSOperatorBuilder javascript2(zone());
+  EXPECT_EQ(javascript1.StoreProperty(mode), javascript2.StoreProperty(mode));
+}
+
+
+TEST_P(JSStorePropertyOperatorTest, NumberOfInputsAndOutputs) {
+  JSOperatorBuilder javascript(zone());
+  const StrictMode mode = GetParam();
+  const Operator* op = javascript.StoreProperty(mode);
+
+  // TODO(jarin): Get rid of this hack.
+  const int frame_state_input_count = FLAG_turbo_deoptimization ? 1 : 0;
+  EXPECT_EQ(3, op->ValueInputCount());
+  EXPECT_EQ(1, OperatorProperties::GetContextInputCount(op));
+  EXPECT_EQ(frame_state_input_count,
+            OperatorProperties::GetFrameStateInputCount(op));
+  EXPECT_EQ(1, op->EffectInputCount());
+  EXPECT_EQ(1, op->ControlInputCount());
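+  // Total: 3 value + 1 context + 1 effect + 1 control inputs, plus the
+  // optional frame state.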
+  EXPECT_EQ(6 + frame_state_input_count,
+            OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(0, op->ValueOutputCount());
+  EXPECT_EQ(1, op->EffectOutputCount());
+  EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+TEST_P(JSStorePropertyOperatorTest, OpcodeIsCorrect) {
+  JSOperatorBuilder javascript(zone());
+  const StrictMode mode = GetParam();
+  const Operator* op = javascript.StoreProperty(mode);
+  EXPECT_EQ(IrOpcode::kJSStoreProperty, op->opcode());
+}
+
+
+TEST_P(JSStorePropertyOperatorTest, OpParameter) {
+  JSOperatorBuilder javascript(zone());
+  const StrictMode mode = GetParam();
+  const Operator* op = javascript.StoreProperty(mode);
+  EXPECT_EQ(mode, OpParameter<StrictMode>(op));
+}
+
+
+TEST_P(JSStorePropertyOperatorTest, Properties) {
+  JSOperatorBuilder javascript(zone());
+  const StrictMode mode = GetParam();
+  const Operator* op = javascript.StoreProperty(mode);
+  EXPECT_EQ(Operator::kNoProperties, op->properties());
+}
+
+
+INSTANTIATE_TEST_CASE_P(JSOperatorTest, JSStorePropertyOperatorTest,
+                        ::testing::Values(SLOPPY, STRICT));
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
diff --git a/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc b/deps/v8/test/unittests/compiler/js-typed-lowering-unittest.cc
index 539785d..e4ea4a5 100644 (file)
@@ -20,10 +20,16 @@ namespace compiler {
 namespace {
 
 const ExternalArrayType kExternalArrayTypes[] = {
-#define TYPED_ARRAY_CASE(Type, type, TYPE, ctype, size) kExternal##Type##Array,
-    TYPED_ARRAYS(TYPED_ARRAY_CASE)
-#undef TYPED_ARRAY_CASE
-};
+    kExternalUint8Array,   kExternalInt8Array,   kExternalUint16Array,
+    kExternalInt16Array,   kExternalUint32Array, kExternalInt32Array,
+    kExternalFloat32Array, kExternalFloat64Array};
+
+
+const size_t kIndices[] = {0, 1, 42, 100, 1024};
+
+
+Type* const kJSTypes[] = {Type::Undefined(), Type::Null(),   Type::Boolean(),
+                          Type::Number(),    Type::String(), Type::Object()};
 
 
 const StrictMode kStrictModes[] = {SLOPPY, STRICT};
@@ -34,22 +40,16 @@ const StrictMode kStrictModes[] = {SLOPPY, STRICT};
 class JSTypedLoweringTest : public TypedGraphTest {
  public:
   JSTypedLoweringTest() : TypedGraphTest(3), javascript_(zone()) {}
-  virtual ~JSTypedLoweringTest() {}
+  ~JSTypedLoweringTest() OVERRIDE {}
 
  protected:
   Reduction Reduce(Node* node) {
-    MachineOperatorBuilder machine;
+    MachineOperatorBuilder machine(zone());
     JSGraph jsgraph(graph(), common(), javascript(), &machine);
-    JSTypedLowering reducer(&jsgraph);
+    JSTypedLowering reducer(&jsgraph, zone());
     return reducer.Reduce(node);
   }
 
-  Node* Parameter(Type* type, int index = 0) {
-    Node* node = graph()->NewNode(common()->Parameter(index), graph()->start());
-    NodeProperties::SetBounds(node, Bounds(Type::None(), type));
-    return node;
-  }
-
   Handle<JSArrayBuffer> NewArrayBuffer(void* bytes, size_t byte_length) {
     Handle<JSArrayBuffer> buffer = factory()->NewJSArrayBuffer();
     Runtime::SetupArrayBuffer(isolate(), buffer, true, bytes, byte_length);
@@ -72,14 +72,79 @@ class JSTypedLoweringTest : public TypedGraphTest {
 // JSToBoolean
 
 
+TEST_F(JSTypedLoweringTest, JSToBooleanWithBoolean) {
+  Node* input = Parameter(Type::Boolean());
+  Node* context = UndefinedConstant();
+
+  Reduction r =
+      Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_EQ(input, r.replacement());
+}
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithUndefined) {
+  Node* input = Parameter(Type::Undefined());
+  Node* context = UndefinedConstant();
+
+  Reduction r =
+      Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFalseConstant());
+}
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithNull) {
+  Node* input = Parameter(Type::Null());
+  Node* context = UndefinedConstant();
+
+  Reduction r =
+      Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFalseConstant());
+}
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithDetectableReceiver) {
+  Node* input = Parameter(Type::DetectableReceiver());
+  Node* context = UndefinedConstant();
+
+  Reduction r =
+      Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsTrueConstant());
+}
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithUndetectable) {
+  Node* input = Parameter(Type::Undetectable());
+  Node* context = UndefinedConstant();
+
+  Reduction r =
+      Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsFalseConstant());
+}
+
+
+TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumber) {
+  Node* input = Parameter(Type::OrderedNumber());
+  Node* context = UndefinedConstant();
+
+  Reduction r =
+      Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(),
+              IsBooleanNot(IsNumberEqual(input, IsNumberConstant(0))));
+}
+
+
 TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
   Node* input = Parameter(Type::String());
   Node* context = UndefinedConstant();
-  Node* effect = graph()->start();
-  Node* control = graph()->start();
 
-  Reduction r = Reduce(graph()->NewNode(javascript()->ToBoolean(), input,
-                                        context, effect, control));
+  Reduction r =
+      Reduce(graph()->NewNode(javascript()->ToBoolean(), input, context));
   ASSERT_TRUE(r.Changed());
   EXPECT_THAT(r.replacement(),
               IsBooleanNot(IsNumberEqual(
@@ -89,17 +154,16 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithString) {
 }
 
 
-TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumberAndBoolean) {
+TEST_F(JSTypedLoweringTest, JSToBooleanWithPhi) {
   Node* p0 = Parameter(Type::OrderedNumber(), 0);
   Node* p1 = Parameter(Type::Boolean(), 1);
   Node* context = UndefinedConstant();
-  Node* effect = graph()->start();
   Node* control = graph()->start();
 
   Reduction r = Reduce(graph()->NewNode(
       javascript()->ToBoolean(),
       graph()->NewNode(common()->Phi(kMachAnyTagged, 2), p0, p1, control),
-      context, effect, control));
+      context));
   ASSERT_TRUE(r.Changed());
   EXPECT_THAT(
       r.replacement(),
@@ -108,6 +172,227 @@ TEST_F(JSTypedLoweringTest, JSToBooleanWithOrderedNumberAndBoolean) {
 }
 
 
+TEST_F(JSTypedLoweringTest, JSToBooleanWithSelect) {
+  Node* p0 = Parameter(Type::Boolean(), 0);
+  Node* p1 = Parameter(Type::DetectableReceiver(), 1);
+  Node* p2 = Parameter(Type::OrderedNumber(), 2);
+  Node* context = UndefinedConstant();
+
+  Reduction r = Reduce(graph()->NewNode(
+      javascript()->ToBoolean(),
+      graph()->NewNode(common()->Select(kMachAnyTagged, BranchHint::kTrue), p0,
+                       p1, p2),
+      context));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(),
+              IsSelect(kMachAnyTagged, p0, IsTrueConstant(),
+                       IsBooleanNot(IsNumberEqual(p2, IsNumberConstant(0)))));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSToNumber
+
+
+TEST_F(JSTypedLoweringTest, JSToNumberWithPlainPrimitive) {
+  Node* const input = Parameter(Type::PlainPrimitive(), 0);
+  Node* const context = Parameter(Type::Any(), 1);
+  Node* const effect = graph()->start();
+  Node* const control = graph()->start();
+  Reduction r = Reduce(graph()->NewNode(javascript()->ToNumber(), input,
+                                        context, effect, control));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(), IsToNumber(input, IsNumberConstant(0),
+                                          graph()->start(), control));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSStrictEqual
+
+
+TEST_F(JSTypedLoweringTest, JSStrictEqualWithTheHole) {
+  Node* const the_hole = HeapConstant(factory()->the_hole_value());
+  Node* const context = UndefinedConstant();
+  Node* const effect = graph()->start();
+  Node* const control = graph()->start();
+  TRACED_FOREACH(Type*, type, kJSTypes) {
+    Node* const lhs = Parameter(type);
+    Reduction r = Reduce(graph()->NewNode(javascript()->StrictEqual(), lhs,
+                                          the_hole, context, effect, control));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsFalseConstant());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// JSShiftLeft
+
+
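+// Editorial note: ES5 takes shift counts modulo 32, which is why the tests
+// below expect a non-constant shift count to be masked with 0x1f.
+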
+TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndConstant) {
+  Node* const lhs = Parameter(Type::Signed32());
+  Node* const context = UndefinedConstant();
+  Node* const effect = graph()->start();
+  Node* const control = graph()->start();
+  TRACED_FORRANGE(int32_t, rhs, 0, 31) {
+    Reduction r =
+        Reduce(graph()->NewNode(javascript()->ShiftLeft(), lhs,
+                                NumberConstant(rhs), context, effect, control));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsWord32Shl(lhs, IsNumberConstant(rhs)));
+  }
+}
+
+
+TEST_F(JSTypedLoweringTest, JSShiftLeftWithSigned32AndUnsigned32) {
+  Node* const lhs = Parameter(Type::Signed32());
+  Node* const rhs = Parameter(Type::Unsigned32());
+  Node* const context = UndefinedConstant();
+  Node* const effect = graph()->start();
+  Node* const control = graph()->start();
+  Reduction r = Reduce(graph()->NewNode(javascript()->ShiftLeft(), lhs, rhs,
+                                        context, effect, control));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(),
+              IsWord32Shl(lhs, IsWord32And(rhs, IsInt32Constant(0x1f))));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSShiftRight
+
+
+TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndConstant) {
+  Node* const lhs = Parameter(Type::Signed32());
+  Node* const context = UndefinedConstant();
+  Node* const effect = graph()->start();
+  Node* const control = graph()->start();
+  TRACED_FORRANGE(int32_t, rhs, 0, 31) {
+    Reduction r =
+        Reduce(graph()->NewNode(javascript()->ShiftRight(), lhs,
+                                NumberConstant(rhs), context, effect, control));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsWord32Sar(lhs, IsNumberConstant(rhs)));
+  }
+}
+
+
+TEST_F(JSTypedLoweringTest, JSShiftRightWithSigned32AndUnsigned32) {
+  Node* const lhs = Parameter(Type::Signed32());
+  Node* const rhs = Parameter(Type::Unsigned32());
+  Node* const context = UndefinedConstant();
+  Node* const effect = graph()->start();
+  Node* const control = graph()->start();
+  Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRight(), lhs, rhs,
+                                        context, effect, control));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(),
+              IsWord32Sar(lhs, IsWord32And(rhs, IsInt32Constant(0x1f))));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSShiftRightLogical
+
+
+TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithUnsigned32AndConstant) {
+  Node* const lhs = Parameter(Type::Unsigned32());
+  Node* const context = UndefinedConstant();
+  Node* const effect = graph()->start();
+  Node* const control = graph()->start();
+  TRACED_FORRANGE(int32_t, rhs, 0, 31) {
+    Reduction r =
+        Reduce(graph()->NewNode(javascript()->ShiftRightLogical(), lhs,
+                                NumberConstant(rhs), context, effect, control));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(), IsWord32Shr(lhs, IsNumberConstant(rhs)));
+  }
+}
+
+
+TEST_F(JSTypedLoweringTest, JSShiftRightLogicalWithUnsigned32AndUnsigned32) {
+  Node* const lhs = Parameter(Type::Unsigned32());
+  Node* const rhs = Parameter(Type::Unsigned32());
+  Node* const context = UndefinedConstant();
+  Node* const effect = graph()->start();
+  Node* const control = graph()->start();
+  Reduction r = Reduce(graph()->NewNode(javascript()->ShiftRightLogical(), lhs,
+                                        rhs, context, effect, control));
+  ASSERT_TRUE(r.Changed());
+  EXPECT_THAT(r.replacement(),
+              IsWord32Shr(lhs, IsWord32And(rhs, IsInt32Constant(0x1f))));
+}
+
+
+// -----------------------------------------------------------------------------
+// JSLoadContext
+
+
+TEST_F(JSTypedLoweringTest, JSLoadContext) {
+  Node* const context = Parameter(Type::Any());
+  Node* const effect = graph()->start();
+  static bool kBooleans[] = {false, true};
+  TRACED_FOREACH(size_t, index, kIndices) {
+    TRACED_FOREACH(bool, immutable, kBooleans) {
+      Reduction const r1 = Reduce(
+          graph()->NewNode(javascript()->LoadContext(0, index, immutable),
+                           context, context, effect));
+      ASSERT_TRUE(r1.Changed());
+      EXPECT_THAT(r1.replacement(),
+                  IsLoadField(AccessBuilder::ForContextSlot(index), context,
+                              effect, graph()->start()));
+
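+      // At depth 1 the lowering first walks up one context through the
+      // PREVIOUS_INDEX slot before loading the target slot.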
+      Reduction const r2 = Reduce(
+          graph()->NewNode(javascript()->LoadContext(1, index, immutable),
+                           context, context, effect));
+      ASSERT_TRUE(r2.Changed());
+      EXPECT_THAT(r2.replacement(),
+                  IsLoadField(AccessBuilder::ForContextSlot(index),
+                              IsLoadField(AccessBuilder::ForContextSlot(
+                                              Context::PREVIOUS_INDEX),
+                                          context, effect, graph()->start()),
+                              effect, graph()->start()));
+    }
+  }
+}
+
+
+// -----------------------------------------------------------------------------
+// JSStoreContext
+
+
+TEST_F(JSTypedLoweringTest, JSStoreContext) {
+  Node* const context = Parameter(Type::Any());
+  Node* const effect = graph()->start();
+  Node* const control = graph()->start();
+  TRACED_FOREACH(size_t, index, kIndices) {
+    TRACED_FOREACH(Type*, type, kJSTypes) {
+      Node* const value = Parameter(type);
+
+      Reduction const r1 =
+          Reduce(graph()->NewNode(javascript()->StoreContext(0, index), context,
+                                  value, context, effect, control));
+      ASSERT_TRUE(r1.Changed());
+      EXPECT_THAT(r1.replacement(),
+                  IsStoreField(AccessBuilder::ForContextSlot(index), context,
+                               value, effect, control));
+
+      Reduction const r2 =
+          Reduce(graph()->NewNode(javascript()->StoreContext(1, index), context,
+                                  value, context, effect, control));
+      ASSERT_TRUE(r2.Changed());
+      EXPECT_THAT(r2.replacement(),
+                  IsStoreField(AccessBuilder::ForContextSlot(index),
+                               IsLoadField(AccessBuilder::ForContextSlot(
+                                               Context::PREVIOUS_INDEX),
+                                           context, effect, graph()->start()),
+                               value, effect, control));
+    }
+  }
+}
+
+
 // -----------------------------------------------------------------------------
 // JSLoadProperty
 
@@ -122,8 +407,58 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
   TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
     Handle<JSTypedArray> array =
         factory()->NewJSTypedArray(type, buffer, 0, kLength);
+    int const element_size = static_cast<int>(array->element_size());
+
+    Node* key = Parameter(
+        Type::Range(factory()->NewNumber(kMinInt / element_size),
+                    factory()->NewNumber(kMaxInt / element_size), zone()));
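+    // (The key range is chosen so that key * element_size cannot overflow
+    // int32.)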
+    Node* base = HeapConstant(array);
+    Node* context = UndefinedConstant();
+    Node* effect = graph()->start();
+    Node* control = graph()->start();
+    Node* node = graph()->NewNode(javascript()->LoadProperty(feedback), base,
+                                  key, context);
+    if (FLAG_turbo_deoptimization) {
+      node->AppendInput(zone(), UndefinedConstant());
+    }
+    node->AppendInput(zone(), effect);
+    node->AppendInput(zone(), control);
+    Reduction r = Reduce(node);
+
+    Matcher<Node*> offset_matcher =
+        element_size == 1
+            ? key
+            : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(
+        r.replacement(),
+        IsLoadBuffer(BufferAccess(type),
+                     IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+                     offset_matcher,
+                     IsNumberConstant(array->byte_length()->Number()), effect,
+                     control));
+  }
+}
+
+
+TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArrayWithSafeKey) {
+  const size_t kLength = 17;
+  double backing_store[kLength];
+  Handle<JSArrayBuffer> buffer =
+      NewArrayBuffer(backing_store, sizeof(backing_store));
+  VectorSlotPair feedback(Handle<TypeFeedbackVector>::null(),
+                          FeedbackVectorICSlot::Invalid());
+  TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
+    Handle<JSTypedArray> array =
+        factory()->NewJSTypedArray(type, buffer, 0, kLength);
+    ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
 
-    Node* key = Parameter(Type::Integral32());
+    int min = random_number_generator()->NextInt(static_cast<int>(kLength));
+    int max = random_number_generator()->NextInt(static_cast<int>(kLength));
+    if (min > max) std::swap(min, max);
+    Node* key = Parameter(Type::Range(factory()->NewNumber(min),
+                                      factory()->NewNumber(max), zone()));
     Node* base = HeapConstant(array);
     Node* context = UndefinedConstant();
     Node* effect = graph()->start();
@@ -138,11 +473,11 @@ TEST_F(JSTypedLoweringTest, JSLoadPropertyFromExternalTypedArray) {
     Reduction r = Reduce(node);
 
     ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(),
-                IsLoadElement(
-                    AccessBuilder::ForTypedArrayElement(type, true),
-                    IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
-                    key, IsNumberConstant(array->length()->Number()), effect));
+    EXPECT_THAT(
+        r.replacement(),
+        IsLoadElement(access,
+                      IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+                      key, effect, control));
   }
 }
 
@@ -160,8 +495,58 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
     TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
       Handle<JSTypedArray> array =
           factory()->NewJSTypedArray(type, buffer, 0, kLength);
+      int const element_size = static_cast<int>(array->element_size());
 
-      Node* key = Parameter(Type::Integral32());
+      Node* key = Parameter(
+          Type::Range(factory()->NewNumber(kMinInt / element_size),
+                      factory()->NewNumber(kMaxInt / element_size), zone()));
+      Node* base = HeapConstant(array);
+      Node* value =
+          Parameter(AccessBuilder::ForTypedArrayElement(type, true).type);
+      Node* context = UndefinedConstant();
+      Node* effect = graph()->start();
+      Node* control = graph()->start();
+      Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
+                                    base, key, value, context);
+      if (FLAG_turbo_deoptimization) {
+        node->AppendInput(zone(), UndefinedConstant());
+      }
+      node->AppendInput(zone(), effect);
+      node->AppendInput(zone(), control);
+      Reduction r = Reduce(node);
+
+      Matcher<Node*> offset_matcher =
+          element_size == 1
+              ? key
+              : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(
+          r.replacement(),
+          IsStoreBuffer(BufferAccess(type),
+                        IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+                        offset_matcher,
+                        IsNumberConstant(array->byte_length()->Number()), value,
+                        effect, control));
+    }
+  }
+}
+
+
+TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithConversion) {
+  const size_t kLength = 17;
+  double backing_store[kLength];
+  Handle<JSArrayBuffer> buffer =
+      NewArrayBuffer(backing_store, sizeof(backing_store));
+  TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
+    TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+      Handle<JSTypedArray> array =
+          factory()->NewJSTypedArray(type, buffer, 0, kLength);
+      int const element_size = static_cast<int>(array->element_size());
+
+      Node* key = Parameter(
+          Type::Range(factory()->NewNumber(kMinInt / element_size),
+                      factory()->NewNumber(kMaxInt / element_size), zone()));
       Node* base = HeapConstant(array);
       Node* value = Parameter(Type::Any());
       Node* context = UndefinedConstant();
@@ -176,13 +561,71 @@ TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArray) {
       node->AppendInput(zone(), control);
       Reduction r = Reduce(node);
 
+      Matcher<Node*> offset_matcher =
+          element_size == 1
+              ? key
+              : IsWord32Shl(key, IsInt32Constant(WhichPowerOf2(element_size)));
+
+      Matcher<Node*> value_matcher =
+          IsToNumber(value, context, effect, control);
+      Matcher<Node*> effect_matcher = value_matcher;
+      if (AccessBuilder::ForTypedArrayElement(type, true)
+              .type->Is(Type::Signed32())) {
+        value_matcher = IsNumberToInt32(value_matcher);
+      } else if (AccessBuilder::ForTypedArrayElement(type, true)
+                     .type->Is(Type::Unsigned32())) {
+        value_matcher = IsNumberToUint32(value_matcher);
+      }
+
       ASSERT_TRUE(r.Changed());
-      EXPECT_THAT(r.replacement(),
-                  IsStoreElement(
-                      AccessBuilder::ForTypedArrayElement(type, true),
-                      IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
-                      key, IsNumberConstant(array->length()->Number()), value,
-                      effect, control));
+      EXPECT_THAT(
+          r.replacement(),
+          IsStoreBuffer(BufferAccess(type),
+                        IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+                        offset_matcher,
+                        IsNumberConstant(array->byte_length()->Number()),
+                        value_matcher, effect_matcher, control));
+    }
+  }
+}
+
+
+TEST_F(JSTypedLoweringTest, JSStorePropertyToExternalTypedArrayWithSafeKey) {
+  const size_t kLength = 17;
+  double backing_store[kLength];
+  Handle<JSArrayBuffer> buffer =
+      NewArrayBuffer(backing_store, sizeof(backing_store));
+  TRACED_FOREACH(ExternalArrayType, type, kExternalArrayTypes) {
+    TRACED_FOREACH(StrictMode, strict_mode, kStrictModes) {
+      Handle<JSTypedArray> array =
+          factory()->NewJSTypedArray(type, buffer, 0, kLength);
+      ElementAccess access = AccessBuilder::ForTypedArrayElement(type, true);
+
+      int min = random_number_generator()->NextInt(static_cast<int>(kLength));
+      int max = random_number_generator()->NextInt(static_cast<int>(kLength));
+      if (min > max) std::swap(min, max);
+      Node* key = Parameter(Type::Range(factory()->NewNumber(min),
+                                        factory()->NewNumber(max), zone()));
+      Node* base = HeapConstant(array);
+      Node* value = Parameter(access.type);
+      Node* context = UndefinedConstant();
+      Node* effect = graph()->start();
+      Node* control = graph()->start();
+      Node* node = graph()->NewNode(javascript()->StoreProperty(strict_mode),
+                                    base, key, value, context);
+      if (FLAG_turbo_deoptimization) {
+        node->AppendInput(zone(), UndefinedConstant());
+      }
+      node->AppendInput(zone(), effect);
+      node->AppendInput(zone(), control);
+      Reduction r = Reduce(node);
+
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(
+          r.replacement(),
+          IsStoreElement(
+              access, IsIntPtrConstant(bit_cast<intptr_t>(&backing_store[0])),
+              key, value, effect, control));
     }
   }
 }
diff --git a/deps/v8/test/unittests/compiler/load-elimination-unittest.cc b/deps/v8/test/unittests/compiler/load-elimination-unittest.cc
new file mode 100644 (file)
index 0000000..f0cd60e
--- /dev/null
@@ -0,0 +1,72 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/access-builder.h"
+#include "src/compiler/load-elimination.h"
+#include "src/compiler/simplified-operator.h"
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/compiler/node-test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class LoadEliminationTest : public GraphTest {
+ public:
+  LoadEliminationTest() : GraphTest(3), simplified_(zone()) {}
+  ~LoadEliminationTest() OVERRIDE {}
+
+ protected:
+  Reduction Reduce(Node* node) {
+    LoadElimination reducer;
+    return reducer.Reduce(node);
+  }
+
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
+
+ private:
+  SimplifiedOperatorBuilder simplified_;
+};
+
+
+TEST_F(LoadEliminationTest, LoadFieldWithStoreField) {
+  Node* object1 = Parameter(0);
+  Node* object2 = Parameter(1);
+  Node* value = Parameter(2);
+  Node* effect = graph()->start();
+  Node* control = graph()->start();
+
+  FieldAccess access1 = AccessBuilder::ForContextSlot(42);
+  Node* store1 = graph()->NewNode(simplified()->StoreField(access1), object1,
+                                  value, effect, control);
+  Reduction r1 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
+                                         object1, store1, control));
+  ASSERT_TRUE(r1.Changed());
+  EXPECT_EQ(value, r1.replacement());
+
+  FieldAccess access2 = AccessBuilder::ForMap();
+  Node* store2 = graph()->NewNode(simplified()->StoreField(access2), object1,
+                                  object2, store1, control);
+  Reduction r2 = Reduce(graph()->NewNode(simplified()->LoadField(access2),
+                                         object1, store2, control));
+  ASSERT_TRUE(r2.Changed());
+  EXPECT_EQ(object2, r2.replacement());
+
+  Node* store3 = graph()->NewNode(
+      simplified()->StoreBuffer(BufferAccess(kExternalInt8Array)), object2,
+      value, Int32Constant(10), object1, store2, control);
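+
+  // A StoreBuffer does not clobber tracked field values: the load from
+  // object1 below still folds to |value|, while the load from object2 sees
+  // no store through access1 and cannot be eliminated.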
+
+  Reduction r3 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
+                                         object2, store3, control));
+  ASSERT_FALSE(r3.Changed());
+
+  Reduction r4 = Reduce(graph()->NewNode(simplified()->LoadField(access1),
+                                         object1, store3, control));
+  ASSERT_TRUE(r4.Changed());
+  EXPECT_EQ(value, r4.replacement());
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-reducer-unittest.cc
index a62216d..b92bd28 100644 (file)
@@ -22,7 +22,7 @@ namespace compiler {
 class MachineOperatorReducerTest : public TypedGraphTest {
  public:
   explicit MachineOperatorReducerTest(int num_parameters = 2)
-      : TypedGraphTest(num_parameters) {}
+      : TypedGraphTest(num_parameters), machine_(zone()) {}
 
  protected:
   Reduction Reduce(Node* node) {
@@ -66,7 +66,7 @@ class MachineOperatorReducerTestWithParam
  public:
   explicit MachineOperatorReducerTestWithParam(int num_parameters = 2)
       : MachineOperatorReducerTest(num_parameters) {}
-  virtual ~MachineOperatorReducerTestWithParam() {}
+  ~MachineOperatorReducerTestWithParam() OVERRIDE {}
 };
 
 
@@ -512,7 +512,9 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithWord32AndWithConstant) {
           graph()->NewNode(machine()->Word32And(), p0, Int32Constant(k)),
           Int32Constant(l)));
       ASSERT_TRUE(r1.Changed());
-      EXPECT_THAT(r1.replacement(), IsWord32And(p0, IsInt32Constant(k & l)));
+      EXPECT_THAT(r1.replacement(),
+                  (k & l) ? IsWord32And(p0, IsInt32Constant(k & l))
+                          : IsInt32Constant(0));
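+      // (A combined mask of zero folds the whole expression to 0.)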
 
       // (K & x) & L => x & (K & L)
       Reduction const r2 = Reduce(graph()->NewNode(
@@ -520,7 +522,85 @@ TEST_F(MachineOperatorReducerTest, Word32AndWithWord32AndWithConstant) {
           graph()->NewNode(machine()->Word32And(), Int32Constant(k), p0),
           Int32Constant(l)));
       ASSERT_TRUE(r2.Changed());
-      EXPECT_THAT(r2.replacement(), IsWord32And(p0, IsInt32Constant(k & l)));
+      EXPECT_THAT(r2.replacement(),
+                  (k & l) ? IsWord32And(p0, IsInt32Constant(k & l))
+                          : IsInt32Constant(0));
+    }
+  }
+}
+
+
+TEST_F(MachineOperatorReducerTest, Word32AndWithInt32AddAndConstant) {
+  Node* const p0 = Parameter(0);
+  Node* const p1 = Parameter(1);
+
+  TRACED_FORRANGE(int32_t, l, 1, 31) {
+    TRACED_FOREACH(int32_t, k, kInt32Values) {
+      if ((k << l) == 0) continue;
+      // (x + (K << L)) & (-1 << L) => (x & (-1 << L)) + (K << L)
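+      // E.g. K = 1, L = 4: (x + 16) & ~15 == (x & ~15) + 16, since the added
+      // constant has no bits below bit L.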
+      Reduction const r = Reduce(graph()->NewNode(
+          machine()->Word32And(),
+          graph()->NewNode(machine()->Int32Add(), p0, Int32Constant(k << l)),
+          Int32Constant(-1 << l)));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(),
+                  IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)),
+                             IsInt32Constant(k << l)));
+    }
+
+    Node* s1 = graph()->NewNode(machine()->Word32Shl(), p1, Int32Constant(l));
+
+    // ((y << L) + x) & (-1 << L) => (x & (-1 << L)) + (y << L)
+    Reduction const r1 = Reduce(graph()->NewNode(
+        machine()->Word32And(), graph()->NewNode(machine()->Int32Add(), s1, p0),
+        Int32Constant(-1 << l)));
+    ASSERT_TRUE(r1.Changed());
+    EXPECT_THAT(r1.replacement(),
+                IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)), s1));
+
+    // (x + (y << L)) & (-1 << L) => (x & (-1 << L)) + (y << L)
+    Reduction const r2 = Reduce(graph()->NewNode(
+        machine()->Word32And(), graph()->NewNode(machine()->Int32Add(), p0, s1),
+        Int32Constant(-1 << l)));
+    ASSERT_TRUE(r2.Changed());
+    EXPECT_THAT(r2.replacement(),
+                IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)), s1));
+  }
+}
+
+
+TEST_F(MachineOperatorReducerTest,
+       Word32AndWithInt32AddAndInt32MulAndConstant) {
+  Node* const p0 = Parameter(0);
+  Node* const p1 = Parameter(1);
+
+  TRACED_FORRANGE(int32_t, l, 1, 31) {
+    TRACED_FOREACH(int32_t, k, kInt32Values) {
+      if ((k << l) == 0) continue;
+      // (y * (K << L) + x) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+      Reduction const r1 = Reduce(graph()->NewNode(
+          machine()->Word32And(),
+          graph()->NewNode(machine()->Int32Add(),
+                           graph()->NewNode(machine()->Int32Mul(), p1,
+                                            Int32Constant(k << l)),
+                           p0),
+          Int32Constant(-1 << l)));
+      ASSERT_TRUE(r1.Changed());
+      EXPECT_THAT(r1.replacement(),
+                  IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)),
+                             IsInt32Mul(p1, IsInt32Constant(k << l))));
+
+      // (x + y * (K << L)) & (-1 << L) => (x & (-1 << L)) + y * (K << L)
+      Reduction const r2 = Reduce(graph()->NewNode(
+          machine()->Word32And(),
+          graph()->NewNode(machine()->Int32Add(), p0,
+                           graph()->NewNode(machine()->Int32Mul(), p1,
+                                            Int32Constant(k << l))),
+          Int32Constant(-1 << l)));
+      ASSERT_TRUE(r2.Changed());
+      EXPECT_THAT(r2.replacement(),
+                  IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)),
+                             IsInt32Mul(p1, IsInt32Constant(k << l))));
     }
   }
 }
@@ -572,24 +652,43 @@ TEST_F(MachineOperatorReducerTest, Word32XorWithWord32XorAndMinusOne) {
 TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithParameters) {
   Node* value = Parameter(0);
   Node* shift = Parameter(1);
-  Node* shl = graph()->NewNode(machine()->Word32Shl(), value, shift);
-  Node* shr = graph()->NewNode(
-      machine()->Word32Shr(), value,
-      graph()->NewNode(machine()->Int32Sub(), Int32Constant(32), shift));
+  Node* sub = graph()->NewNode(machine()->Int32Sub(), Int32Constant(32), shift);
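+
+  // There is only a right-rotate at machine level, so a rotate left by y is
+  // expressed as x ror (32 - y).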
 
-  // (x << y) | (x >> (32 - y)) => x ror y
-  Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
+  // Testing rotate left.
+  Node* shl_l = graph()->NewNode(machine()->Word32Shl(), value, shift);
+  Node* shr_l = graph()->NewNode(machine()->Word32Shr(), value, sub);
+
+  // (x << y) | (x >>> (32 - y)) => x ror (32 - y)
+  Node* node1 = graph()->NewNode(machine()->Word32Or(), shl_l, shr_l);
   Reduction reduction1 = Reduce(node1);
   EXPECT_TRUE(reduction1.Changed());
   EXPECT_EQ(reduction1.replacement(), node1);
-  EXPECT_THAT(reduction1.replacement(), IsWord32Ror(value, shift));
+  EXPECT_THAT(reduction1.replacement(), IsWord32Ror(value, sub));
 
-  // (x >> (32 - y)) | (x << y) => x ror y
-  Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
+  // (x >>> (32 - y)) | (x << y) => x ror (32 - y)
+  Node* node2 = graph()->NewNode(machine()->Word32Or(), shr_l, shl_l);
   Reduction reduction2 = Reduce(node2);
   EXPECT_TRUE(reduction2.Changed());
   EXPECT_EQ(reduction2.replacement(), node2);
-  EXPECT_THAT(reduction2.replacement(), IsWord32Ror(value, shift));
+  EXPECT_THAT(reduction2.replacement(), IsWord32Ror(value, sub));
+
+  // Testing rotate right.
+  Node* shl_r = graph()->NewNode(machine()->Word32Shl(), value, sub);
+  Node* shr_r = graph()->NewNode(machine()->Word32Shr(), value, shift);
+
+  // (x << (32 - y)) | (x >>> y) => x ror y
+  Node* node3 = graph()->NewNode(machine()->Word32Or(), shl_r, shr_r);
+  Reduction reduction3 = Reduce(node3);
+  EXPECT_TRUE(reduction3.Changed());
+  EXPECT_EQ(reduction3.replacement(), node3);
+  EXPECT_THAT(reduction3.replacement(), IsWord32Ror(value, shift));
+
+  // (x >>> y) | (x << (32 - y)) => x ror y
+  Node* node4 = graph()->NewNode(machine()->Word32Or(), shr_r, shl_r);
+  Reduction reduction4 = Reduce(node4);
+  EXPECT_TRUE(reduction4.Changed());
+  EXPECT_EQ(reduction4.replacement(), node4);
+  EXPECT_THAT(reduction4.replacement(), IsWord32Ror(value, shift));
 }
 
 
@@ -601,21 +700,21 @@ TEST_F(MachineOperatorReducerTest, ReduceToWord32RorWithConstant) {
     Node* shr =
         graph()->NewNode(machine()->Word32Shr(), value, Int32Constant(32 - k));
 
-    // (x << K) | (x >> ((32 - K) - y)) => x ror K
+    // (x << K) | (x >>> (32 - K)) => x ror (32 - K)
     Node* node1 = graph()->NewNode(machine()->Word32Or(), shl, shr);
     Reduction reduction1 = Reduce(node1);
     EXPECT_TRUE(reduction1.Changed());
     EXPECT_EQ(reduction1.replacement(), node1);
     EXPECT_THAT(reduction1.replacement(),
-                IsWord32Ror(value, IsInt32Constant(k)));
+                IsWord32Ror(value, IsInt32Constant(32 - k)));
 
-    // (x >> (32 - K)) | (x << K) => x ror K
+    // (x >>> (32 - K)) | (x << K) => x ror (32 - K)
     Node* node2 = graph()->NewNode(machine()->Word32Or(), shr, shl);
     Reduction reduction2 = Reduce(node2);
     EXPECT_TRUE(reduction2.Changed());
     EXPECT_EQ(reduction2.replacement(), node2);
     EXPECT_THAT(reduction2.replacement(),
-                IsWord32Ror(value, IsInt32Constant(k)));
+                IsWord32Ror(value, IsInt32Constant(32 - k)));
   }
 }
 
@@ -645,6 +744,36 @@ TEST_F(MachineOperatorReducerTest, Word32RorWithConstants) {
 
 
 // -----------------------------------------------------------------------------
+// Word32Sar
+
+
+TEST_F(MachineOperatorReducerTest, Word32SarWithWord32ShlAndLoad) {
+  Node* const p0 = Parameter(0);
+  Node* const p1 = Parameter(1);
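+  // An Int8/Int16 load is already sign-extended, so the << 24 >> 24
+  // (resp. << 16 >> 16) sign-extension idiom around it is redundant.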
+  {
+    Node* const l = graph()->NewNode(machine()->Load(kMachInt8), p0, p1,
+                                     graph()->start(), graph()->start());
+    Reduction const r = Reduce(graph()->NewNode(
+        machine()->Word32Sar(),
+        graph()->NewNode(machine()->Word32Shl(), l, Int32Constant(24)),
+        Int32Constant(24)));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(l, r.replacement());
+  }
+  {
+    Node* const l = graph()->NewNode(machine()->Load(kMachInt16), p0, p1,
+                                     graph()->start(), graph()->start());
+    Reduction const r = Reduce(graph()->NewNode(
+        machine()->Word32Sar(),
+        graph()->NewNode(machine()->Word32Shl(), l, Int32Constant(16)),
+        Int32Constant(16)));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_EQ(l, r.replacement());
+  }
+}
+
+
+// -----------------------------------------------------------------------------
 // Word32Shl
 
 
@@ -672,6 +801,29 @@ TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Sar) {
 }
 
 
+TEST_F(MachineOperatorReducerTest,
+       Word32ShlWithWord32SarAndInt32AddAndConstant) {
+  Node* const p0 = Parameter(0);
+  TRACED_FOREACH(int32_t, k, kInt32Values) {
+    TRACED_FORRANGE(int32_t, l, 1, 31) {
+      if ((k << l) == 0) continue;
+      // (x + (K << L)) >> L << L => (x & (-1 << L)) + (K << L)
+      Reduction const r = Reduce(graph()->NewNode(
+          machine()->Word32Shl(),
+          graph()->NewNode(machine()->Word32Sar(),
+                           graph()->NewNode(machine()->Int32Add(), p0,
+                                            Int32Constant(k << l)),
+                           Int32Constant(l)),
+          Int32Constant(l)));
+      ASSERT_TRUE(r.Changed());
+      EXPECT_THAT(r.replacement(),
+                  IsInt32Add(IsWord32And(p0, IsInt32Constant(-1 << l)),
+                             IsInt32Constant(k << l)));
+    }
+  }
+}
+
+
 TEST_F(MachineOperatorReducerTest, Word32ShlWithWord32Shr) {
   Node* p0 = Parameter(0);
   TRACED_FORRANGE(int32_t, x, 1, 31) {
diff --git a/deps/v8/test/unittests/compiler/machine-operator-unittest.cc b/deps/v8/test/unittests/compiler/machine-operator-unittest.cc
index 6e41faa..6e0df2a 100644 (file)
@@ -3,8 +3,10 @@
 // found in the LICENSE file.
 
 #include "src/compiler/machine-operator.h"
-#include "src/compiler/operator-properties-inl.h"
-#include "testing/gtest-support.h"
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties.h"
+#include "test/unittests/test-utils.h"
 
 namespace v8 {
 namespace internal {
@@ -14,13 +16,14 @@ namespace compiler {
 
 template <typename T>
 class MachineOperatorTestWithParam
-    : public ::testing::TestWithParam< ::testing::tuple<MachineType, T> > {
+    : public TestWithZone,
+      public ::testing::WithParamInterface< ::testing::tuple<MachineType, T> > {
  protected:
   MachineType type() const { return ::testing::get<0>(B::GetParam()); }
   const T& GetParam() const { return ::testing::get<1>(B::GetParam()); }
 
  private:
-  typedef ::testing::TestWithParam< ::testing::tuple<MachineType, T> > B;
+  typedef ::testing::WithParamInterface< ::testing::tuple<MachineType, T> > B;
 };
 
 
@@ -47,14 +50,14 @@ typedef MachineOperatorTestWithParam<LoadRepresentation>
 
 
 TEST_P(MachineLoadOperatorTest, InstancesAreGloballyShared) {
-  MachineOperatorBuilder machine1(type());
-  MachineOperatorBuilder machine2(type());
+  MachineOperatorBuilder machine1(zone(), type());
+  MachineOperatorBuilder machine2(zone(), type());
   EXPECT_EQ(machine1.Load(GetParam()), machine2.Load(GetParam()));
 }
 
 
 TEST_P(MachineLoadOperatorTest, NumberOfInputsAndOutputs) {
-  MachineOperatorBuilder machine(type());
+  MachineOperatorBuilder machine(zone(), type());
   const Operator* op = machine.Load(GetParam());
 
   EXPECT_EQ(2, op->ValueInputCount());
@@ -69,13 +72,13 @@ TEST_P(MachineLoadOperatorTest, NumberOfInputsAndOutputs) {
 
 
 TEST_P(MachineLoadOperatorTest, OpcodeIsCorrect) {
-  MachineOperatorBuilder machine(type());
+  MachineOperatorBuilder machine(zone(), type());
   EXPECT_EQ(IrOpcode::kLoad, machine.Load(GetParam())->opcode());
 }
 
 
 TEST_P(MachineLoadOperatorTest, ParameterIsCorrect) {
-  MachineOperatorBuilder machine(type());
+  MachineOperatorBuilder machine(zone(), type());
   EXPECT_EQ(GetParam(),
             OpParameter<LoadRepresentation>(machine.Load(GetParam())));
 }
@@ -105,14 +108,14 @@ class MachineStoreOperatorTest
 
 
 TEST_P(MachineStoreOperatorTest, InstancesAreGloballyShared) {
-  MachineOperatorBuilder machine1(type());
-  MachineOperatorBuilder machine2(type());
+  MachineOperatorBuilder machine1(zone(), type());
+  MachineOperatorBuilder machine2(zone(), type());
   EXPECT_EQ(machine1.Store(GetParam()), machine2.Store(GetParam()));
 }
 
 
 TEST_P(MachineStoreOperatorTest, NumberOfInputsAndOutputs) {
-  MachineOperatorBuilder machine(type());
+  MachineOperatorBuilder machine(zone(), type());
   const Operator* op = machine.Store(GetParam());
 
   EXPECT_EQ(3, op->ValueInputCount());
@@ -127,13 +130,13 @@ TEST_P(MachineStoreOperatorTest, NumberOfInputsAndOutputs) {
 
 
 TEST_P(MachineStoreOperatorTest, OpcodeIsCorrect) {
-  MachineOperatorBuilder machine(type());
+  MachineOperatorBuilder machine(zone(), type());
   EXPECT_EQ(IrOpcode::kStore, machine.Store(GetParam())->opcode());
 }
 
 
 TEST_P(MachineStoreOperatorTest, ParameterIsCorrect) {
-  MachineOperatorBuilder machine(type());
+  MachineOperatorBuilder machine(zone(), type());
   EXPECT_EQ(GetParam(),
             OpParameter<StoreRepresentation>(machine.Store(GetParam())));
 }
@@ -215,14 +218,14 @@ typedef MachineOperatorTestWithParam<PureOperator> MachinePureOperatorTest;
 
 TEST_P(MachinePureOperatorTest, InstancesAreGloballyShared) {
   const PureOperator& pop = GetParam();
-  MachineOperatorBuilder machine1(type());
-  MachineOperatorBuilder machine2(type());
+  MachineOperatorBuilder machine1(zone(), type());
+  MachineOperatorBuilder machine2(zone(), type());
   EXPECT_EQ((machine1.*pop.constructor)(), (machine2.*pop.constructor)());
 }
 
 
 TEST_P(MachinePureOperatorTest, NumberOfInputsAndOutputs) {
-  MachineOperatorBuilder machine(type());
+  MachineOperatorBuilder machine(zone(), type());
   const PureOperator& pop = GetParam();
   const Operator* op = (machine.*pop.constructor)();
 
@@ -239,7 +242,7 @@ TEST_P(MachinePureOperatorTest, NumberOfInputsAndOutputs) {
 
 
 TEST_P(MachinePureOperatorTest, MarkedAsPure) {
-  MachineOperatorBuilder machine(type());
+  MachineOperatorBuilder machine(zone(), type());
   const PureOperator& pop = GetParam();
   const Operator* op = (machine.*pop.constructor)();
   EXPECT_TRUE(op->HasProperty(Operator::kPure));
@@ -247,7 +250,7 @@ TEST_P(MachinePureOperatorTest, MarkedAsPure) {
 
 
 TEST_P(MachinePureOperatorTest, OpcodeIsCorrect) {
-  MachineOperatorBuilder machine(type());
+  MachineOperatorBuilder machine(zone(), type());
   const PureOperator& pop = GetParam();
   const Operator* op = (machine.*pop.constructor)();
   EXPECT_EQ(pop.opcode, op->opcode());
@@ -266,8 +269,15 @@ INSTANTIATE_TEST_CASE_P(
 // Pseudo operators.
 
 
-TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
-  MachineOperatorBuilder machine(kRepWord32);
+namespace {
+
+typedef TestWithZone MachineOperatorTest;
+
+}  // namespace
+
+
+TEST_F(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
+  MachineOperatorBuilder machine(zone(), kRepWord32);
   EXPECT_EQ(machine.Word32And(), machine.WordAnd());
   EXPECT_EQ(machine.Word32Or(), machine.WordOr());
   EXPECT_EQ(machine.Word32Xor(), machine.WordXor());
@@ -288,8 +298,8 @@ TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs32Bit) {
 }
 
 
-TEST(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs64Bit) {
-  MachineOperatorBuilder machine(kRepWord64);
+TEST_F(MachineOperatorTest, PseudoOperatorsWhenWordSizeIs64Bit) {
+  MachineOperatorBuilder machine(zone(), kRepWord64);
   EXPECT_EQ(machine.Word64And(), machine.WordAnd());
   EXPECT_EQ(machine.Word64Or(), machine.WordOr());
   EXPECT_EQ(machine.Word64Xor(), machine.WordXor());
diff --git a/deps/v8/test/unittests/compiler/mips64/OWNERS b/deps/v8/test/unittests/compiler/mips64/OWNERS
new file mode 100644 (file)
index 0000000..5508ba6
--- /dev/null
@@ -0,0 +1,5 @@
+paul.lind@imgtec.com
+gergely.kis@imgtec.com
+akos.palfi@imgtec.com
+balazs.kilvady@imgtec.com
+dusan.milosavljevic@imgtec.com
diff --git a/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc b/deps/v8/test/unittests/compiler/mips64/instruction-selector-mips64-unittest.cc
new file mode 100644 (file)
index 0000000..a39ae75
--- /dev/null
@@ -0,0 +1,807 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "test/unittests/compiler/instruction-selector-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+namespace {
+template <typename T>
+struct MachInst {
+  T constructor;
+  const char* constructor_name;
+  ArchOpcode arch_opcode;
+  MachineType machine_type;
+};
+
+template <typename T>
+std::ostream& operator<<(std::ostream& os, const MachInst<T>& mi) {
+  return os << mi.constructor_name;
+}
+
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*)> MachInst1;
+typedef MachInst<Node* (RawMachineAssembler::*)(Node*, Node*)> MachInst2;
+
+
+// To avoid duplicated code, an IntCmp helper structure is created. It
+// contains a MachInst2 plus an expected_size, because different cmp
+// instructions expand to a different number of instructions.
+struct IntCmp {
+  MachInst2 mi;
+  uint32_t expected_size;
+};
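+// expected_size is checked below in InstructionSelectorCmpTest, which
+// asserts that the emitted stream contains exactly that many instructions.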
+
+struct FPCmp {
+  MachInst2 mi;
+  FlagsCondition cond;
+};
+
+const FPCmp kFPCmpInstructions[] = {
+    {{&RawMachineAssembler::Float64Equal, "Float64Equal", kMips64CmpD,
+      kMachFloat64},
+     kUnorderedEqual},
+    {{&RawMachineAssembler::Float64LessThan, "Float64LessThan", kMips64CmpD,
+      kMachFloat64},
+     kUnorderedLessThan},
+    {{&RawMachineAssembler::Float64LessThanOrEqual, "Float64LessThanOrEqual",
+      kMips64CmpD, kMachFloat64},
+     kUnorderedLessThanOrEqual},
+    {{&RawMachineAssembler::Float64GreaterThan, "Float64GreaterThan",
+      kMips64CmpD, kMachFloat64},
+     kUnorderedLessThan},
+    {{&RawMachineAssembler::Float64GreaterThanOrEqual,
+      "Float64GreaterThanOrEqual", kMips64CmpD, kMachFloat64},
+     kUnorderedLessThanOrEqual}};
+
+struct Conversion {
+  // The machine_type field in MachInst1 represents the destination type.
+  MachInst1 mi;
+  MachineType src_machine_type;
+};
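+// For example, the ChangeInt32ToFloat64 entry below carries kMachFloat64
+// (the destination type) in mi and kMachInt32 as src_machine_type.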
+
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kLogicalInstructions[] = {
+    {&RawMachineAssembler::Word32And, "Word32And", kMips64And, kMachInt32},
+    {&RawMachineAssembler::Word64And, "Word64And", kMips64And, kMachInt64},
+    {&RawMachineAssembler::Word32Or, "Word32Or", kMips64Or, kMachInt32},
+    {&RawMachineAssembler::Word64Or, "Word64Or", kMips64Or, kMachInt64},
+    {&RawMachineAssembler::Word32Xor, "Word32Xor", kMips64Xor, kMachInt32},
+    {&RawMachineAssembler::Word64Xor, "Word64Xor", kMips64Xor, kMachInt64}};
+
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kShiftInstructions[] = {
+    {&RawMachineAssembler::Word32Shl, "Word32Shl", kMips64Shl, kMachInt32},
+    {&RawMachineAssembler::Word64Shl, "Word64Shl", kMips64Dshl, kMachInt64},
+    {&RawMachineAssembler::Word32Shr, "Word32Shr", kMips64Shr, kMachInt32},
+    {&RawMachineAssembler::Word64Shr, "Word64Shr", kMips64Dshr, kMachInt64},
+    {&RawMachineAssembler::Word32Sar, "Word32Sar", kMips64Sar, kMachInt32},
+    {&RawMachineAssembler::Word64Sar, "Word64Sar", kMips64Dsar, kMachInt64},
+    {&RawMachineAssembler::Word32Ror, "Word32Ror", kMips64Ror, kMachInt32},
+    {&RawMachineAssembler::Word64Ror, "Word64Ror", kMips64Dror, kMachInt64}};
+
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kMulDivInstructions[] = {
+    {&RawMachineAssembler::Int32Mul, "Int32Mul", kMips64Mul, kMachInt32},
+    {&RawMachineAssembler::Int32Div, "Int32Div", kMips64Div, kMachInt32},
+    {&RawMachineAssembler::Uint32Div, "Uint32Div", kMips64DivU, kMachUint32},
+    {&RawMachineAssembler::Int64Mul, "Int64Mul", kMips64Dmul, kMachInt64},
+    {&RawMachineAssembler::Int64Div, "Int64Div", kMips64Ddiv, kMachInt64},
+    {&RawMachineAssembler::Uint64Div, "Uint64Div", kMips64DdivU, kMachUint64},
+    {&RawMachineAssembler::Float64Mul, "Float64Mul", kMips64MulD, kMachFloat64},
+    {&RawMachineAssembler::Float64Div, "Float64Div", kMips64DivD,
+     kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kModInstructions[] = {
+    {&RawMachineAssembler::Int32Mod, "Int32Mod", kMips64Mod, kMachInt32},
+    {&RawMachineAssembler::Uint32Mod, "Uint32Mod", kMips64ModU, kMachInt32},
+    {&RawMachineAssembler::Float64Mod, "Float64Mod", kMips64ModD,
+     kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// Arithmetic FPU instructions.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kFPArithInstructions[] = {
+    {&RawMachineAssembler::Float64Add, "Float64Add", kMips64AddD, kMachFloat64},
+    {&RawMachineAssembler::Float64Sub, "Float64Sub", kMips64SubD,
+     kMachFloat64}};
+
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, two nodes.
+// ----------------------------------------------------------------------------
+
+
+const MachInst2 kAddSubInstructions[] = {
+    {&RawMachineAssembler::Int32Add, "Int32Add", kMips64Add, kMachInt32},
+    {&RawMachineAssembler::Int64Add, "Int64Add", kMips64Dadd, kMachInt64},
+    {&RawMachineAssembler::Int32Sub, "Int32Sub", kMips64Sub, kMachInt32},
+    {&RawMachineAssembler::Int64Sub, "Int64Sub", kMips64Dsub, kMachInt64}};
+
+
+// ----------------------------------------------------------------------------
+// IntArithTest instructions, one node.
+// ----------------------------------------------------------------------------
+
+
+const MachInst1 kAddSubOneInstructions[] = {
+    {&RawMachineAssembler::Int32Neg, "Int32Neg", kMips64Sub, kMachInt32},
+    {&RawMachineAssembler::Int64Neg, "Int64Neg", kMips64Dsub, kMachInt64}};
+
+
+// ----------------------------------------------------------------------------
+// Arithmetic compare instructions.
+// ----------------------------------------------------------------------------
+
+
+const IntCmp kCmpInstructions[] = {
+    {{&RawMachineAssembler::WordEqual, "WordEqual", kMips64Cmp, kMachInt64},
+     1U},
+    {{&RawMachineAssembler::WordNotEqual, "WordNotEqual", kMips64Cmp,
+      kMachInt64},
+     1U},
+    {{&RawMachineAssembler::Word32Equal, "Word32Equal", kMips64Cmp32,
+      kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Word32NotEqual, "Word32NotEqual", kMips64Cmp32,
+      kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Int32LessThan, "Int32LessThan", kMips64Cmp32,
+      kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Int32LessThanOrEqual, "Int32LessThanOrEqual",
+      kMips64Cmp32, kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Int32GreaterThan, "Int32GreaterThan", kMips64Cmp32,
+      kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Int32GreaterThanOrEqual, "Int32GreaterThanOrEqual",
+      kMips64Cmp32, kMachInt32},
+     1U},
+    {{&RawMachineAssembler::Uint32LessThan, "Uint32LessThan", kMips64Cmp32,
+      kMachUint32},
+     1U},
+    {{&RawMachineAssembler::Uint32LessThanOrEqual, "Uint32LessThanOrEqual",
+      kMips64Cmp32, kMachUint32},
+     1U}};
+
+
+// ----------------------------------------------------------------------------
+// Conversion instructions.
+// ----------------------------------------------------------------------------
+
+const Conversion kConversionInstructions[] = {
+    // Conversion instructions are related to machine_operator.h:
+    // FPU conversions:
+    // Convert representation of integers between float64 and int32/uint32.
+    // The precise rounding mode and handling of out of range inputs are *not*
+    // defined for these operators, since they are intended only for use with
+    // integers.
+    // mips instructions:
+    // mtc1, cvt.d.w
+    {{&RawMachineAssembler::ChangeInt32ToFloat64, "ChangeInt32ToFloat64",
+      kMips64CvtDW, kMachFloat64},
+     kMachInt32},
+
+    // mips instructions:
+    // cvt.d.uw
+    {{&RawMachineAssembler::ChangeUint32ToFloat64, "ChangeUint32ToFloat64",
+      kMips64CvtDUw, kMachFloat64},
+     kMachInt32},
+
+    // mips instructions:
+    // mfc1, trunc double to word; for more details, see the mips macro
+    // assembler and mips assembler files
+    {{&RawMachineAssembler::ChangeFloat64ToInt32, "ChangeFloat64ToInt32",
+      kMips64TruncWD, kMachFloat64},
+     kMachInt32},
+
+    // mips instructions:
+    // trunc double to unsigned word; for more details, see the mips macro
+    // assembler and mips assembler files
+    {{&RawMachineAssembler::ChangeFloat64ToUint32, "ChangeFloat64ToUint32",
+      kMips64TruncUwD, kMachFloat64},
+     kMachInt32}};
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<FPCmp> InstructionSelectorFPCmpTest;
+
+TEST_P(InstructionSelectorFPCmpTest, Parameter) {
+  const FPCmp cmp = GetParam();
+  StreamBuilder m(this, kMachInt32, cmp.mi.machine_type, cmp.mi.machine_type);
+  m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+  EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+  EXPECT_EQ(cmp.cond, s[0]->flags_condition());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPCmpTest,
+                        ::testing::ValuesIn(kFPCmpInstructions));
+
+// ----------------------------------------------------------------------------
+// Arithmetic compare instructions integers
+// ----------------------------------------------------------------------------
+typedef InstructionSelectorTestWithParam<IntCmp> InstructionSelectorCmpTest;
+
+
+TEST_P(InstructionSelectorCmpTest, Parameter) {
+  const IntCmp cmp = GetParam();
+  const MachineType type = cmp.mi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*cmp.mi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(cmp.expected_size, s.size());
+  EXPECT_EQ(cmp.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorCmpTest,
+                        ::testing::ValuesIn(kCmpInstructions));
+
+// ----------------------------------------------------------------------------
+// Shift instructions.
+// ----------------------------------------------------------------------------
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorShiftTest;
+
+TEST_P(InstructionSelectorShiftTest, Immediate) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  TRACED_FORRANGE(int32_t, imm, 0, (ElementSizeOf(type) * 8) - 1) {
+    StreamBuilder m(this, type, type);
+    m.Return((m.*dpi.constructor)(m.Parameter(0), m.Int32Constant(imm)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+    EXPECT_EQ(imm, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorShiftTest,
+                        ::testing::ValuesIn(kShiftInstructions));
+
+// ----------------------------------------------------------------------------
+// Logical instructions.
+// ----------------------------------------------------------------------------
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorLogicalTest;
+
+
+TEST_P(InstructionSelectorLogicalTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorLogicalTest,
+                        ::testing::ValuesIn(kLogicalInstructions));
+
+// ----------------------------------------------------------------------------
+// MUL/DIV instructions.
+// ----------------------------------------------------------------------------
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorMulDivTest;
+
+TEST_P(InstructionSelectorMulDivTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorMulDivTest,
+                        ::testing::ValuesIn(kMulDivInstructions));
+
+// ----------------------------------------------------------------------------
+// MOD instructions.
+// ----------------------------------------------------------------------------
+typedef InstructionSelectorTestWithParam<MachInst2> InstructionSelectorModTest;
+
+TEST_P(InstructionSelectorModTest, Parameter) {
+  const MachInst2 dpi = GetParam();
+  const MachineType type = dpi.machine_type;
+  StreamBuilder m(this, type, type, type);
+  m.Return((m.*dpi.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(dpi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorModTest,
+                        ::testing::ValuesIn(kModInstructions));
+
+// ----------------------------------------------------------------------------
+// Floating point instructions.
+// ----------------------------------------------------------------------------
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorFPArithTest;
+
+TEST_P(InstructionSelectorFPArithTest, Parameter) {
+  const MachInst2 fpa = GetParam();
+  StreamBuilder m(this, fpa.machine_type, fpa.machine_type, fpa.machine_type);
+  m.Return((m.*fpa.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(fpa.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest, InstructionSelectorFPArithTest,
+                        ::testing::ValuesIn(kFPArithInstructions));
+// ----------------------------------------------------------------------------
+// Integer arithmetic
+// ----------------------------------------------------------------------------
+typedef InstructionSelectorTestWithParam<MachInst2>
+    InstructionSelectorIntArithTwoTest;
+
+TEST_P(InstructionSelectorIntArithTwoTest, Parameter) {
+  const MachInst2 intpa = GetParam();
+  StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+                  intpa.machine_type);
+  m.Return((m.*intpa.constructor)(m.Parameter(0), m.Parameter(1)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorIntArithTwoTest,
+                        ::testing::ValuesIn(kAddSubInstructions));
+
+
+// ----------------------------------------------------------------------------
+// One node.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MachInst1>
+    InstructionSelectorIntArithOneTest;
+
+TEST_P(InstructionSelectorIntArithOneTest, Parameter) {
+  const MachInst1 intpa = GetParam();
+  StreamBuilder m(this, intpa.machine_type, intpa.machine_type,
+                  intpa.machine_type);
+  m.Return((m.*intpa.constructor)(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(intpa.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorIntArithOneTest,
+                        ::testing::ValuesIn(kAddSubOneInstructions));
+// ----------------------------------------------------------------------------
+// Conversions.
+// ----------------------------------------------------------------------------
+typedef InstructionSelectorTestWithParam<Conversion>
+    InstructionSelectorConversionTest;
+
+TEST_P(InstructionSelectorConversionTest, Parameter) {
+  const Conversion conv = GetParam();
+  StreamBuilder m(this, conv.mi.machine_type, conv.src_machine_type);
+  m.Return((m.*conv.mi.constructor)(m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(conv.mi.arch_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(1U, s[0]->OutputCount());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorConversionTest,
+                        ::testing::ValuesIn(kConversionInstructions));
+
+
+// ----------------------------------------------------------------------------
+// Loads and stores.
+// ----------------------------------------------------------------------------
+
+
+namespace {
+
+struct MemoryAccess {
+  MachineType type;
+  ArchOpcode load_opcode;
+  ArchOpcode store_opcode;
+};
+
+static const MemoryAccess kMemoryAccesses[] = {
+    {kMachInt8, kMips64Lb, kMips64Sb},
+    {kMachUint8, kMips64Lbu, kMips64Sb},
+    {kMachInt16, kMips64Lh, kMips64Sh},
+    {kMachUint16, kMips64Lhu, kMips64Sh},
+    {kMachInt32, kMips64Lw, kMips64Sw},
+    {kRepFloat32, kMips64Lwc1, kMips64Swc1},
+    {kRepFloat64, kMips64Ldc1, kMips64Sdc1},
+    {kMachInt64, kMips64Ld, kMips64Sd}};
+
+
+struct MemoryAccessImm {
+  MachineType type;
+  ArchOpcode load_opcode;
+  ArchOpcode store_opcode;
+  bool (InstructionSelectorTest::Stream::*val_predicate)(
+      const InstructionOperand*) const;
+  const int32_t immediates[40];
+};
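+// val_predicate is a pointer to a member function of Stream; the tests below
+// invoke it as (s.*memacc.val_predicate)(s[0]->Output()) to check whether the
+// loaded value is assigned an integer or a double virtual register.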
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm& acc) {
+  return os << acc.type;
+}
+
+
+struct MemoryAccessImm1 {
+  MachineType type;
+  ArchOpcode load_opcode;
+  ArchOpcode store_opcode;
+  bool (InstructionSelectorTest::Stream::*val_predicate)(
+      const InstructionOperand*) const;
+  const int32_t immediates[5];
+};
+
+
+std::ostream& operator<<(std::ostream& os, const MemoryAccessImm1& acc) {
+  return os << acc.type;
+}
+
+
+// ----------------------------------------------------------------------------
+// Loads and stores immediate values
+// ----------------------------------------------------------------------------
+
+
+const MemoryAccessImm kMemoryAccessesImm[] = {
+    {kMachInt8,
+     kMips64Lb,
+     kMips64Sb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachUint8,
+     kMips64Lbu,
+     kMips64Sb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachInt16,
+     kMips64Lh,
+     kMips64Sh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachUint16,
+     kMips64Lhu,
+     kMips64Sh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachInt32,
+     kMips64Lw,
+     kMips64Sw,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachFloat32,
+     kMips64Lwc1,
+     kMips64Swc1,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachFloat64,
+     kMips64Ldc1,
+     kMips64Sdc1,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}},
+    {kMachInt64,
+     kMips64Ld,
+     kMips64Sd,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-4095, -3340, -3231, -3224, -3088, -1758, -1203, -123, -117, -91, -89,
+      -87, -86, -82, -44, -23, -3, 0, 7, 10, 39, 52, 69, 71, 91, 92, 107, 109,
+      115, 124, 286, 655, 1362, 1569, 2587, 3067, 3096, 3462, 3510, 4095}}};
+
+
+const MemoryAccessImm1 kMemoryAccessImmMoreThan16bit[] = {
+    {kMachInt8,
+     kMips64Lb,
+     kMips64Sb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachUint8,
+     kMips64Lbu,
+     kMips64Sb,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachInt16,
+     kMips64Lh,
+     kMips64Sh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachUint16,
+     kMips64Lhu,
+     kMips64Sh,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachInt32,
+     kMips64Lw,
+     kMips64Sw,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachFloat32,
+     kMips64Lwc1,
+     kMips64Swc1,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachFloat64,
+     kMips64Ldc1,
+     kMips64Sdc1,
+     &InstructionSelectorTest::Stream::IsDouble,
+     {-65000, -55000, 32777, 55000, 65000}},
+    {kMachInt64,
+     kMips64Ld,
+     kMips64Sd,
+     &InstructionSelectorTest::Stream::IsInteger,
+     {-65000, -55000, 32777, 55000, 65000}}};
+
+}  // namespace
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccess>
+    InstructionSelectorMemoryAccessTest;
+
+TEST_P(InstructionSelectorMemoryAccessTest, LoadWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, memacc.type, kMachPtr, kMachInt32);
+  m.Return(m.Load(memacc.type, m.Parameter(0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+
+TEST_P(InstructionSelectorMemoryAccessTest, StoreWithParameters) {
+  const MemoryAccess memacc = GetParam();
+  StreamBuilder m(this, kMachInt32, kMachPtr, kMachInt32, memacc.type);
+  m.Store(memacc.type, m.Parameter(0), m.Parameter(1));
+  m.Return(m.Int32Constant(0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessTest,
+                        ::testing::ValuesIn(kMemoryAccesses));
+
+
+// ----------------------------------------------------------------------------
+// Load immediate.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccessImm>
+    InstructionSelectorMemoryAccessImmTest;
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, LoadWithImmediateIndex) {
+  const MemoryAccessImm memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.load_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    ASSERT_EQ(1U, s[0]->OutputCount());
+    EXPECT_TRUE((s.*memacc.val_predicate)(s[0]->Output()));
+  }
+}
+
+
+// ----------------------------------------------------------------------------
+// Store immediate.
+// ----------------------------------------------------------------------------
+
+
+TEST_P(InstructionSelectorMemoryAccessImmTest, StoreWithImmediateIndex) {
+  const MemoryAccessImm memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(memacc.store_opcode, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+    ASSERT_EQ(3U, s[0]->InputCount());
+    ASSERT_EQ(InstructionOperand::IMMEDIATE, s[0]->InputAt(1)->kind());
+    EXPECT_EQ(index, s.ToInt32(s[0]->InputAt(1)));
+    EXPECT_EQ(0U, s[0]->OutputCount());
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessImmTest,
+                        ::testing::ValuesIn(kMemoryAccessesImm));
+
+
+// ----------------------------------------------------------------------------
+// Load/store offsets more than 16 bits.
+// ----------------------------------------------------------------------------
+
+
+typedef InstructionSelectorTestWithParam<MemoryAccessImm1>
+    InstructionSelectorMemoryAccessImmMoreThan16bitTest;
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+       LoadWithImmediateIndex) {
+  const MemoryAccessImm1 memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, memacc.type, kMachPtr);
+    m.Return(m.Load(memacc.type, m.Parameter(0), m.Int32Constant(index)));
+    Stream s = m.Build();
+    ASSERT_EQ(2U, s.size());
+    // kMips64Dadd is the expected opcode: the index does not fit into 16
+    // bits, so a separate add is emitted to compute the address first.
+    EXPECT_EQ(kMips64Dadd, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+TEST_P(InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+       StoreWithImmediateIndex) {
+  const MemoryAccessImm1 memacc = GetParam();
+  TRACED_FOREACH(int32_t, index, memacc.immediates) {
+    StreamBuilder m(this, kMachInt32, kMachPtr, memacc.type);
+    m.Store(memacc.type, m.Parameter(0), m.Int32Constant(index),
+            m.Parameter(1));
+    m.Return(m.Int32Constant(0));
+    Stream s = m.Build();
+    ASSERT_EQ(2U, s.size());
+    // kMips64Dadd is the expected opcode here too: the index does not fit
+    // into 16 bits, so a separate add is emitted to compute the address.
+    EXPECT_EQ(kMips64Dadd, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+    EXPECT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(InstructionSelectorTest,
+                        InstructionSelectorMemoryAccessImmMoreThan16bitTest,
+                        ::testing::ValuesIn(kMemoryAccessImmMoreThan16bit));
+
+
+// ----------------------------------------------------------------------------
+// kMips64Cmp with zero testing.
+// ----------------------------------------------------------------------------
+
+
+TEST_F(InstructionSelectorTest, Word32EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Parameter(0), m.Int32Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kMips64Cmp32, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt32, kMachInt32);
+    m.Return(m.Word32Equal(m.Int32Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kMips64Cmp32, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+
+TEST_F(InstructionSelectorTest, Word64EqualWithZero) {
+  {
+    StreamBuilder m(this, kMachInt64, kMachInt64);
+    m.Return(m.Word64Equal(m.Parameter(0), m.Int64Constant(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+  {
+    StreamBuilder m(this, kMachInt64, kMachInt64);
+    m.Return(m.Word64Equal(m.Int64Constant(0), m.Parameter(0)));
+    Stream s = m.Build();
+    ASSERT_EQ(1U, s.size());
+    EXPECT_EQ(kMips64Cmp, s[0]->arch_opcode());
+    EXPECT_EQ(kMode_None, s[0]->addressing_mode());
+    ASSERT_EQ(2U, s[0]->InputCount());
+    EXPECT_EQ(1U, s[0]->OutputCount());
+    EXPECT_EQ(kFlags_set, s[0]->flags_mode());
+    EXPECT_EQ(kEqual, s[0]->flags_condition());
+  }
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc b/deps/v8/test/unittests/compiler/move-optimizer-unittest.cc
new file mode 100644 (file)
index 0000000..5b956f0
--- /dev/null
@@ -0,0 +1,133 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/move-optimizer.h"
+#include "test/unittests/compiler/instruction-sequence-unittest.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class MoveOptimizerTest : public InstructionSequenceTest {
+ public:
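+  // Finds the last GapInstruction in the sequence: either the final
+  // instruction or, if that is not a gap, the one immediately before it.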
+  GapInstruction* LastGap() {
+    auto instruction = sequence()->instructions().back();
+    if (!instruction->IsGapMoves()) {
+      instruction = *(sequence()->instructions().rbegin() + 1);
+    }
+    return GapInstruction::cast(instruction);
+  }
+
+  void AddMove(GapInstruction* gap, TestOperand from, TestOperand to,
+               GapInstruction::InnerPosition pos = GapInstruction::START) {
+    auto parallel_move = gap->GetOrCreateParallelMove(pos, zone());
+    parallel_move->AddMove(ConvertMoveArg(from), ConvertMoveArg(to), zone());
+  }
+
+  int NonRedundantSize(ParallelMove* move) {
+    int i = 0;
+    auto ops = move->move_operands();
+    for (auto op = ops->begin(); op != ops->end(); ++op) {
+      if (op->IsRedundant()) continue;
+      i++;
+    }
+    return i;
+  }
+
+  bool Contains(ParallelMove* move, TestOperand from_op, TestOperand to_op) {
+    auto from = ConvertMoveArg(from_op);
+    auto to = ConvertMoveArg(to_op);
+    auto ops = move->move_operands();
+    for (auto op = ops->begin(); op != ops->end(); ++op) {
+      if (op->IsRedundant()) continue;
+      if (op->source()->Equals(from) && op->destination()->Equals(to)) {
+        return true;
+      }
+    }
+    return false;
+  }
+
+  // TODO(dcarney): add a verifier.
+  void Optimize() {
+    WireBlocks();
+    if (FLAG_trace_turbo) {
+      OFStream os(stdout);
+      PrintableInstructionSequence printable = {config(), sequence()};
+      os << "----- Instruction sequence before move optimization -----\n"
+         << printable;
+    }
+    MoveOptimizer move_optimizer(zone(), sequence());
+    move_optimizer.Run();
+    if (FLAG_trace_turbo) {
+      OFStream os(stdout);
+      PrintableInstructionSequence printable = {config(), sequence()};
+      os << "----- Instruction sequence after move optimization -----\n"
+         << printable;
+    }
+  }
+
+ private:
+  InstructionOperand* ConvertMoveArg(TestOperand op) {
+    CHECK_EQ(kNoValue, op.vreg_.value_);
+    CHECK_NE(kNoValue, op.value_);
+    switch (op.type_) {
+      case kConstant:
+        return ConstantOperand::Create(op.value_, zone());
+      case kFixedSlot:
+        return StackSlotOperand::Create(op.value_, zone());
+      case kFixedRegister:
+        CHECK(0 <= op.value_ && op.value_ < num_general_registers());
+        return RegisterOperand::Create(op.value_, zone());
+      default:
+        break;
+    }
+    CHECK(false);
+    return nullptr;
+  }
+};
+
+
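+// The moves Reg(0)->Reg(1) and Reg(1)->Reg(0) should be coalesced into the
+// last gap, leaving a single non-redundant move.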
+TEST_F(MoveOptimizerTest, RemovesRedundant) {
+  StartBlock();
+  AddMove(LastGap(), Reg(0), Reg(1));
+  EmitNop();
+  AddMove(LastGap(), Reg(1), Reg(0));
+  EmitNop();
+  EndBlock(Last());
+
+  Optimize();
+
+  auto gap = LastGap();
+  auto move = gap->parallel_moves()[0];
+  CHECK_EQ(1, NonRedundantSize(move));
+  CHECK(Contains(move, Reg(0), Reg(1)));
+}
+
+
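+// A constant moved to several destinations should be materialized into a
+// register once and then fanned out with register-to-slot moves.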
+TEST_F(MoveOptimizerTest, SplitsConstants) {
+  StartBlock();
+  EndBlock(Last());
+
+  auto gap = LastGap();
+  AddMove(gap, Const(1), Slot(0));
+  AddMove(gap, Const(1), Slot(1));
+  AddMove(gap, Const(1), Reg(0));
+  AddMove(gap, Const(1), Slot(2));
+
+  Optimize();
+
+  auto move = gap->parallel_moves()[0];
+  CHECK_EQ(1, NonRedundantSize(move));
+  CHECK(Contains(move, Const(1), Reg(0)));
+
+  move = gap->parallel_moves()[1];
+  CHECK_EQ(3, NonRedundantSize(move));
+  CHECK(Contains(move, Reg(0), Slot(0)));
+  CHECK(Contains(move, Reg(0), Slot(1)));
+  CHECK(Contains(move, Reg(0), Slot(2)));
+}
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
diff --git a/deps/v8/test/unittests/compiler/node-matchers-unittest.cc b/deps/v8/test/unittests/compiler/node-matchers-unittest.cc
new file mode 100644 (file)
index 0000000..85db9db
--- /dev/null
@@ -0,0 +1,733 @@
+// Copyright 2014 the V8 project authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "src/compiler/common-operator.h"
+#include "src/compiler/graph.h"
+#include "src/compiler/machine-operator.h"
+#include "src/compiler/node.h"
+#include "src/compiler/node-matchers.h"
+#include "src/compiler/opcodes.h"
+
+#include "test/unittests/compiler/graph-unittest.h"
+#include "test/unittests/test-utils.h"
+
+namespace v8 {
+namespace internal {
+namespace compiler {
+
+class NodeMatcherTest : public GraphTest {
+ public:
+  NodeMatcherTest() : machine_(zone()) {}
+  ~NodeMatcherTest() OVERRIDE {}
+
+  MachineOperatorBuilder* machine() { return &machine_; }
+
+ private:
+  MachineOperatorBuilder machine_;
+};
+
+namespace {
+
+template <class Matcher>
+void CheckBaseWithIndexAndDisplacement(Matcher* matcher, Node* index, int scale,
+                                       Node* base, Node* displacement) {
+  EXPECT_TRUE(matcher->matches());
+  EXPECT_EQ(index, matcher->index());
+  EXPECT_EQ(scale, matcher->scale());
+  EXPECT_EQ(base, matcher->base());
+  EXPECT_EQ(displacement, matcher->displacement());
+}
+}  // namespace
+
+
+TEST_F(NodeMatcherTest, ScaledWithOffset32Matcher) {
+  graph()->SetStart(graph()->NewNode(common()->Start(0)));
+
+  const Operator* d0_op = common()->Int32Constant(0);
+  Node* d0 = graph()->NewNode(d0_op);
+  USE(d0);
+  const Operator* d1_op = common()->Int32Constant(1);
+  Node* d1 = graph()->NewNode(d1_op);
+  USE(d1);
+  const Operator* d2_op = common()->Int32Constant(2);
+  Node* d2 = graph()->NewNode(d2_op);
+  USE(d2);
+  const Operator* d3_op = common()->Int32Constant(3);
+  Node* d3 = graph()->NewNode(d3_op);
+  USE(d3);
+  const Operator* d4_op = common()->Int32Constant(4);
+  Node* d4 = graph()->NewNode(d4_op);
+  USE(d4);
+  const Operator* d5_op = common()->Int32Constant(5);
+  Node* d5 = graph()->NewNode(d5_op);
+  USE(d5);
+  const Operator* d7_op = common()->Int32Constant(7);
+  Node* d7 = graph()->NewNode(d7_op);
+  USE(d7);
+  const Operator* d8_op = common()->Int32Constant(8);
+  Node* d8 = graph()->NewNode(d8_op);
+  USE(d8);
+  const Operator* d9_op = common()->Int32Constant(9);
+  Node* d9 = graph()->NewNode(d9_op);
+  USE(d9);
+  const Operator* d15_op = common()->Int32Constant(15);
+  Node* d15 = graph()->NewNode(d15_op);
+  USE(d15);
+
+  const Operator* b0_op = common()->Parameter(0);
+  Node* b0 = graph()->NewNode(b0_op, graph()->start());
+  USE(b0);
+  const Operator* b1_op = common()->Parameter(1);
+  Node* b1 = graph()->NewNode(b1_op, graph()->start());
+  USE(b1);
+
+  const Operator* p1_op = common()->Parameter(3);
+  Node* p1 = graph()->NewNode(p1_op, graph()->start());
+  USE(p1);
+
+  const Operator* a_op = machine()->Int32Add();
+  USE(a_op);
+
+  const Operator* m_op = machine()->Int32Mul();
+  Node* m1 = graph()->NewNode(m_op, p1, d1);
+  Node* m2 = graph()->NewNode(m_op, p1, d2);
+  Node* m3 = graph()->NewNode(m_op, p1, d3);
+  Node* m4 = graph()->NewNode(m_op, p1, d4);
+  Node* m5 = graph()->NewNode(m_op, p1, d5);
+  Node* m7 = graph()->NewNode(m_op, p1, d7);
+  Node* m8 = graph()->NewNode(m_op, p1, d8);
+  Node* m9 = graph()->NewNode(m_op, p1, d9);
+  USE(m1);
+  USE(m2);
+  USE(m3);
+  USE(m4);
+  USE(m5);
+  USE(m7);
+  USE(m8);
+  USE(m9);
+
+  const Operator* s_op = machine()->Word32Shl();
+  Node* s0 = graph()->NewNode(s_op, p1, d0);
+  Node* s1 = graph()->NewNode(s_op, p1, d1);
+  Node* s2 = graph()->NewNode(s_op, p1, d2);
+  Node* s3 = graph()->NewNode(s_op, p1, d3);
+  Node* s4 = graph()->NewNode(s_op, p1, d4);
+  USE(s0);
+  USE(s1);
+  USE(s2);
+  USE(s3);
+  USE(s4);
+
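+  // Note on the expected results below: the matcher reports a scale equal to
+  // log2 of the constant factor, so a multiply by 2/4/8 yields scale 1/2/3
+  // and a shift by k yields scale k.
+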
+  // 1 INPUT
+
+  // The only relevant test case is checking for a non-match.
+  BaseWithIndexAndDisplacement32Matcher match0(d15);
+  EXPECT_FALSE(match0.matches());
+
+  // 2 INPUT
+
+  // (B0 + B1) -> [B1, 0, B0, NULL]
+  BaseWithIndexAndDisplacement32Matcher match1(graph()->NewNode(a_op, b0, b1));
+  CheckBaseWithIndexAndDisplacement(&match1, b1, 0, b0, NULL);
+
+  // (B0 + D15) -> [NULL, 0, B0, D15]
+  BaseWithIndexAndDisplacement32Matcher match2(graph()->NewNode(a_op, b0, d15));
+  CheckBaseWithIndexAndDisplacement(&match2, NULL, 0, b0, d15);
+
+  // (D15 + B0) -> [NULL, 0, B0, D15]
+  BaseWithIndexAndDisplacement32Matcher match3(graph()->NewNode(a_op, d15, b0));
+  CheckBaseWithIndexAndDisplacement(&match3, NULL, 0, b0, d15);
+
+  // (B0 + M1) -> [p1, 0, B0, NULL]
+  BaseWithIndexAndDisplacement32Matcher match4(graph()->NewNode(a_op, b0, m1));
+  CheckBaseWithIndexAndDisplacement(&match4, p1, 0, b0, NULL);
+
+  // (M1 + B0) -> [p1, 0, B0, NULL]
+  m1 = graph()->NewNode(m_op, p1, d1);
+  BaseWithIndexAndDisplacement32Matcher match5(graph()->NewNode(a_op, m1, b0));
+  CheckBaseWithIndexAndDisplacement(&match5, p1, 0, b0, NULL);
+
+  // (D15 + M1) -> [P1, 0, NULL, D15]
+  m1 = graph()->NewNode(m_op, p1, d1);
+  BaseWithIndexAndDisplacement32Matcher match6(graph()->NewNode(a_op, d15, m1));
+  CheckBaseWithIndexAndDisplacement(&match6, p1, 0, NULL, d15);
+
+  // (M1 + D15) -> [P1, 0, NULL, D15]
+  m1 = graph()->NewNode(m_op, p1, d1);
+  BaseWithIndexAndDisplacement32Matcher match7(graph()->NewNode(a_op, m1, d15));
+  CheckBaseWithIndexAndDisplacement(&match7, p1, 0, NULL, d15);
+
+  // (B0 + S0) -> [p1, 0, B0, NULL]
+  BaseWithIndexAndDisplacement32Matcher match8(graph()->NewNode(a_op, b0, s0));
+  CheckBaseWithIndexAndDisplacement(&match8, p1, 0, b0, NULL);
+
+  // (S0 + B0) -> [p1, 0, B0, NULL]
+  s0 = graph()->NewNode(s_op, p1, d0);
+  BaseWithIndexAndDisplacement32Matcher match9(graph()->NewNode(a_op, s0, b0));
+  CheckBaseWithIndexAndDisplacement(&match9, p1, 0, b0, NULL);
+
+  // (D15 + S0) -> [P1, 0, NULL, D15]
+  s0 = graph()->NewNode(s_op, p1, d0);
+  BaseWithIndexAndDisplacement32Matcher match10(
+      graph()->NewNode(a_op, d15, s0));
+  CheckBaseWithIndexAndDisplacement(&match10, p1, 0, NULL, d15);
+
+  // (S0 + D15) -> [P1, 0, NULL, D15]
+  s0 = graph()->NewNode(s_op, p1, d0);
+  BaseWithIndexAndDisplacement32Matcher match11(
+      graph()->NewNode(a_op, s0, d15));
+  CheckBaseWithIndexAndDisplacement(&match11, p1, 0, NULL, d15);
+
+  // (B0 + M2) -> [p1, 1, B0, NULL]
+  BaseWithIndexAndDisplacement32Matcher match12(graph()->NewNode(a_op, b0, m2));
+  CheckBaseWithIndexAndDisplacement(&match12, p1, 1, b0, NULL);
+
+  // (M2 + B0) -> [p1, 1, B0, NULL]
+  m2 = graph()->NewNode(m_op, p1, d2);
+  BaseWithIndexAndDisplacement32Matcher match13(graph()->NewNode(a_op, m2, b0));
+  CheckBaseWithIndexAndDisplacement(&match13, p1, 1, b0, NULL);
+
+  // (D15 + M2) -> [P1, 1, NULL, D15]
+  m2 = graph()->NewNode(m_op, p1, d2);
+  BaseWithIndexAndDisplacement32Matcher match14(
+      graph()->NewNode(a_op, d15, m2));
+  CheckBaseWithIndexAndDisplacement(&match14, p1, 1, NULL, d15);
+
+  // (M2 + D15) -> [P1, 1, NULL, D15]
+  m2 = graph()->NewNode(m_op, p1, d2);
+  BaseWithIndexAndDisplacement32Matcher match15(
+      graph()->NewNode(a_op, m2, d15));
+  CheckBaseWithIndexAndDisplacement(&match15, p1, 1, NULL, d15);
+
+  // (B0 + S1) -> [p1, 1, B0, NULL]
+  BaseWithIndexAndDisplacement32Matcher match16(graph()->NewNode(a_op, b0, s1));
+  CheckBaseWithIndexAndDisplacement(&match16, p1, 1, b0, NULL);
+
+  // (S1 + B0) -> [p1, 1, B0, NULL]
+  s1 = graph()->NewNode(s_op, p1, d1);
+  BaseWithIndexAndDisplacement32Matcher match17(graph()->NewNode(a_op, s1, b0));
+  CheckBaseWithIndexAndDisplacement(&match17, p1, 1, b0, NULL);
+
+  // (D15 + S1) -> [P1, 1, NULL, D15]
+  s1 = graph()->NewNode(s_op, p1, d1);
+  BaseWithIndexAndDisplacement32Matcher match18(
+      graph()->NewNode(a_op, d15, s1));
+  CheckBaseWithIndexAndDisplacement(&match18, p1, 1, NULL, d15);
+
+  // (S1 + D15) -> [P1, 1, NULL, D15]
+  s1 = graph()->NewNode(s_op, p1, d1);
+  BaseWithIndexAndDisplacement32Matcher match19(
+      graph()->NewNode(a_op, s1, d15));
+  CheckBaseWithIndexAndDisplacement(&match19, p1, 1, NULL, d15);
+
+  // (B0 + M4) -> [p1, 2, B0, NULL]
+  BaseWithIndexAndDisplacement32Matcher match20(graph()->NewNode(a_op, b0, m4));
+  CheckBaseWithIndexAndDisplacement(&match20, p1, 2, b0, NULL);
+
+  // (M4 + B0) -> [p1, 2, B0, NULL]
+  m4 = graph()->NewNode(m_op, p1, d4);
+  BaseWithIndexAndDisplacement32Matcher match21(graph()->NewNode(a_op, m4, b0));
+  CheckBaseWithIndexAndDisplacement(&match21, p1, 2, b0, NULL);
+
+  // (D15 + M4) -> [p1, 2, NULL, D15]
+  m4 = graph()->NewNode(m_op, p1, d4);
+  BaseWithIndexAndDisplacement32Matcher match22(
+      graph()->NewNode(a_op, d15, m4));
+  CheckBaseWithIndexAndDisplacement(&match22, p1, 2, NULL, d15);
+
+  // (M4 + D15) -> [p1, 2, NULL, D15]
+  m4 = graph()->NewNode(m_op, p1, d4);
+  BaseWithIndexAndDisplacement32Matcher match23(
+      graph()->NewNode(a_op, m4, d15));
+  CheckBaseWithIndexAndDisplacement(&match23, p1, 2, NULL, d15);
+
+  // (B0 + S2) -> [p1, 2, B0, NULL]
+  BaseWithIndexAndDisplacement32Matcher match24(graph()->NewNode(a_op, b0, s2));
+  CheckBaseWithIndexAndDisplacement(&match24, p1, 2, b0, NULL);
+
+  // (S2 + B0) -> [p1, 2, B0, NULL]
+  s2 = graph()->NewNode(s_op, p1, d2);
+  BaseWithIndexAndDisplacement32Matcher match25(graph()->NewNode(a_op, s2, b0));
+  CheckBaseWithIndexAndDisplacement(&match25, p1, 2, b0, NULL);
+
+  // (D15 + S2) -> [p1, 2, NULL, D15]
+  s2 = graph()->NewNode(s_op, p1, d2);
+  BaseWithIndexAndDisplacement32Matcher match26(
+      graph()->NewNode(a_op, d15, s2));
+  CheckBaseWithIndexAndDisplacement(&match26, p1, 2, NULL, d15);
+
+  // (S2 + D15) -> [p1, 2, NULL, D15]
+  s2 = graph()->NewNode(s_op, p1, d2);
+  BaseWithIndexAndDisplacement32Matcher match27(
+      graph()->NewNode(a_op, s2, d15));
+  CheckBaseWithIndexAndDisplacement(&match27, p1, 2, NULL, d15);
+
+  // (B0 + M8) -> [p1, 3, B0, NULL]
+  BaseWithIndexAndDisplacement32Matcher match28(graph()->NewNode(a_op, b0, m8));
+  CheckBaseWithIndexAndDisplacement(&match28, p1, 3, b0, NULL);
+
+  // (M8 + B0) -> [p1, 3, B0, NULL]
+  m8 = graph()->NewNode(m_op, p1, d8);
+  BaseWithIndexAndDisplacement32Matcher match29(graph()->NewNode(a_op, m8, b0));
+  CheckBaseWithIndexAndDisplacement(&match29, p1, 3, b0, NULL);
+
+  // (D15 + M8) -> [p1, 3, NULL, D15]
+  m8 = graph()->NewNode(m_op, p1, d8);
+  BaseWithIndexAndDisplacement32Matcher match30(
+      graph()->NewNode(a_op, d15, m8));
+  CheckBaseWithIndexAndDisplacement(&match30, p1, 3, NULL, d15);
+
+  // (M8 + D15) -> [p1, 3, NULL, D15]
+  m8 = graph()->NewNode(m_op, p1, d8);
+  BaseWithIndexAndDisplacement32Matcher match31(
+      graph()->NewNode(a_op, m8, d15));
+  CheckBaseWithIndexAndDisplacement(&match31, p1, 3, NULL, d15);
+
+  // (B0 + S3) -> [p1, 3, B0, NULL]
+  BaseWithIndexAndDisplacement32Matcher match32(graph()->NewNode(a_op, b0, s3));
+  CheckBaseWithIndexAndDisplacement(&match32, p1, 3, b0, NULL);
+
+  // (S3 + B0) -> [p1, 3, B0, NULL]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement32Matcher match33(graph()->NewNode(a_op, s3, b0));
+  CheckBaseWithIndexAndDisplacement(&match33, p1, 3, b0, NULL);
+
+  // (D15 + S3) -> [p1, 3, NULL, D15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement32Matcher match34(
+      graph()->NewNode(a_op, d15, s3));
+  CheckBaseWithIndexAndDisplacement(&match34, p1, 3, NULL, d15);
+
+  // (S3 + D15) -> [p1, 3, NULL, D15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement32Matcher match35(
+      graph()->NewNode(a_op, s3, d15));
+  CheckBaseWithIndexAndDisplacement(&match35, p1, 3, NULL, d15);
+
+  // 2 INPUT - NEGATIVE CASES
+
+  // (B1 + M3) -> [M3, 0, B1, NULL]
+  BaseWithIndexAndDisplacement32Matcher match36(graph()->NewNode(a_op, b1, m3));
+  CheckBaseWithIndexAndDisplacement(&match36, m3, 0, b1, NULL);
+
+  // (B1 + S4) -> [S4, 0, B1, NULL]
+  BaseWithIndexAndDisplacement32Matcher match37(graph()->NewNode(a_op, b1, s4));
+  CheckBaseWithIndexAndDisplacement(&match37, s4, 0, b1, NULL);
+
+  // 3 INPUT
+
+  // (D15 + S3) + B0 -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement32Matcher match38(
+      graph()->NewNode(a_op, graph()->NewNode(a_op, d15, s3), b0));
+  CheckBaseWithIndexAndDisplacement(&match38, p1, 3, b0, d15);
+
+  // (B0 + D15) + S3 -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement32Matcher match39(
+      graph()->NewNode(a_op, graph()->NewNode(a_op, b0, d15), s3));
+  CheckBaseWithIndexAndDisplacement(&match39, p1, 3, b0, d15);
+
+  // (S3 + B0) + D15 -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement32Matcher match40(
+      graph()->NewNode(a_op, graph()->NewNode(a_op, s3, b0), d15));
+  CheckBaseWithIndexAndDisplacement(&match40, p1, 3, b0, d15);
+
+  // D15 + (S3 + B0) -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement32Matcher match41(
+      graph()->NewNode(a_op, d15, graph()->NewNode(a_op, s3, b0)));
+  CheckBaseWithIndexAndDisplacement(&match41, p1, 3, b0, d15);
+
+  // B0 + (D15 + S3) -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement32Matcher match42(
+      graph()->NewNode(a_op, b0, graph()->NewNode(a_op, d15, s3)));
+  CheckBaseWithIndexAndDisplacement(&match42, p1, 3, b0, d15);
+
+  // S3 + (B0 + D15) -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement32Matcher match43(
+      graph()->NewNode(a_op, s3, graph()->NewNode(a_op, b0, d15)));
+  CheckBaseWithIndexAndDisplacement(&match43, p1, 3, b0, d15);
+
+  // Check that scales that require using the base address work correctly.
+}
+
+
+TEST_F(NodeMatcherTest, ScaledWithOffset64Matcher) {
+  graph()->SetStart(graph()->NewNode(common()->Start(0)));
+
+  const Operator* d0_op = common()->Int64Constant(0);
+  Node* d0 = graph()->NewNode(d0_op);
+  USE(d0);
+  const Operator* d1_op = common()->Int64Constant(1);
+  Node* d1 = graph()->NewNode(d1_op);
+  USE(d1);
+  const Operator* d2_op = common()->Int64Constant(2);
+  Node* d2 = graph()->NewNode(d2_op);
+  USE(d2);
+  const Operator* d3_op = common()->Int64Constant(3);
+  Node* d3 = graph()->NewNode(d3_op);
+  USE(d3);
+  const Operator* d4_op = common()->Int64Constant(4);
+  Node* d4 = graph()->NewNode(d4_op);
+  USE(d4);
+  const Operator* d5_op = common()->Int64Constant(5);
+  Node* d5 = graph()->NewNode(d5_op);
+  USE(d5);
+  const Operator* d7_op = common()->Int64Constant(7);
+  Node* d7 = graph()->NewNode(d7_op);
+  USE(d7);
+  const Operator* d8_op = common()->Int64Constant(8);
+  Node* d8 = graph()->NewNode(d8_op);
+  USE(d8);
+  const Operator* d9_op = common()->Int64Constant(9);
+  Node* d9 = graph()->NewNode(d9_op);
+  USE(d9);
+  const Operator* d15_op = common()->Int64Constant(15);
+  Node* d15 = graph()->NewNode(d15_op);
+  USE(d15);
+  const Operator* d15_32_op = common()->Int32Constant(15);
+  Node* d15_32 = graph()->NewNode(d15_32_op);
+  USE(d15_32);
+
+  const Operator* b0_op = common()->Parameter(0);
+  Node* b0 = graph()->NewNode(b0_op, graph()->start());
+  USE(b0);
+  const Operator* b1_op = common()->Parameter(1);
+  Node* b1 = graph()->NewNode(b1_op, graph()->start());
+  USE(b1);
+
+  const Operator* p1_op = common()->Parameter(3);
+  Node* p1 = graph()->NewNode(p1_op, graph()->start());
+  USE(p1);
+
+  const Operator* a_op = machine()->Int64Add();
+  USE(a_op);
+
+  const Operator* m_op = machine()->Int64Mul();
+  Node* m1 = graph()->NewNode(m_op, p1, d1);
+  Node* m2 = graph()->NewNode(m_op, p1, d2);
+  Node* m3 = graph()->NewNode(m_op, p1, d3);
+  Node* m4 = graph()->NewNode(m_op, p1, d4);
+  Node* m5 = graph()->NewNode(m_op, p1, d5);
+  Node* m7 = graph()->NewNode(m_op, p1, d7);
+  Node* m8 = graph()->NewNode(m_op, p1, d8);
+  Node* m9 = graph()->NewNode(m_op, p1, d9);
+  USE(m1);
+  USE(m2);
+  USE(m3);
+  USE(m4);
+  USE(m5);
+  USE(m7);
+  USE(m8);
+  USE(m9);
+
+  const Operator* s_op = machine()->Word64Shl();
+  Node* s0 = graph()->NewNode(s_op, p1, d0);
+  Node* s1 = graph()->NewNode(s_op, p1, d1);
+  Node* s2 = graph()->NewNode(s_op, p1, d2);
+  Node* s3 = graph()->NewNode(s_op, p1, d3);
+  Node* s4 = graph()->NewNode(s_op, p1, d4);
+  USE(s0);
+  USE(s1);
+  USE(s2);
+  USE(s3);
+  USE(s4);
+
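+  // As in the 32-bit test above, the expected scale below is log2 of the
+  // constant factor (multiply by 2/4/8 -> scale 1/2/3, shift by k -> k).
+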
+  // 1 INPUT
+
+  // The only relevant test case is checking for a non-match.
+  BaseWithIndexAndDisplacement64Matcher match0(d15);
+  EXPECT_FALSE(match0.matches());
+
+  // 2 INPUT
+
+  // (B0 + B1) -> [B1, 0, B0, NULL]
+  BaseWithIndexAndDisplacement64Matcher match1(graph()->NewNode(a_op, b0, b1));
+  CheckBaseWithIndexAndDisplacement(&match1, b1, 0, b0, NULL);
+
+  // (B0 + D15) -> [NULL, 0, B0, D15]
+  BaseWithIndexAndDisplacement64Matcher match2(graph()->NewNode(a_op, b0, d15));
+  CheckBaseWithIndexAndDisplacement(&match2, NULL, 0, b0, d15);
+
+  BaseWithIndexAndDisplacement64Matcher match2_32(
+      graph()->NewNode(a_op, b0, d15_32));
+  CheckBaseWithIndexAndDisplacement(&match2_32, NULL, 0, b0, d15_32);
+
+  // (D15 + B0) -> [NULL, 0, B0, D15]
+  BaseWithIndexAndDisplacement64Matcher match3(graph()->NewNode(a_op, d15, b0));
+  CheckBaseWithIndexAndDisplacement(&match3, NULL, 0, b0, d15);
+
+  // (B0 + M1) -> [p1, 0, B0, NULL]
+  BaseWithIndexAndDisplacement64Matcher match4(graph()->NewNode(a_op, b0, m1));
+  CheckBaseWithIndexAndDisplacement(&match4, p1, 0, b0, NULL);
+
+  // (M1 + B0) -> [p1, 0, B0, NULL]
+  m1 = graph()->NewNode(m_op, p1, d1);
+  BaseWithIndexAndDisplacement64Matcher match5(graph()->NewNode(a_op, m1, b0));
+  CheckBaseWithIndexAndDisplacement(&match5, p1, 0, b0, NULL);
+
+  // (D15 + M1) -> [P1, 0, NULL, D15]
+  m1 = graph()->NewNode(m_op, p1, d1);
+  BaseWithIndexAndDisplacement64Matcher match6(graph()->NewNode(a_op, d15, m1));
+  CheckBaseWithIndexAndDisplacement(&match6, p1, 0, NULL, d15);
+
+  // (M1 + D15) -> [P1, 0, NULL, D15]
+  m1 = graph()->NewNode(m_op, p1, d1);
+  BaseWithIndexAndDisplacement64Matcher match7(graph()->NewNode(a_op, m1, d15));
+  CheckBaseWithIndexAndDisplacement(&match7, p1, 0, NULL, d15);
+
+  // (B0 + S0) -> [p1, 0, B0, NULL]
+  BaseWithIndexAndDisplacement64Matcher match8(graph()->NewNode(a_op, b0, s0));
+  CheckBaseWithIndexAndDisplacement(&match8, p1, 0, b0, NULL);
+
+  // (S0 + B0) -> [p1, 0, B0, NULL]
+  s0 = graph()->NewNode(s_op, p1, d0);
+  BaseWithIndexAndDisplacement64Matcher match9(graph()->NewNode(a_op, s0, b0));
+  CheckBaseWithIndexAndDisplacement(&match9, p1, 0, b0, NULL);
+
+  // (D15 + S0) -> [P1, 0, NULL, D15]
+  s0 = graph()->NewNode(s_op, p1, d0);
+  BaseWithIndexAndDisplacement64Matcher match10(
+      graph()->NewNode(a_op, d15, s0));
+  CheckBaseWithIndexAndDisplacement(&match10, p1, 0, NULL, d15);
+
+  // (S0 + D15) -> [P1, 0, NULL, D15]
+  s0 = graph()->NewNode(s_op, p1, d0);
+  BaseWithIndexAndDisplacement64Matcher match11(
+      graph()->NewNode(a_op, s0, d15));
+  CheckBaseWithIndexAndDisplacement(&match11, p1, 0, NULL, d15);
+
+  // (B0 + M2) -> [p1, 1, B0, NULL]
+  BaseWithIndexAndDisplacement64Matcher match12(graph()->NewNode(a_op, b0, m2));
+  CheckBaseWithIndexAndDisplacement(&match12, p1, 1, b0, NULL);
+
+  // (M2 + B0) -> [p1, 1, B0, NULL]
+  m2 = graph()->NewNode(m_op, p1, d2);
+  BaseWithIndexAndDisplacement64Matcher match13(graph()->NewNode(a_op, m2, b0));
+  CheckBaseWithIndexAndDisplacement(&match13, p1, 1, b0, NULL);
+
+  // (D15 + M2) -> [P1, 1, NULL, D15]
+  m2 = graph()->NewNode(m_op, p1, d2);
+  BaseWithIndexAndDisplacement64Matcher match14(
+      graph()->NewNode(a_op, d15, m2));
+  CheckBaseWithIndexAndDisplacement(&match14, p1, 1, NULL, d15);
+
+  // (M2 + D15) -> [P1, 1, NULL, D15]
+  m2 = graph()->NewNode(m_op, p1, d2);
+  BaseWithIndexAndDisplacement64Matcher match15(
+      graph()->NewNode(a_op, m2, d15));
+  CheckBaseWithIndexAndDisplacement(&match15, p1, 1, NULL, d15);
+
+  // (B0 + S1) -> [p1, 1, B0, NULL]
+  BaseWithIndexAndDisplacement64Matcher match16(graph()->NewNode(a_op, b0, s1));
+  CheckBaseWithIndexAndDisplacement(&match16, p1, 1, b0, NULL);
+
+  // (S1 + B0) -> [p1, 1, B0, NULL]
+  s1 = graph()->NewNode(s_op, p1, d1);
+  BaseWithIndexAndDisplacement64Matcher match17(graph()->NewNode(a_op, s1, b0));
+  CheckBaseWithIndexAndDisplacement(&match17, p1, 1, b0, NULL);
+
+  // (D15 + S1) -> [P1, 1, NULL, D15]
+  s1 = graph()->NewNode(s_op, p1, d1);
+  BaseWithIndexAndDisplacement64Matcher match18(
+      graph()->NewNode(a_op, d15, s1));
+  CheckBaseWithIndexAndDisplacement(&match18, p1, 1, NULL, d15);
+
+  // (S1 + D15) -> [P1, 1, NULL, D15]
+  s1 = graph()->NewNode(s_op, p1, d1);
+  BaseWithIndexAndDisplacement64Matcher match19(
+      graph()->NewNode(a_op, s1, d15));
+  CheckBaseWithIndexAndDisplacement(&match19, p1, 1, NULL, d15);
+
+  // (B0 + M4) -> [p1, 2, B0, NULL]
+  BaseWithIndexAndDisplacement64Matcher match20(graph()->NewNode(a_op, b0, m4));
+  CheckBaseWithIndexAndDisplacement(&match20, p1, 2, b0, NULL);
+
+  // (M4 + B0) -> [p1, 2, B0, NULL]
+  m4 = graph()->NewNode(m_op, p1, d4);
+  BaseWithIndexAndDisplacement64Matcher match21(graph()->NewNode(a_op, m4, b0));
+  CheckBaseWithIndexAndDisplacement(&match21, p1, 2, b0, NULL);
+
+  // (D15 + M4) -> [p1, 2, NULL, D15]
+  m4 = graph()->NewNode(m_op, p1, d4);
+  BaseWithIndexAndDisplacement64Matcher match22(
+      graph()->NewNode(a_op, d15, m4));
+  CheckBaseWithIndexAndDisplacement(&match22, p1, 2, NULL, d15);
+
+  // (M4 + D15) -> [p1, 2, NULL, D15]
+  m4 = graph()->NewNode(m_op, p1, d4);
+  BaseWithIndexAndDisplacement64Matcher match23(
+      graph()->NewNode(a_op, m4, d15));
+  CheckBaseWithIndexAndDisplacement(&match23, p1, 2, NULL, d15);
+
+  // (B0 + S2) -> [p1, 2, B0, NULL]
+  BaseWithIndexAndDisplacement64Matcher match24(graph()->NewNode(a_op, b0, s2));
+  CheckBaseWithIndexAndDisplacement(&match24, p1, 2, b0, NULL);
+
+  // (S2 + B0) -> [p1, 2, B0, NULL]
+  s2 = graph()->NewNode(s_op, p1, d2);
+  BaseWithIndexAndDisplacement64Matcher match25(graph()->NewNode(a_op, s2, b0));
+  CheckBaseWithIndexAndDisplacement(&match25, p1, 2, b0, NULL);
+
+  // (D15 + S2) -> [p1, 2, NULL, D15]
+  s2 = graph()->NewNode(s_op, p1, d2);
+  BaseWithIndexAndDisplacement64Matcher match26(
+      graph()->NewNode(a_op, d15, s2));
+  CheckBaseWithIndexAndDisplacement(&match26, p1, 2, NULL, d15);
+
+  // (S2 + D15) -> [p1, 2, NULL, D15]
+  s2 = graph()->NewNode(s_op, p1, d2);
+  BaseWithIndexAndDisplacement64Matcher match27(
+      graph()->NewNode(a_op, s2, d15));
+  CheckBaseWithIndexAndDisplacement(&match27, p1, 2, NULL, d15);
+
+  // (B0 + M8) -> [p1, 3, B0, NULL]
+  BaseWithIndexAndDisplacement64Matcher match28(graph()->NewNode(a_op, b0, m8));
+  CheckBaseWithIndexAndDisplacement(&match28, p1, 3, b0, NULL);
+
+  // (M8 + B0) -> [p1, 3, B0, NULL]
+  m8 = graph()->NewNode(m_op, p1, d8);
+  BaseWithIndexAndDisplacement64Matcher match29(graph()->NewNode(a_op, m8, b0));
+  CheckBaseWithIndexAndDisplacement(&match29, p1, 3, b0, NULL);
+
+  // (D15 + M8) -> [p1, 3, NULL, D15]
+  m8 = graph()->NewNode(m_op, p1, d8);
+  BaseWithIndexAndDisplacement64Matcher match30(
+      graph()->NewNode(a_op, d15, m8));
+  CheckBaseWithIndexAndDisplacement(&match30, p1, 3, NULL, d15);
+
+  // (M8 + D15) -> [p1, 3, NULL, D15]
+  m8 = graph()->NewNode(m_op, p1, d8);
+  BaseWithIndexAndDisplacement64Matcher match31(
+      graph()->NewNode(a_op, m8, d15));
+  CheckBaseWithIndexAndDisplacement(&match31, p1, 3, NULL, d15);
+
+  // (B0 + S3) -> [p1, 3, B0, NULL]
+  BaseWithIndexAndDisplacement64Matcher match32(graph()->NewNode(a_op, b0, s3));
+  CheckBaseWithIndexAndDisplacement(&match32, p1, 3, b0, NULL);
+
+  // (S3 + B0) -> [p1, 3, B0, NULL]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement64Matcher match33(graph()->NewNode(a_op, s3, b0));
+  CheckBaseWithIndexAndDisplacement(&match33, p1, 3, b0, NULL);
+
+  // (D15 + S3) -> [p1, 3, NULL, D15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement64Matcher match34(
+      graph()->NewNode(a_op, d15, s3));
+  CheckBaseWithIndexAndDisplacement(&match34, p1, 3, NULL, d15);
+
+  // (S3 + D15) -> [p1, 3, NULL, D15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement64Matcher match35(
+      graph()->NewNode(a_op, s3, d15));
+  CheckBaseWithIndexAndDisplacement(&match35, p1, 3, NULL, d15);
+
+  // 2 INPUT - NEGATIVE CASES
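+  // ("Negative" means the scaled operand cannot be folded into the address:
+  // a multiply by 3 would need the base slot free for the p1 + p1 * 2 trick,
+  // but B1 already occupies it, and a shift by 4 is a scale factor of 16,
+  // which x64 addressing cannot encode. The mul/shift node is therefore kept
+  // whole as the index with scale 0.)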
+
+  // (B1 + M3) -> [M3, 0, B1, NULL]
+  BaseWithIndexAndDisplacement64Matcher match36(graph()->NewNode(a_op, b1, m3));
+  CheckBaseWithIndexAndDisplacement(&match36, m3, 0, b1, NULL);
+
+  // (B1 + S4) -> [S4, 0, B1, NULL]
+  BaseWithIndexAndDisplacement64Matcher match37(graph()->NewNode(a_op, b1, s4));
+  CheckBaseWithIndexAndDisplacement(&match37, s4, 0, b1, NULL);
+
+  // 3 INPUT
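+  // (Every association of B0 + S3 + D15 should recover the same
+  // [p1, 3, b0, d15] decomposition.)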
+
+  // (D15 + S3) + B0 -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement64Matcher match38(
+      graph()->NewNode(a_op, graph()->NewNode(a_op, d15, s3), b0));
+  CheckBaseWithIndexAndDisplacement(&match38, p1, 3, b0, d15);
+
+  // (B0 + D15) + S3 -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement64Matcher match39(
+      graph()->NewNode(a_op, graph()->NewNode(a_op, b0, d15), s3));
+  CheckBaseWithIndexAndDisplacement(&match39, p1, 3, b0, d15);
+
+  // (S3 + B0) + D15 -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement64Matcher match40(
+      graph()->NewNode(a_op, graph()->NewNode(a_op, s3, b0), d15));
+  CheckBaseWithIndexAndDisplacement(&match40, p1, 3, b0, d15);
+
+  // D15 + (S3 + B0) -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement64Matcher match41(
+      graph()->NewNode(a_op, d15, graph()->NewNode(a_op, s3, b0)));
+  CheckBaseWithIndexAndDisplacement(&match41, p1, 3, b0, d15);
+
+  // B0 + (D15 + S3) -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement64Matcher match42(
+      graph()->NewNode(a_op, b0, graph()->NewNode(a_op, d15, s3)));
+  CheckBaseWithIndexAndDisplacement(&match42, p1, 3, b0, d15);
+
+  // S3 + (B0 + D15) -> [p1, 3, b0, d15]
+  s3 = graph()->NewNode(s_op, p1, d3);
+  BaseWithIndexAndDisplacement64Matcher match43(
+      graph()->NewNode(a_op, s3, graph()->NewNode(a_op, b0, d15)));
+  CheckBaseWithIndexAndDisplacement(&match43, p1, 3, b0, d15);
+
+  // 2 INPUT with non-power of 2 scale
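+  // (Multiplies by 3, 5 and 9 still fold by reusing the index register as
+  // the base: p1 * 3 == p1 + p1 * 2, p1 * 5 == p1 + p1 * 4 and
+  // p1 * 9 == p1 + p1 * 8, hence base == index == p1 below.)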
+
+  // (M3 + D15) -> [p1, 1, p1, D15]
+  m3 = graph()->NewNode(m_op, p1, d3);
+  BaseWithIndexAndDisplacement64Matcher match44(
+      graph()->NewNode(a_op, m3, d15));
+  CheckBaseWithIndexAndDisplacement(&match44, p1, 1, p1, d15);
+
+  // (M5 + D15) -> [p1, 2, p1, D15]
+  m5 = graph()->NewNode(m_op, p1, d5);
+  BaseWithIndexAndDisplacement64Matcher match45(
+      graph()->NewNode(a_op, m5, d15));
+  CheckBaseWithIndexAndDisplacement(&match45, p1, 2, p1, d15);
+
+  // (M9 + D15) -> [p1, 3, p1, D15]
+  m9 = graph()->NewNode(m_op, p1, d9);
+  BaseWithIndexAndDisplacement64Matcher match46(
+      graph()->NewNode(a_op, m9, d15));
+  CheckBaseWithIndexAndDisplacement(&match46, p1, 3, p1, d15);
+
+  // 3 INPUT negative cases: non-power of 2 scale but with a base
+
+  // ((M3 + B0) + D15) -> [m3, 0, b0, D15]
+  m3 = graph()->NewNode(m_op, p1, d3);
+  Node* temp = graph()->NewNode(a_op, m3, b0);
+  BaseWithIndexAndDisplacement64Matcher match47(
+      graph()->NewNode(a_op, temp, d15));
+  CheckBaseWithIndexAndDisplacement(&match47, m3, 0, b0, d15);
+
+  // (M3 + (D15 + B0)) -> [m3, 0, b0, D15]
+  m3 = graph()->NewNode(m_op, p1, d3);
+  temp = graph()->NewNode(a_op, d15, b0);
+  BaseWithIndexAndDisplacement64Matcher match48(
+      graph()->NewNode(a_op, m3, temp));
+  CheckBaseWithIndexAndDisplacement(&match48, m3, 0, b0, d15);
+
+  // ((B0 + M3) + D15) -> [m3, 0, b0, D15]
+  m3 = graph()->NewNode(m_op, p1, d3);
+  temp = graph()->NewNode(a_op, b0, m3);
+  BaseWithIndexAndDisplacement64Matcher match49(
+      graph()->NewNode(a_op, temp, d15));
+  CheckBaseWithIndexAndDisplacement(&match49, m3, 0, b0, d15);
+
+  // (M3 + (B0 + D15)) -> [m3, 0, b0, D15]
+  m3 = graph()->NewNode(m_op, p1, d3);
+  temp = graph()->NewNode(a_op, b0, d15);
+  BaseWithIndexAndDisplacement64Matcher match50(
+      graph()->NewNode(a_op, m3, temp));
+  CheckBaseWithIndexAndDisplacement(&match50, m3, 0, b0, d15);
+}
+
+
+}  // namespace compiler
+}  // namespace internal
+}  // namespace v8
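
For orientation: the matcher exercised above is the one the x64 instruction
selector uses to fold address arithmetic into a single memory operand. A
minimal consumer-side sketch, assuming the matcher's accessors (matches(),
index(), scale(), base(), displacement()) and a hypothetical EmitLea()
helper that is not part of V8:

    // Sketch only: fold "b0 + p1 * 8 + d15" into one
    // [base + index * 2^scale + disp] operand.
    BaseWithIndexAndDisplacement64Matcher m(node);
    if (m.matches()) {
      // scale() is the log2 of the index multiplier, as checked above.
      EmitLea(m.base(), m.index(), m.scale(), m.displacement());
    }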
index fde7f03..3162c54 100644 (file)
@@ -4,6 +4,7 @@
 
 #include "test/unittests/compiler/node-test-utils.h"
 
+#include "src/assembler.h"
 #include "src/compiler/node-properties-inl.h"
 #include "src/compiler/simplified-operator.h"
 
@@ -39,12 +40,12 @@ class NodeMatcher : public MatcherInterface<Node*> {
  public:
   explicit NodeMatcher(IrOpcode::Value opcode) : opcode_(opcode) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const OVERRIDE {
     *os << "is a " << IrOpcode::Mnemonic(opcode_) << " node";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node,
+                       MatchResultListener* listener) const OVERRIDE {
     if (node == NULL) {
       *listener << "which is NULL";
       return false;
@@ -70,7 +71,7 @@ class IsBranchMatcher FINAL : public NodeMatcher {
         value_matcher_(value_matcher),
         control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose value (";
     value_matcher_.DescribeTo(os);
@@ -79,8 +80,7 @@ class IsBranchMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
                                  "value", value_matcher_, listener) &&
@@ -102,7 +102,7 @@ class IsMergeMatcher FINAL : public NodeMatcher {
         control0_matcher_(control0_matcher),
         control1_matcher_(control1_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose control0 (";
     control0_matcher_.DescribeTo(os);
@@ -111,8 +111,7 @@ class IsMergeMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(NodeProperties::GetControlInput(node, 0),
                                  "control0", control0_matcher_, listener) &&
@@ -132,15 +131,14 @@ class IsControl1Matcher FINAL : public NodeMatcher {
                     const Matcher<Node*>& control_matcher)
       : NodeMatcher(opcode), control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose control (";
     control_matcher_.DescribeTo(os);
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(NodeProperties::GetControlInput(node),
                                  "control", control_matcher_, listener));
@@ -159,7 +157,7 @@ class IsFinishMatcher FINAL : public NodeMatcher {
         value_matcher_(value_matcher),
         effect_matcher_(effect_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose value (";
     value_matcher_.DescribeTo(os);
@@ -168,8 +166,7 @@ class IsFinishMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
                                  "value", value_matcher_, listener) &&
@@ -189,15 +186,14 @@ class IsConstantMatcher FINAL : public NodeMatcher {
   IsConstantMatcher(IrOpcode::Value opcode, const Matcher<T>& value_matcher)
       : NodeMatcher(opcode), value_matcher_(value_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose value (";
     value_matcher_.DescribeTo(os);
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<T>(node), "value", value_matcher_,
                                  listener));
@@ -220,7 +216,7 @@ class IsSelectMatcher FINAL : public NodeMatcher {
         value1_matcher_(value1_matcher),
         value2_matcher_(value2_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose type (";
     type_matcher_.DescribeTo(os);
@@ -233,8 +229,7 @@ class IsSelectMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
                                  type_matcher_, listener) &&
@@ -266,7 +261,7 @@ class IsPhiMatcher FINAL : public NodeMatcher {
         value1_matcher_(value1_matcher),
         control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose type (";
     type_matcher_.DescribeTo(os);
@@ -279,8 +274,7 @@ class IsPhiMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<MachineType>(node), "type",
                                  type_matcher_, listener) &&
@@ -310,7 +304,7 @@ class IsEffectPhiMatcher FINAL : public NodeMatcher {
         effect1_matcher_(effect1_matcher),
         control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << "), effect0 (";
     effect0_matcher_.DescribeTo(os);
@@ -321,8 +315,7 @@ class IsEffectPhiMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(NodeProperties::GetEffectInput(node, 0),
                                  "effect0", effect0_matcher_, listener) &&
@@ -347,7 +340,7 @@ class IsProjectionMatcher FINAL : public NodeMatcher {
         index_matcher_(index_matcher),
         base_matcher_(base_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose index (";
     index_matcher_.DescribeTo(os);
@@ -356,8 +349,7 @@ class IsProjectionMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<size_t>(node), "index",
                                  index_matcher_, listener) &&
@@ -385,7 +377,7 @@ class IsCall2Matcher FINAL : public NodeMatcher {
         effect_matcher_(effect_matcher),
         control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose value0 (";
     value0_matcher_.DescribeTo(os);
@@ -398,8 +390,7 @@ class IsCall2Matcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<CallDescriptor*>(node),
                                  "descriptor", descriptor_matcher_, listener) &&
@@ -440,7 +431,7 @@ class IsCall4Matcher FINAL : public NodeMatcher {
         effect_matcher_(effect_matcher),
         control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose value0 (";
     value0_matcher_.DescribeTo(os);
@@ -457,8 +448,7 @@ class IsCall4Matcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<CallDescriptor*>(node),
                                  "descriptor", descriptor_matcher_, listener) &&
@@ -499,7 +489,7 @@ class IsLoadFieldMatcher FINAL : public NodeMatcher {
         effect_matcher_(effect_matcher),
         control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose access (";
     access_matcher_.DescribeTo(os);
@@ -512,8 +502,7 @@ class IsLoadFieldMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<FieldAccess>(node), "access",
                                  access_matcher_, listener) &&
@@ -533,21 +522,198 @@ class IsLoadFieldMatcher FINAL : public NodeMatcher {
 };
 
 
+class IsStoreFieldMatcher FINAL : public NodeMatcher {
+ public:
+  IsStoreFieldMatcher(const Matcher<FieldAccess>& access_matcher,
+                      const Matcher<Node*>& base_matcher,
+                      const Matcher<Node*>& value_matcher,
+                      const Matcher<Node*>& effect_matcher,
+                      const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kStoreField),
+        access_matcher_(access_matcher),
+        base_matcher_(base_matcher),
+        value_matcher_(value_matcher),
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
+
+  void DescribeTo(std::ostream* os) const FINAL {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose access (";
+    access_matcher_.DescribeTo(os);
+    *os << "), base (";
+    base_matcher_.DescribeTo(os);
+    *os << "), value (";
+    value_matcher_.DescribeTo(os);
+    *os << "), effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(OpParameter<FieldAccess>(node), "access",
+                                 access_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "value", value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<FieldAccess> access_matcher_;
+  const Matcher<Node*> base_matcher_;
+  const Matcher<Node*> value_matcher_;
+  const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsLoadBufferMatcher FINAL : public NodeMatcher {
+ public:
+  IsLoadBufferMatcher(const Matcher<BufferAccess>& access_matcher,
+                      const Matcher<Node*>& buffer_matcher,
+                      const Matcher<Node*>& offset_matcher,
+                      const Matcher<Node*>& length_matcher,
+                      const Matcher<Node*>& effect_matcher,
+                      const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kLoadBuffer),
+        access_matcher_(access_matcher),
+        buffer_matcher_(buffer_matcher),
+        offset_matcher_(offset_matcher),
+        length_matcher_(length_matcher),
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
+
+  void DescribeTo(std::ostream* os) const FINAL {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose access (";
+    access_matcher_.DescribeTo(os);
+    *os << "), buffer (";
+    buffer_matcher_.DescribeTo(os);
+    *os << "), offset (";
+    offset_matcher_.DescribeTo(os);
+    *os << "), length (";
+    length_matcher_.DescribeTo(os);
+    *os << "), effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(BufferAccessOf(node->op()), "access",
+                                 access_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "buffer", buffer_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "offset", offset_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+                                 "length", length_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<BufferAccess> access_matcher_;
+  const Matcher<Node*> buffer_matcher_;
+  const Matcher<Node*> offset_matcher_;
+  const Matcher<Node*> length_matcher_;
+  const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
+class IsStoreBufferMatcher FINAL : public NodeMatcher {
+ public:
+  IsStoreBufferMatcher(const Matcher<BufferAccess>& access_matcher,
+                       const Matcher<Node*>& buffer_matcher,
+                       const Matcher<Node*>& offset_matcher,
+                       const Matcher<Node*>& length_matcher,
+                       const Matcher<Node*>& value_matcher,
+                       const Matcher<Node*>& effect_matcher,
+                       const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kStoreBuffer),
+        access_matcher_(access_matcher),
+        buffer_matcher_(buffer_matcher),
+        offset_matcher_(offset_matcher),
+        length_matcher_(length_matcher),
+        value_matcher_(value_matcher),
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
+
+  void DescribeTo(std::ostream* os) const FINAL {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose access (";
+    access_matcher_.DescribeTo(os);
+    *os << "), buffer (";
+    buffer_matcher_.DescribeTo(os);
+    *os << "), offset (";
+    offset_matcher_.DescribeTo(os);
+    *os << "), length (";
+    length_matcher_.DescribeTo(os);
+    *os << "), value (";
+    value_matcher_.DescribeTo(os);
+    *os << "), effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(BufferAccessOf(node->op()), "access",
+                                 access_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
+                                 "buffer", buffer_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
+                                 "offset", offset_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
+                                 "length", length_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
+                                 "value", value_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<BufferAccess> access_matcher_;
+  const Matcher<Node*> buffer_matcher_;
+  const Matcher<Node*> offset_matcher_;
+  const Matcher<Node*> length_matcher_;
+  const Matcher<Node*> value_matcher_;
+  const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
 class IsLoadElementMatcher FINAL : public NodeMatcher {
  public:
   IsLoadElementMatcher(const Matcher<ElementAccess>& access_matcher,
                        const Matcher<Node*>& base_matcher,
                        const Matcher<Node*>& index_matcher,
-                       const Matcher<Node*>& length_matcher,
-                       const Matcher<Node*>& effect_matcher)
+                       const Matcher<Node*>& effect_matcher,
+                       const Matcher<Node*>& control_matcher)
       : NodeMatcher(IrOpcode::kLoadElement),
         access_matcher_(access_matcher),
         base_matcher_(base_matcher),
         index_matcher_(index_matcher),
-        length_matcher_(length_matcher),
-        effect_matcher_(effect_matcher) {}
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose access (";
     access_matcher_.DescribeTo(os);
@@ -555,15 +721,14 @@ class IsLoadElementMatcher FINAL : public NodeMatcher {
     base_matcher_.DescribeTo(os);
     *os << "), index (";
     index_matcher_.DescribeTo(os);
-    *os << "), length (";
-    length_matcher_.DescribeTo(os);
-    *os << ") and effect (";
+    *os << "), effect (";
     effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<ElementAccess>(node), "access",
                                  access_matcher_, listener) &&
@@ -571,18 +736,18 @@ class IsLoadElementMatcher FINAL : public NodeMatcher {
                                  base_matcher_, listener) &&
             PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
                                  "index", index_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
-                                 "length", length_matcher_, listener) &&
             PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
-                                 effect_matcher_, listener));
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
   }
 
  private:
   const Matcher<ElementAccess> access_matcher_;
   const Matcher<Node*> base_matcher_;
   const Matcher<Node*> index_matcher_;
-  const Matcher<Node*> length_matcher_;
   const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
 };
 
 
@@ -591,7 +756,6 @@ class IsStoreElementMatcher FINAL : public NodeMatcher {
   IsStoreElementMatcher(const Matcher<ElementAccess>& access_matcher,
                         const Matcher<Node*>& base_matcher,
                         const Matcher<Node*>& index_matcher,
-                        const Matcher<Node*>& length_matcher,
                         const Matcher<Node*>& value_matcher,
                         const Matcher<Node*>& effect_matcher,
                         const Matcher<Node*>& control_matcher)
@@ -599,12 +763,11 @@ class IsStoreElementMatcher FINAL : public NodeMatcher {
         access_matcher_(access_matcher),
         base_matcher_(base_matcher),
         index_matcher_(index_matcher),
-        length_matcher_(length_matcher),
         value_matcher_(value_matcher),
         effect_matcher_(effect_matcher),
         control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose access (";
     access_matcher_.DescribeTo(os);
@@ -612,8 +775,6 @@ class IsStoreElementMatcher FINAL : public NodeMatcher {
     base_matcher_.DescribeTo(os);
     *os << "), index (";
     index_matcher_.DescribeTo(os);
-    *os << "), length (";
-    length_matcher_.DescribeTo(os);
     *os << "), value (";
     value_matcher_.DescribeTo(os);
     *os << "), effect (";
@@ -623,8 +784,7 @@ class IsStoreElementMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<ElementAccess>(node), "access",
                                  access_matcher_, listener) &&
@@ -633,8 +793,6 @@ class IsStoreElementMatcher FINAL : public NodeMatcher {
             PrintMatchAndExplain(NodeProperties::GetValueInput(node, 1),
                                  "index", index_matcher_, listener) &&
             PrintMatchAndExplain(NodeProperties::GetValueInput(node, 2),
-                                 "length", length_matcher_, listener) &&
-            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 3),
                                  "value", value_matcher_, listener) &&
             PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
                                  effect_matcher_, listener) &&
@@ -646,7 +804,6 @@ class IsStoreElementMatcher FINAL : public NodeMatcher {
   const Matcher<ElementAccess> access_matcher_;
   const Matcher<Node*> base_matcher_;
   const Matcher<Node*> index_matcher_;
-  const Matcher<Node*> length_matcher_;
   const Matcher<Node*> value_matcher_;
   const Matcher<Node*> effect_matcher_;
   const Matcher<Node*> control_matcher_;
@@ -667,7 +824,7 @@ class IsLoadMatcher FINAL : public NodeMatcher {
         effect_matcher_(effect_matcher),
         control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose rep (";
     rep_matcher_.DescribeTo(os);
@@ -682,8 +839,7 @@ class IsLoadMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<LoadRepresentation>(node), "rep",
                                  rep_matcher_, listener) &&
@@ -706,6 +862,51 @@ class IsLoadMatcher FINAL : public NodeMatcher {
 };
 
 
+class IsToNumberMatcher FINAL : public NodeMatcher {
+ public:
+  IsToNumberMatcher(const Matcher<Node*>& base_matcher,
+                    const Matcher<Node*>& context_matcher,
+                    const Matcher<Node*>& effect_matcher,
+                    const Matcher<Node*>& control_matcher)
+      : NodeMatcher(IrOpcode::kJSToNumber),
+        base_matcher_(base_matcher),
+        context_matcher_(context_matcher),
+        effect_matcher_(effect_matcher),
+        control_matcher_(control_matcher) {}
+
+  void DescribeTo(std::ostream* os) const FINAL {
+    NodeMatcher::DescribeTo(os);
+    *os << " whose base (";
+    base_matcher_.DescribeTo(os);
+    *os << "), context (";
+    context_matcher_.DescribeTo(os);
+    *os << "), effect (";
+    effect_matcher_.DescribeTo(os);
+    *os << ") and control (";
+    control_matcher_.DescribeTo(os);
+    *os << ")";
+  }
+
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
+    return (NodeMatcher::MatchAndExplain(node, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "base",
+                                 base_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetContextInput(node),
+                                 "context", context_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetEffectInput(node), "effect",
+                                 effect_matcher_, listener) &&
+            PrintMatchAndExplain(NodeProperties::GetControlInput(node),
+                                 "control", control_matcher_, listener));
+  }
+
+ private:
+  const Matcher<Node*> base_matcher_;
+  const Matcher<Node*> context_matcher_;
+  const Matcher<Node*> effect_matcher_;
+  const Matcher<Node*> control_matcher_;
+};
+
+
 class IsStoreMatcher FINAL : public NodeMatcher {
  public:
   IsStoreMatcher(const Matcher<StoreRepresentation>& rep_matcher,
@@ -722,7 +923,7 @@ class IsStoreMatcher FINAL : public NodeMatcher {
         effect_matcher_(effect_matcher),
         control_matcher_(control_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose rep (";
     rep_matcher_.DescribeTo(os);
@@ -739,8 +940,7 @@ class IsStoreMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(OpParameter<StoreRepresentation>(node), "rep",
                                  rep_matcher_, listener) &&
@@ -774,7 +974,7 @@ class IsBinopMatcher FINAL : public NodeMatcher {
         lhs_matcher_(lhs_matcher),
         rhs_matcher_(rhs_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose lhs (";
     lhs_matcher_.DescribeTo(os);
@@ -783,8 +983,7 @@ class IsBinopMatcher FINAL : public NodeMatcher {
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0), "lhs",
                                  lhs_matcher_, listener) &&
@@ -803,15 +1002,14 @@ class IsUnopMatcher FINAL : public NodeMatcher {
   IsUnopMatcher(IrOpcode::Value opcode, const Matcher<Node*>& input_matcher)
       : NodeMatcher(opcode), input_matcher_(input_matcher) {}
 
-  virtual void DescribeTo(std::ostream* os) const OVERRIDE {
+  void DescribeTo(std::ostream* os) const FINAL {
     NodeMatcher::DescribeTo(os);
     *os << " whose input (";
     input_matcher_.DescribeTo(os);
     *os << ")";
   }
 
-  virtual bool MatchAndExplain(Node* node,
-                               MatchResultListener* listener) const OVERRIDE {
+  bool MatchAndExplain(Node* node, MatchResultListener* listener) const FINAL {
     return (NodeMatcher::MatchAndExplain(node, listener) &&
             PrintMatchAndExplain(NodeProperties::GetValueInput(node, 0),
                                  "input", input_matcher_, listener));
@@ -966,27 +1164,62 @@ Matcher<Node*> IsLoadField(const Matcher<FieldAccess>& access_matcher,
 }
 
 
+Matcher<Node*> IsStoreField(const Matcher<FieldAccess>& access_matcher,
+                            const Matcher<Node*>& base_matcher,
+                            const Matcher<Node*>& value_matcher,
+                            const Matcher<Node*>& effect_matcher,
+                            const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsStoreFieldMatcher(access_matcher, base_matcher,
+                                             value_matcher, effect_matcher,
+                                             control_matcher));
+}
+
+
+Matcher<Node*> IsLoadBuffer(const Matcher<BufferAccess>& access_matcher,
+                            const Matcher<Node*>& buffer_matcher,
+                            const Matcher<Node*>& offset_matcher,
+                            const Matcher<Node*>& length_matcher,
+                            const Matcher<Node*>& effect_matcher,
+                            const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsLoadBufferMatcher(access_matcher, buffer_matcher,
+                                             offset_matcher, length_matcher,
+                                             effect_matcher, control_matcher));
+}
+
+
+Matcher<Node*> IsStoreBuffer(const Matcher<BufferAccess>& access_matcher,
+                             const Matcher<Node*>& buffer_matcher,
+                             const Matcher<Node*>& offset_matcher,
+                             const Matcher<Node*>& length_matcher,
+                             const Matcher<Node*>& value_matcher,
+                             const Matcher<Node*>& effect_matcher,
+                             const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsStoreBufferMatcher(
+      access_matcher, buffer_matcher, offset_matcher, length_matcher,
+      value_matcher, effect_matcher, control_matcher));
+}
+
+
 Matcher<Node*> IsLoadElement(const Matcher<ElementAccess>& access_matcher,
                              const Matcher<Node*>& base_matcher,
                              const Matcher<Node*>& index_matcher,
-                             const Matcher<Node*>& length_matcher,
-                             const Matcher<Node*>& effect_matcher) {
+                             const Matcher<Node*>& effect_matcher,
+                             const Matcher<Node*>& control_matcher) {
   return MakeMatcher(new IsLoadElementMatcher(access_matcher, base_matcher,
-                                              index_matcher, length_matcher,
-                                              effect_matcher));
+                                              index_matcher, effect_matcher,
+                                              control_matcher));
 }
 
 
 Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
                               const Matcher<Node*>& base_matcher,
                               const Matcher<Node*>& index_matcher,
-                              const Matcher<Node*>& length_matcher,
                               const Matcher<Node*>& value_matcher,
                               const Matcher<Node*>& effect_matcher,
                               const Matcher<Node*>& control_matcher) {
   return MakeMatcher(new IsStoreElementMatcher(
-      access_matcher, base_matcher, index_matcher, length_matcher,
-      value_matcher, effect_matcher, control_matcher));
+      access_matcher, base_matcher, index_matcher, value_matcher,
+      effect_matcher, control_matcher));
 }
 
 
@@ -1000,6 +1233,15 @@ Matcher<Node*> IsLoad(const Matcher<LoadRepresentation>& rep_matcher,
 }
 
 
+Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
+                          const Matcher<Node*>& context_matcher,
+                          const Matcher<Node*>& effect_matcher,
+                          const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsToNumberMatcher(base_matcher, context_matcher,
+                                           effect_matcher, control_matcher));
+}
+
+
 Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
                        const Matcher<Node*>& base_matcher,
                        const Matcher<Node*>& index_matcher,
@@ -1063,6 +1305,8 @@ IS_UNOP_MATCHER(Float64Floor)
 IS_UNOP_MATCHER(Float64Ceil)
 IS_UNOP_MATCHER(Float64RoundTruncate)
 IS_UNOP_MATCHER(Float64RoundTiesAway)
+IS_UNOP_MATCHER(NumberToInt32)
+IS_UNOP_MATCHER(NumberToUint32)
 #undef IS_UNOP_MATCHER
 
 }  // namespace compiler
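
The new factory functions mirror the existing ones, so test expectations read
the same way. A hypothetical gmock assertion using one of them, where
r.replacement() and the argument matchers stand in for whatever a reducer
unittest produces:

    // Sketch: asserting that a reduction produced a StoreField node.
    EXPECT_THAT(r.replacement(),
                IsStoreField(access, base, value, effect, control));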
index 870d555..f1f20cf 100644 (file)
@@ -21,6 +21,7 @@ class Unique;
 namespace compiler {
 
 // Forward declarations.
+class BufferAccess;
 class CallDescriptor;
 struct ElementAccess;
 struct FieldAccess;
@@ -87,15 +88,32 @@ Matcher<Node*> IsLoadField(const Matcher<FieldAccess>& access_matcher,
                            const Matcher<Node*>& base_matcher,
                            const Matcher<Node*>& effect_matcher,
                            const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsStoreField(const Matcher<FieldAccess>& access_matcher,
+                            const Matcher<Node*>& base_matcher,
+                            const Matcher<Node*>& value_matcher,
+                            const Matcher<Node*>& effect_matcher,
+                            const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsLoadBuffer(const Matcher<BufferAccess>& access_matcher,
+                            const Matcher<Node*>& buffer_matcher,
+                            const Matcher<Node*>& offset_matcher,
+                            const Matcher<Node*>& length_matcher,
+                            const Matcher<Node*>& effect_matcher,
+                            const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsStoreBuffer(const Matcher<BufferAccess>& access_matcher,
+                             const Matcher<Node*>& buffer_matcher,
+                             const Matcher<Node*>& offset_matcher,
+                             const Matcher<Node*>& length_matcher,
+                             const Matcher<Node*>& value_matcher,
+                             const Matcher<Node*>& effect_matcher,
+                             const Matcher<Node*>& control_matcher);
 Matcher<Node*> IsLoadElement(const Matcher<ElementAccess>& access_matcher,
                              const Matcher<Node*>& base_matcher,
                              const Matcher<Node*>& index_matcher,
-                             const Matcher<Node*>& length_matcher,
-                             const Matcher<Node*>& effect_matcher);
+                             const Matcher<Node*>& effect_matcher,
+                             const Matcher<Node*>& control_matcher);
 Matcher<Node*> IsStoreElement(const Matcher<ElementAccess>& access_matcher,
                               const Matcher<Node*>& base_matcher,
                               const Matcher<Node*>& index_matcher,
-                              const Matcher<Node*>& length_matcher,
                               const Matcher<Node*>& value_matcher,
                               const Matcher<Node*>& effect_matcher,
                               const Matcher<Node*>& control_matcher);
@@ -163,6 +181,12 @@ Matcher<Node*> IsFloat64Floor(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsFloat64Ceil(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsFloat64RoundTruncate(const Matcher<Node*>& input_matcher);
 Matcher<Node*> IsFloat64RoundTiesAway(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsToNumber(const Matcher<Node*>& base_matcher,
+                          const Matcher<Node*>& context_matcher,
+                          const Matcher<Node*>& effect_matcher,
+                          const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsNumberToInt32(const Matcher<Node*>& input_matcher);
+Matcher<Node*> IsNumberToUint32(const Matcher<Node*>& input_matcher);
 
 }  // namespace compiler
 }  // namespace internal
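
Note the changed shape of the element-access matchers: call sites drop the
length matcher and gain a control matcher, schematically:

    // before: IsLoadElement(access, base, index, length, effect)
    // after:  IsLoadElement(access, base, index, effect, control)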
index dbcdedb..12dedbd 100644 (file)
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
-#include "src/base/utils/random-number-generator.h"
-#include "src/compiler/register-allocator.h"
-#include "test/unittests/test-utils.h"
-#include "testing/gmock/include/gmock/gmock.h"
+#include "src/compiler/pipeline.h"
+#include "test/unittests/compiler/instruction-sequence-unittest.h"
 
 namespace v8 {
 namespace internal {
 namespace compiler {
 
-typedef BasicBlock::RpoNumber Rpo;
-
-namespace {
+class RegisterAllocatorTest : public InstructionSequenceTest {
+ public:
+  void Allocate() {
+    WireBlocks();
+    Pipeline::AllocateRegistersForTesting(config(), sequence(), true);
+  }
+};
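+// The tests below use the InstructionSequenceTest DSL: StartBlock()/EndBlock()
+// delimit basic blocks, Parameter()/DefineConstant() define virtual registers,
+// EmitOI() emits a single-output instruction, Phi()/Extend() build and extend
+// phis, and Branch()/Jump() wire block successors by relative offset.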
 
-static const char*
-    general_register_names_[RegisterConfiguration::kMaxGeneralRegisters];
-static const char*
-    double_register_names_[RegisterConfiguration::kMaxDoubleRegisters];
-static char register_names_[10 * (RegisterConfiguration::kMaxGeneralRegisters +
-                                  RegisterConfiguration::kMaxDoubleRegisters)];
 
+TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) {
+  // return p0 + p1;
+  StartBlock();
+  auto a_reg = Parameter();
+  auto b_reg = Parameter();
+  auto c_reg = EmitOI(Reg(1), Reg(a_reg, 1), Reg(b_reg, 0));
+  Return(c_reg);
+  EndBlock(Last());
 
-static void InitializeRegisterNames() {
-  char* loc = register_names_;
-  for (int i = 0; i < RegisterConfiguration::kMaxGeneralRegisters; ++i) {
-    general_register_names_[i] = loc;
-    loc += base::OS::SNPrintF(loc, 100, "gp_%d", i);
-    *loc++ = 0;
-  }
-  for (int i = 0; i < RegisterConfiguration::kMaxDoubleRegisters; ++i) {
-    double_register_names_[i] = loc;
-    loc += base::OS::SNPrintF(loc, 100, "fp_%d", i) + 1;
-    *loc++ = 0;
-  }
+  Allocate();
 }
 
-enum BlockCompletionType { kFallThrough, kBranch, kJump };
 
-struct BlockCompletion {
-  BlockCompletionType type_;
-  int vreg_;
-  int offset_0_;
-  int offset_1_;
-};
+TEST_F(RegisterAllocatorTest, SimpleLoop) {
+  // i = K;
+  // while(true) { i++ }
+  StartBlock();
+  auto i_reg = DefineConstant();
+  EndBlock();
 
-static const int kInvalidJumpOffset = kMinInt;
+  {
+    StartLoop(1);
 
-BlockCompletion FallThrough() {
-  BlockCompletion completion = {kFallThrough, -1, 1, kInvalidJumpOffset};
-  return completion;
-}
+    StartBlock();
+    auto phi = Phi(i_reg);
+    auto ipp = EmitOI(Same(), Reg(phi), Use(DefineConstant()));
+    Extend(phi, ipp);
+    EndBlock(Jump(0));
 
+    EndLoop();
+  }
 
-BlockCompletion Jump(int offset) {
-  BlockCompletion completion = {kJump, -1, offset, kInvalidJumpOffset};
-  return completion;
+  Allocate();
 }
 
 
-BlockCompletion Branch(int vreg, int left_offset, int right_offset) {
-  BlockCompletion completion = {kBranch, vreg, left_offset, right_offset};
-  return completion;
+TEST_F(RegisterAllocatorTest, SimpleBranch) {
+  // return i ? K1 : K2
+  StartBlock();
+  auto i = DefineConstant();
+  EndBlock(Branch(Reg(i), 1, 2));
+
+  StartBlock();
+  Return(DefineConstant());
+  EndBlock(Last());
+
+  StartBlock();
+  Return(DefineConstant());
+  EndBlock(Last());
+
+  Allocate();
 }
 
-}  // namespace
 
+TEST_F(RegisterAllocatorTest, SimpleDiamond) {
+  // return p0 ? p0 : p0
+  StartBlock();
+  auto param = Parameter();
+  EndBlock(Branch(Reg(param), 1, 2));
 
-class RegisterAllocatorTest : public TestWithZone {
- public:
-  static const int kDefaultNRegs = 4;
-
-  RegisterAllocatorTest()
-      : num_general_registers_(kDefaultNRegs),
-        num_double_registers_(kDefaultNRegs),
-        instruction_blocks_(zone()),
-        current_block_(nullptr),
-        is_last_block_(false) {
-    InitializeRegisterNames();
-  }
+  StartBlock();
+  EndBlock(Jump(2));
 
-  void SetNumRegs(int num_general_registers, int num_double_registers) {
-    CHECK(instruction_blocks_.empty());
-    num_general_registers_ = num_general_registers;
-    num_double_registers_ = num_double_registers;
-  }
+  StartBlock();
+  EndBlock(Jump(1));
 
-  RegisterConfiguration* config() {
-    if (config_.is_empty()) {
-      config_.Reset(new RegisterConfiguration(
-          num_general_registers_, num_double_registers_, num_double_registers_,
-          general_register_names_, double_register_names_));
-    }
-    return config_.get();
-  }
+  StartBlock();
+  Return(param);
+  EndBlock();
 
-  Frame* frame() {
-    if (frame_.is_empty()) {
-      frame_.Reset(new Frame());
-    }
-    return frame_.get();
-  }
+  Allocate();
+}
 
-  InstructionSequence* sequence() {
-    if (sequence_.is_empty()) {
-      sequence_.Reset(new InstructionSequence(zone(), &instruction_blocks_));
-    }
-    return sequence_.get();
-  }
 
-  RegisterAllocator* allocator() {
-    if (allocator_.is_empty()) {
-      allocator_.Reset(
-          new RegisterAllocator(config(), zone(), frame(), sequence()));
-    }
-    return allocator_.get();
-  }
+TEST_F(RegisterAllocatorTest, SimpleDiamondPhi) {
+  // return i ? K1 : K2
+  StartBlock();
+  EndBlock(Branch(Reg(DefineConstant()), 1, 2));
 
-  void StartLoop(int loop_blocks) {
-    CHECK(current_block_ == nullptr);
-    if (!loop_blocks_.empty()) {
-      CHECK(!loop_blocks_.back().loop_header_.IsValid());
-    }
-    LoopData loop_data = {Rpo::Invalid(), loop_blocks};
-    loop_blocks_.push_back(loop_data);
-  }
+  StartBlock();
+  auto t_val = DefineConstant();
+  EndBlock(Jump(2));
 
-  void EndLoop() {
-    CHECK(current_block_ == nullptr);
-    CHECK(!loop_blocks_.empty());
-    CHECK_EQ(0, loop_blocks_.back().expected_blocks_);
-    loop_blocks_.pop_back();
-  }
+  StartBlock();
+  auto f_val = DefineConstant();
+  EndBlock(Jump(1));
 
-  void StartLastBlock() {
-    CHECK(!is_last_block_);
-    is_last_block_ = true;
-    NewBlock();
-  }
+  StartBlock();
+  Return(Reg(Phi(t_val, f_val)));
+  EndBlock();
 
-  void StartBlock() {
-    CHECK(!is_last_block_);
-    NewBlock();
-  }
+  Allocate();
+}
 
-  void EndBlock(BlockCompletion completion = FallThrough()) {
-    completions_.push_back(completion);
-    switch (completion.type_) {
-      case kFallThrough:
-        if (is_last_block_) break;
-        // TODO(dcarney): we don't emit this after returns.
-        EmitFallThrough();
-        break;
-      case kJump:
-        EmitJump();
-        break;
-      case kBranch:
-        EmitBranch(completion.vreg_);
-        break;
-    }
-    CHECK(current_block_ != nullptr);
-    sequence()->EndBlock(current_block_->rpo_number());
-    current_block_ = nullptr;
-  }
 
-  void Allocate() {
-    CHECK_EQ(nullptr, current_block_);
-    CHECK(is_last_block_);
-    WireBlocks();
-    if (FLAG_trace_alloc || FLAG_trace_turbo) {
-      OFStream os(stdout);
-      PrintableInstructionSequence printable = {config(), sequence()};
-      os << "Before: " << std::endl << printable << std::endl;
-    }
-    allocator()->Allocate();
-    if (FLAG_trace_alloc || FLAG_trace_turbo) {
-      OFStream os(stdout);
-      PrintableInstructionSequence printable = {config(), sequence()};
-      os << "After: " << std::endl << printable << std::endl;
-    }
-  }
+TEST_F(RegisterAllocatorTest, DiamondManyPhis) {
+  const int kPhis = kDefaultNRegs * 2;
 
-  int NewReg() { return sequence()->NextVirtualRegister(); }
+  StartBlock();
+  EndBlock(Branch(Reg(DefineConstant()), 1, 2));
 
-  int Parameter() {
-    int vreg = NewReg();
-    InstructionOperand* outputs[1]{UseRegister(vreg)};
-    Emit(kArchNop, 1, outputs);
-    return vreg;
+  StartBlock();
+  VReg t_vals[kPhis];
+  for (int i = 0; i < kPhis; ++i) {
+    t_vals[i] = DefineConstant();
   }
+  EndBlock(Jump(2));
 
-  Instruction* Return(int vreg) {
-    InstructionOperand* inputs[1]{UseRegister(vreg)};
-    return Emit(kArchRet, 0, nullptr, 1, inputs);
+  StartBlock();
+  VReg f_vals[kPhis];
+  for (int i = 0; i < kPhis; ++i) {
+    f_vals[i] = DefineConstant();
   }
+  EndBlock(Jump(1));
 
-  PhiInstruction* Phi(int vreg) {
-    PhiInstruction* phi = new (zone()) PhiInstruction(zone(), NewReg());
-    phi->operands().push_back(vreg);
-    current_block_->AddPhi(phi);
-    return phi;
+  StartBlock();
+  TestOperand merged[kPhis];
+  for (int i = 0; i < kPhis; ++i) {
+    merged[i] = Use(Phi(t_vals[i], f_vals[i]));
   }
+  Return(EmitCall(Slot(-1), kPhis, merged));
+  EndBlock();
 
-  int DefineConstant(int32_t imm = 0) {
-    int virtual_register = NewReg();
-    sequence()->AddConstant(virtual_register, Constant(imm));
-    InstructionOperand* outputs[1]{
-        ConstantOperand::Create(virtual_register, zone())};
-    Emit(kArchNop, 1, outputs);
-    return virtual_register;
-  }
+  Allocate();
+}
 
-  ImmediateOperand* Immediate(int32_t imm = 0) {
-    int index = sequence()->AddImmediate(Constant(imm));
-    return ImmediateOperand::Create(index, zone());
-  }
 
-  Instruction* EmitFRI(int output_vreg, int input_vreg_0) {
-    InstructionOperand* outputs[1]{DefineSameAsFirst(output_vreg)};
-    InstructionOperand* inputs[2]{UseRegister(input_vreg_0), Immediate()};
-    return Emit(kArchNop, 1, outputs, 2, inputs);
-  }
+TEST_F(RegisterAllocatorTest, DoubleDiamondManyRedundantPhis) {
+  const int kPhis = kDefaultNRegs * 2;
 
-  Instruction* EmitFRU(int output_vreg, int input_vreg_0, int input_vreg_1) {
-    InstructionOperand* outputs[1]{DefineSameAsFirst(output_vreg)};
-    InstructionOperand* inputs[2]{UseRegister(input_vreg_0), Use(input_vreg_1)};
-    return Emit(kArchNop, 1, outputs, 2, inputs);
+  // First diamond.
+  StartBlock();
+  VReg vals[kPhis];
+  for (int i = 0; i < kPhis; ++i) {
+    vals[i] = Parameter(Slot(-1 - i));
   }
+  EndBlock(Branch(Reg(DefineConstant()), 1, 2));
 
-  Instruction* EmitRRR(int output_vreg, int input_vreg_0, int input_vreg_1) {
-    InstructionOperand* outputs[1]{UseRegister(output_vreg)};
-    InstructionOperand* inputs[2]{UseRegister(input_vreg_0),
-                                  UseRegister(input_vreg_1)};
-    return Emit(kArchNop, 1, outputs, 2, inputs);
-  }
+  StartBlock();
+  EndBlock(Jump(2));
 
- private:
-  InstructionOperand* Unallocated(int vreg,
-                                  UnallocatedOperand::ExtendedPolicy policy) {
-    UnallocatedOperand* op = new (zone()) UnallocatedOperand(policy);
-    op->set_virtual_register(vreg);
-    return op;
-  }
+  StartBlock();
+  EndBlock(Jump(1));
 
-  InstructionOperand* Unallocated(int vreg,
-                                  UnallocatedOperand::ExtendedPolicy policy,
-                                  UnallocatedOperand::Lifetime lifetime) {
-    UnallocatedOperand* op = new (zone()) UnallocatedOperand(policy, lifetime);
-    op->set_virtual_register(vreg);
-    return op;
-  }
+  // Second diamond.
+  StartBlock();
+  EndBlock(Branch(Reg(DefineConstant()), 1, 2));
 
-  InstructionOperand* UseRegister(int vreg) {
-    return Unallocated(vreg, UnallocatedOperand::MUST_HAVE_REGISTER);
-  }
+  StartBlock();
+  EndBlock(Jump(2));
 
-  InstructionOperand* DefineSameAsFirst(int vreg) {
-    return Unallocated(vreg, UnallocatedOperand::SAME_AS_FIRST_INPUT);
-  }
+  StartBlock();
+  EndBlock(Jump(1));
 
-  InstructionOperand* Use(int vreg) {
-    return Unallocated(vreg, UnallocatedOperand::NONE,
-                       UnallocatedOperand::USED_AT_START);
+  StartBlock();
+  TestOperand merged[kPhis];
+  for (int i = 0; i < kPhis; ++i) {
+    merged[i] = Use(Phi(vals[i], vals[i]));
   }
+  Return(EmitCall(Reg(0), kPhis, merged));
+  EndBlock();
 
-  void EmitBranch(int vreg) {
-    InstructionOperand* inputs[4]{UseRegister(vreg), Immediate(), Immediate(),
-                                  Immediate()};
-    InstructionCode opcode = kArchJmp | FlagsModeField::encode(kFlags_branch) |
-                             FlagsConditionField::encode(kEqual);
-    Instruction* instruction =
-        NewInstruction(opcode, 0, nullptr, 4, inputs)->MarkAsControl();
-    sequence()->AddInstruction(instruction);
-  }
+  Allocate();
+}
 
-  void EmitFallThrough() {
-    Instruction* instruction =
-        NewInstruction(kArchNop, 0, nullptr)->MarkAsControl();
-    sequence()->AddInstruction(instruction);
-  }
 
-  void EmitJump() {
-    InstructionOperand* inputs[1]{Immediate()};
-    Instruction* instruction =
-        NewInstruction(kArchJmp, 0, nullptr, 1, inputs)->MarkAsControl();
-    sequence()->AddInstruction(instruction);
-  }
+TEST_F(RegisterAllocatorTest, RegressionPhisNeedTooManyRegisters) {
+  const size_t kNumRegs = 3;
+  const size_t kParams = kNumRegs + 1;
+  // Override number of registers.
+  SetNumRegs(kNumRegs, kNumRegs);
 
-  Instruction* NewInstruction(InstructionCode code, size_t outputs_size,
-                              InstructionOperand** outputs,
-                              size_t inputs_size = 0,
-                              InstructionOperand* *inputs = nullptr,
-                              size_t temps_size = 0,
-                              InstructionOperand* *temps = nullptr) {
-    CHECK_NE(nullptr, current_block_);
-    return Instruction::New(zone(), code, outputs_size, outputs, inputs_size,
-                            inputs, temps_size, temps);
+  StartBlock();
+  auto constant = DefineConstant();
+  VReg parameters[kParams];
+  for (size_t i = 0; i < arraysize(parameters); ++i) {
+    parameters[i] = DefineConstant();
   }
+  EndBlock();
 
-  Instruction* Emit(InstructionCode code, size_t outputs_size,
-                    InstructionOperand** outputs, size_t inputs_size = 0,
-                    InstructionOperand* *inputs = nullptr,
-                    size_t temps_size = 0,
-                    InstructionOperand* *temps = nullptr) {
-    Instruction* instruction = NewInstruction(
-        code, outputs_size, outputs, inputs_size, inputs, temps_size, temps);
-    sequence()->AddInstruction(instruction);
-    return instruction;
-  }
+  PhiInstruction* phis[kParams];
+  {
+    StartLoop(2);
+
+    // Loop header.
+    StartBlock();
 
-  InstructionBlock* NewBlock() {
-    CHECK(current_block_ == nullptr);
-    BasicBlock::Id block_id =
-        BasicBlock::Id::FromSize(instruction_blocks_.size());
-    Rpo rpo = Rpo::FromInt(block_id.ToInt());
-    Rpo loop_header = Rpo::Invalid();
-    Rpo loop_end = Rpo::Invalid();
-    if (!loop_blocks_.empty()) {
-      auto& loop_data = loop_blocks_.back();
-      // This is a loop header.
-      if (!loop_data.loop_header_.IsValid()) {
-        loop_end = Rpo::FromInt(block_id.ToInt() + loop_data.expected_blocks_);
-        loop_data.expected_blocks_--;
-        loop_data.loop_header_ = rpo;
-      } else {
-        // This is a loop body.
-        CHECK_NE(0, loop_data.expected_blocks_);
-        // TODO(dcarney): handle nested loops.
-        loop_data.expected_blocks_--;
-        loop_header = loop_data.loop_header_;
-      }
+    for (size_t i = 0; i < arraysize(parameters); ++i) {
+      phis[i] = Phi(parameters[i]);
     }
-    // Construct instruction block.
-    InstructionBlock* instruction_block = new (zone()) InstructionBlock(
-        zone(), block_id, rpo, rpo, loop_header, loop_end, false);
-    instruction_blocks_.push_back(instruction_block);
-    current_block_ = instruction_block;
-    sequence()->StartBlock(rpo);
-    return instruction_block;
-  }
 
-  void WireBlocks() {
-    CHECK(instruction_blocks_.size() == completions_.size());
-    size_t offset = 0;
-    size_t size = instruction_blocks_.size();
-    for (const auto& completion : completions_) {
-      switch (completion.type_) {
-        case kFallThrough:
-          if (offset == size - 1) break;
-        // Fallthrough.
-        case kJump:
-          WireBlock(offset, completion.offset_0_);
-          break;
-        case kBranch:
-          WireBlock(offset, completion.offset_0_);
-          WireBlock(offset, completion.offset_1_);
-          break;
-      }
-      ++offset;
+    // Perform some computations,
+    // something like phi[i] += const.
+    for (size_t i = 0; i < arraysize(parameters); ++i) {
+      auto result = EmitOI(Same(), Reg(phis[i]), Use(constant));
+      Extend(phis[i], result);
     }
-  }
 
-  void WireBlock(size_t block_offset, int jump_offset) {
-    size_t target_block_offset =
-        block_offset + static_cast<size_t>(jump_offset);
-    CHECK(block_offset < instruction_blocks_.size());
-    CHECK(target_block_offset < instruction_blocks_.size());
-    InstructionBlock* block = instruction_blocks_[block_offset];
-    InstructionBlock* target = instruction_blocks_[target_block_offset];
-    block->successors().push_back(target->rpo_number());
-    target->predecessors().push_back(block->rpo_number());
-  }
+    EndBlock(Branch(Reg(DefineConstant()), 1, 2));
 
-  struct LoopData {
-    Rpo loop_header_;
-    int expected_blocks_;
-  };
-  typedef std::vector<LoopData> LoopBlocks;
-  typedef std::vector<BlockCompletion> Completions;
-
-  SmartPointer<RegisterConfiguration> config_;
-  SmartPointer<Frame> frame_;
-  SmartPointer<RegisterAllocator> allocator_;
-  SmartPointer<InstructionSequence> sequence_;
-  int num_general_registers_;
-  int num_double_registers_;
-
-  // Block building state.
-  InstructionBlocks instruction_blocks_;
-  Completions completions_;
-  LoopBlocks loop_blocks_;
-  InstructionBlock* current_block_;
-  bool is_last_block_;
-};
+    // Jump back to loop header.
+    StartBlock();
+    EndBlock(Jump(-1));
 
+    EndLoop();
+  }
 
-TEST_F(RegisterAllocatorTest, CanAllocateThreeRegisters) {
-  StartLastBlock();
-  int a_reg = Parameter();
-  int b_reg = Parameter();
-  int c_reg = NewReg();
-  Instruction* res = EmitRRR(c_reg, a_reg, b_reg);
-  Return(c_reg);
+  StartBlock();
+  Return(DefineConstant());
   EndBlock();
 
   Allocate();
-
-  ASSERT_TRUE(res->OutputAt(0)->IsRegister());
 }
 
 
-TEST_F(RegisterAllocatorTest, SimpleLoop) {
-  // i = K;
-  // while(true) { i++ }
+TEST_F(RegisterAllocatorTest, SpillPhi) {
+  StartBlock();
+  EndBlock(Branch(Imm(), 1, 2));
 
   StartBlock();
-  int i_reg = DefineConstant();
+  auto left = Define(Reg(0));
+  EndBlock(Jump(2));
+
+  StartBlock();
+  auto right = Define(Reg(0));
   EndBlock();
 
-  {
-    StartLoop(1);
+  StartBlock();
+  auto phi = Phi(left, right);
+  EmitCall(Slot(-1));
+  Return(Reg(phi));
+  EndBlock();
 
-    StartLastBlock();
-    PhiInstruction* phi = Phi(i_reg);
-    int ipp = NewReg();
-    EmitFRU(ipp, phi->virtual_register(), DefineConstant());
-    phi->operands().push_back(ipp);
-    EndBlock(Jump(0));
+  Allocate();
+}
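
The EmitCall in the final block is what forces the spill: a call clobbers the allocatable registers, so a value live across it must survive in a stack slot. A hedged sketch of that rule, with instruction positions modeled as plain ints:

    // Simplified: a value defined before and used after a call cannot stay in
    // a caller-clobbered register across it, so it must be spilled.
    bool MustSpillAcrossCall(int def_pos, int use_pos, int call_pos) {
      return def_pos < call_pos && call_pos < use_pos;
    }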
 
-    EndLoop();
+
+TEST_F(RegisterAllocatorTest, MoveLotsOfConstants) {
+  StartBlock();
+  VReg constants[kDefaultNRegs];
+  for (size_t i = 0; i < arraysize(constants); ++i) {
+    constants[i] = DefineConstant();
+  }
+  TestOperand call_ops[kDefaultNRegs * 2];
+  for (int i = 0; i < kDefaultNRegs; ++i) {
+    call_ops[i] = Reg(constants[i], i);
   }
+  for (int i = 0; i < kDefaultNRegs; ++i) {
+    call_ops[i + kDefaultNRegs] = Slot(constants[i], i);
+  }
+  EmitCall(Slot(-1), arraysize(call_ops), call_ops);
+  EndBlock(Last());
 
   Allocate();
 }
 
 
-TEST_F(RegisterAllocatorTest, SimpleBranch) {
-  // return i ? K1 : K2
+TEST_F(RegisterAllocatorTest, SplitBeforeInstruction) {
+  const int kNumRegs = 6;
+  SetNumRegs(kNumRegs, kNumRegs);
+
+  StartBlock();
+
+  // Stack parameters/spilled values.
+  auto p_0 = Define(Slot(-1));
+  auto p_1 = Define(Slot(-2));
+
+  // Fill registers.
+  VReg values[kNumRegs];
+  for (size_t i = 0; i < arraysize(values); ++i) {
+    values[i] = Define(Reg(static_cast<int>(i)));
+  }
+
+  // values[0] will be split in the second half of this instruction.
+  // Models Intel mod instructions.
+  EmitOI(Reg(0), Reg(p_0, 1), UniqueReg(p_1));
+  EmitI(Reg(values[0], 0));
+  EndBlock(Last());
+
+  Allocate();
+}
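
"Models Intel mod instructions" refers to x64 idiv, whose results land in fixed registers; a value already occupying one of them has to be split in the middle of the instruction. A minimal sketch of a live-range split, assuming ranges are plain [start, end) intervals:

    struct LiveRange { int start; int end; };

    // Cut the range at position p: the head keeps its current location, the
    // tail is free to take a different register or a spill slot.
    LiveRange SplitAt(LiveRange* range, int p) {
      LiveRange tail{p, range->end};
      range->end = p;
      return tail;
    }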
+
+
+TEST_F(RegisterAllocatorTest, NestedDiamondPhiMerge) {
+  // Outer diamond.
   StartBlock();
-  int i_reg = DefineConstant();
-  EndBlock(Branch(i_reg, 1, 2));
+  EndBlock(Branch(Imm(), 1, 5));
 
+  // Diamond 1
   StartBlock();
-  Return(DefineConstant());
+  EndBlock(Branch(Imm(), 1, 2));
+
+  StartBlock();
+  auto ll = Define(Reg());
+  EndBlock(Jump(2));
+
+  StartBlock();
+  auto lr = Define(Reg());
   EndBlock();
 
-  StartLastBlock();
-  Return(DefineConstant());
+  StartBlock();
+  auto l_phi = Phi(ll, lr);
+  EndBlock(Jump(5));
+
+  // Diamond 2
+  StartBlock();
+  EndBlock(Branch(Imm(), 1, 2));
+
+  StartBlock();
+  auto rl = Define(Reg());
+  EndBlock(Jump(2));
+
+  StartBlock();
+  auto rr = Define(Reg());
+  EndBlock();
+
+  StartBlock();
+  auto r_phi = Phi(rl, rr);
+  EndBlock();
+
+  // Outer diamond merge.
+  StartBlock();
+  auto phi = Phi(l_phi, r_phi);
+  Return(Reg(phi));
   EndBlock();
 
   Allocate();
 }
 
 
-TEST_F(RegisterAllocatorTest, RegressionPhisNeedTooManyRegisters) {
-  const size_t kNumRegs = 3;
-  const size_t kParams = kNumRegs + 1;
-  int parameters[kParams];
+TEST_F(RegisterAllocatorTest, NestedDiamondPhiMergeDifferent) {
+  // Outer diamond.
+  StartBlock();
+  EndBlock(Branch(Imm(), 1, 5));
 
-  // Override number of registers.
-  SetNumRegs(kNumRegs, kNumRegs);
+  // Diamond 1
+  StartBlock();
+  EndBlock(Branch(Imm(), 1, 2));
 
-  // Initial block.
   StartBlock();
-  int constant = DefineConstant();
-  for (size_t i = 0; i < arraysize(parameters); ++i) {
-    parameters[i] = DefineConstant();
-  }
+  auto ll = Define(Reg(0));
+  EndBlock(Jump(2));
+
+  StartBlock();
+  auto lr = Define(Reg(1));
   EndBlock();
 
-  PhiInstruction* phis[kParams];
-  {
-    StartLoop(2);
+  StartBlock();
+  auto l_phi = Phi(ll, lr);
+  EndBlock(Jump(5));
 
-    // Loop header.
-    StartBlock();
+  // Diamond 2
+  StartBlock();
+  EndBlock(Branch(Imm(), 1, 2));
 
-    for (size_t i = 0; i < arraysize(parameters); ++i) {
-      phis[i] = Phi(parameters[i]);
-    }
+  StartBlock();
+  auto rl = Define(Reg(2));
+  EndBlock(Jump(2));
 
-    // Perform some computations.
-    // something like phi[i] += const
-    for (size_t i = 0; i < arraysize(parameters); ++i) {
-      int result = NewReg();
-      EmitFRU(result, phis[i]->virtual_register(), constant);
-      phis[i]->operands().push_back(result);
-    }
+  StartBlock();
+  auto rr = Define(Reg(3));
+  EndBlock();
 
-    EndBlock(Branch(DefineConstant(), 1, 2));
+  StartBlock();
+  auto r_phi = Phi(rl, rr);
+  EndBlock();
 
-    // Jump back to loop header.
-    StartBlock();
-    EndBlock(Jump(-1));
+  // Outer diamond merge.
+  StartBlock();
+  auto phi = Phi(l_phi, r_phi);
+  Return(Reg(phi));
+  EndBlock();
 
-    EndLoop();
+  Allocate();
+}
+
+
+TEST_F(RegisterAllocatorTest, RegressionSplitBeforeAndMove) {
+  StartBlock();
+
+  // Fill registers.
+  VReg values[kDefaultNRegs];
+  for (size_t i = 0; i < arraysize(values); ++i) {
+    if (i == 0 || i == 1) continue;  // Leave a hole for c_1 to take.
+    values[i] = Define(Reg(static_cast<int>(i)));
   }
 
-  // End block.
-  StartLastBlock();
+  auto c_0 = DefineConstant();
+  auto c_1 = DefineConstant();
 
-  // Return sum.
-  Return(DefineConstant());
-  EndBlock();
+  EmitOI(Reg(1), Reg(c_0, 0), UniqueReg(c_1));
+
+  // Use previous values to force c_1 to split before the previous instruction.
+  for (size_t i = 0; i < arraysize(values); ++i) {
+    if (i == 0 || i == 1) continue;
+    EmitI(Reg(values[i], static_cast<int>(i)));
+  }
+
+  EndBlock(Last());
 
   Allocate();
 }
 
+
+TEST_F(RegisterAllocatorTest, RegressionSpillTwice) {
+  StartBlock();
+  auto p_0 = Parameter(Reg(1));
+  EmitCall(Slot(-2), Unique(p_0), Reg(p_0, 1));
+  EndBlock(Last());
+
+  Allocate();
+}
+
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index 6dbd7ad..51efc83 100644 (file)
@@ -10,6 +10,7 @@
 using testing::AllOf;
 using testing::Capture;
 using testing::CaptureEq;
+using testing::Not;
 
 namespace v8 {
 namespace internal {
@@ -33,12 +34,12 @@ TEST_F(SelectLoweringTest, SelectWithSameConditions) {
   Node* const p2 = Parameter(2);
   Node* const p3 = Parameter(3);
   Node* const p4 = Parameter(4);
+  Node* const s0 = graph()->NewNode(common()->Select(kMachInt32), p0, p1, p2);
 
   Capture<Node*> branch;
   Capture<Node*> merge;
   {
-    Reduction const r =
-        Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, p1, p2));
+    Reduction const r = Reduce(s0);
     ASSERT_TRUE(r.Changed());
     EXPECT_THAT(
         r.replacement(),
@@ -55,6 +56,15 @@ TEST_F(SelectLoweringTest, SelectWithSameConditions) {
     ASSERT_TRUE(r.Changed());
     EXPECT_THAT(r.replacement(), IsPhi(kMachInt32, p3, p4, CaptureEq(&merge)));
   }
+  {
+    // We must not reuse the diamond if it is reachable from either else/then
+    // value of the Select, because the resulting graph cannot be scheduled.
+    Reduction const r =
+        Reduce(graph()->NewNode(common()->Select(kMachInt32), p0, s0, p0));
+    ASSERT_TRUE(r.Changed());
+    EXPECT_THAT(r.replacement(),
+                IsPhi(kMachInt32, s0, p0, Not(CaptureEq(&merge))));
+  }
 }
 
 }  // namespace compiler
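
A hedged sketch of the reuse guard the new test case pins down; ReachableFrom stands in for whatever reachability query the real reducer performs:

    // Reusing an existing diamond for Select(c, vtrue, vfalse) is only sound
    // when neither value input already depends on that diamond's merge;
    // otherwise the replacement phi would feed its own merge and the graph
    // could no longer be scheduled. ReachableFrom() is an assumed helper.
    bool CanReuseDiamond(Node* vtrue, Node* vfalse, Node* merge) {
      return !ReachableFrom(vtrue, merge) && !ReachableFrom(vfalse, merge);
    }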
index 465ee84..066cbe9 100644 (file)
@@ -18,11 +18,11 @@ class SimplifiedOperatorReducerTest : public GraphTest {
  public:
   explicit SimplifiedOperatorReducerTest(int num_parameters = 1)
       : GraphTest(num_parameters), simplified_(zone()) {}
-  virtual ~SimplifiedOperatorReducerTest() {}
+  ~SimplifiedOperatorReducerTest() OVERRIDE {}
 
  protected:
   Reduction Reduce(Node* node) {
-    MachineOperatorBuilder machine;
+    MachineOperatorBuilder machine(zone());
     JSOperatorBuilder javascript(zone());
     JSGraph jsgraph(graph(), common(), &javascript, &machine);
     SimplifiedOperatorReducer reducer(&jsgraph);
@@ -43,7 +43,7 @@ class SimplifiedOperatorReducerTestWithParam
  public:
   explicit SimplifiedOperatorReducerTestWithParam(int num_parameters = 1)
       : SimplifiedOperatorReducerTest(num_parameters) {}
-  virtual ~SimplifiedOperatorReducerTestWithParam() {}
+  ~SimplifiedOperatorReducerTestWithParam() OVERRIDE {}
 };
 
 
@@ -478,126 +478,6 @@ TEST_F(SimplifiedOperatorReducerTest, ChangeUint32ToTagged) {
   }
 }
 
-
-// -----------------------------------------------------------------------------
-// LoadElement
-
-
-TEST_F(SimplifiedOperatorReducerTest, LoadElementWithConstantKeyAndLength) {
-  ElementAccess const access = {kTypedArrayBoundsCheck, kUntaggedBase, 0,
-                                Type::Any(), kMachAnyTagged};
-  ElementAccess access_nocheck = access;
-  access_nocheck.bounds_check = kNoBoundsCheck;
-  Node* const base = Parameter(0);
-  Node* const effect = graph()->start();
-  {
-    Node* const key = NumberConstant(-42.0);
-    Node* const length = NumberConstant(100.0);
-    Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
-                                          base, key, length, effect));
-    ASSERT_FALSE(r.Changed());
-  }
-  {
-    Node* const key = NumberConstant(-0.0);
-    Node* const length = NumberConstant(1.0);
-    Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
-                                          base, key, length, effect));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(),
-                IsLoadElement(access_nocheck, base, key, length, effect));
-  }
-  {
-    Node* const key = NumberConstant(0);
-    Node* const length = NumberConstant(1);
-    Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
-                                          base, key, length, effect));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(),
-                IsLoadElement(access_nocheck, base, key, length, effect));
-  }
-  {
-    Node* const key = NumberConstant(42.2);
-    Node* const length = NumberConstant(128);
-    Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
-                                          base, key, length, effect));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(),
-                IsLoadElement(access_nocheck, base, key, length, effect));
-  }
-  {
-    Node* const key = NumberConstant(39.2);
-    Node* const length = NumberConstant(32.0);
-    Reduction r = Reduce(graph()->NewNode(simplified()->LoadElement(access),
-                                          base, key, length, effect));
-    ASSERT_FALSE(r.Changed());
-  }
-}
-
-
-// -----------------------------------------------------------------------------
-// StoreElement
-
-
-TEST_F(SimplifiedOperatorReducerTest, StoreElementWithConstantKeyAndLength) {
-  ElementAccess const access = {kTypedArrayBoundsCheck, kUntaggedBase, 0,
-                                Type::Any(), kMachAnyTagged};
-  ElementAccess access_nocheck = access;
-  access_nocheck.bounds_check = kNoBoundsCheck;
-  Node* const base = Parameter(0);
-  Node* const value = Parameter(1);
-  Node* const effect = graph()->start();
-  Node* const control = graph()->start();
-  {
-    Node* const key = NumberConstant(-72.1);
-    Node* const length = NumberConstant(0.0);
-    Reduction r =
-        Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
-                                length, value, effect, control));
-    ASSERT_FALSE(r.Changed());
-  }
-  {
-    Node* const key = NumberConstant(-0.0);
-    Node* const length = NumberConstant(999);
-    Reduction r =
-        Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
-                                length, value, effect, control));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(),
-                IsStoreElement(access_nocheck, base, key, length, value, effect,
-                               control));
-  }
-  {
-    Node* const key = NumberConstant(0);
-    Node* const length = NumberConstant(1);
-    Reduction r =
-        Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
-                                length, value, effect, control));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(),
-                IsStoreElement(access_nocheck, base, key, length, value, effect,
-                               control));
-  }
-  {
-    Node* const key = NumberConstant(42.2);
-    Node* const length = NumberConstant(128);
-    Reduction r =
-        Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
-                                length, value, effect, control));
-    ASSERT_TRUE(r.Changed());
-    EXPECT_THAT(r.replacement(),
-                IsStoreElement(access_nocheck, base, key, length, value, effect,
-                               control));
-  }
-  {
-    Node* const key = NumberConstant(39.2);
-    Node* const length = NumberConstant(32.0);
-    Reduction r =
-        Reduce(graph()->NewNode(simplified()->StoreElement(access), base, key,
-                                length, value, effect, control));
-    ASSERT_FALSE(r.Changed());
-  }
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index 031a897..e7fceba 100644 (file)
@@ -2,9 +2,11 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
+#include "src/compiler/opcodes.h"
+#include "src/compiler/operator.h"
+#include "src/compiler/operator-properties.h"
 #include "src/compiler/simplified-operator.h"
-
-#include "src/compiler/operator-properties-inl.h"
+#include "src/types-inl.h"
 #include "test/unittests/test-utils.h"
 
 namespace v8 {
@@ -37,6 +39,7 @@ const PureOperator kPureOperators[] = {
         Operator::kPure | properties, input_count        \
   }
     PURE(BooleanNot, Operator::kNoProperties, 1),
+    PURE(BooleanToNumber, Operator::kNoProperties, 1),
     PURE(NumberEqual, Operator::kCommutative, 2),
     PURE(NumberLessThan, Operator::kNoProperties, 2),
     PURE(NumberLessThanOrEqual, Operator::kNoProperties, 2),
@@ -58,7 +61,9 @@ const PureOperator kPureOperators[] = {
     PURE(ChangeUint32ToTagged, Operator::kNoProperties, 1),
     PURE(ChangeFloat64ToTagged, Operator::kNoProperties, 1),
     PURE(ChangeBoolToBit, Operator::kNoProperties, 1),
-    PURE(ChangeBitToBool, Operator::kNoProperties, 1)
+    PURE(ChangeBitToBool, Operator::kNoProperties, 1),
+    PURE(ObjectIsSmi, Operator::kNoProperties, 1),
+    PURE(ObjectIsNonNegativeSmi, Operator::kNoProperties, 1)
 #undef PURE
 };
 
@@ -114,49 +119,116 @@ INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest, SimplifiedPureOperatorTest,
 
 
 // -----------------------------------------------------------------------------
+// Buffer access operators.
+
+
+namespace {
+
+const ExternalArrayType kExternalArrayTypes[] = {
+    kExternalUint8Array,   kExternalInt8Array,   kExternalUint16Array,
+    kExternalInt16Array,   kExternalUint32Array, kExternalInt32Array,
+    kExternalFloat32Array, kExternalFloat64Array};
+
+}  // namespace
+
+
+class SimplifiedBufferAccessOperatorTest
+    : public TestWithZone,
+      public ::testing::WithParamInterface<ExternalArrayType> {};
+
+
+TEST_P(SimplifiedBufferAccessOperatorTest, InstancesAreGloballyShared) {
+  BufferAccess const access(GetParam());
+  SimplifiedOperatorBuilder simplified1(zone());
+  SimplifiedOperatorBuilder simplified2(zone());
+  EXPECT_EQ(simplified1.LoadBuffer(access), simplified2.LoadBuffer(access));
+  EXPECT_EQ(simplified1.StoreBuffer(access), simplified2.StoreBuffer(access));
+}
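
The pointer equality asserted above holds because parameterized simplified operators are shared rather than rebuilt per builder. A hedged sketch of one way to get that property; the int key, the MakeOperator helper, and the map storage are placeholders, not V8's actual scheme:

    #include <map>

    // Equal parameters hand back the pointer-identical operator instance.
    const Operator* CachedOperator(int key) {
      static std::map<int, const Operator*> cache;
      auto it = cache.find(key);
      if (it == cache.end()) it = cache.emplace(key, MakeOperator(key)).first;
      return it->second;
    }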
+
+
+TEST_P(SimplifiedBufferAccessOperatorTest, LoadBuffer) {
+  SimplifiedOperatorBuilder simplified(zone());
+  BufferAccess const access(GetParam());
+  const Operator* op = simplified.LoadBuffer(access);
+
+  EXPECT_EQ(IrOpcode::kLoadBuffer, op->opcode());
+  EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
+  EXPECT_EQ(access, BufferAccessOf(op));
+
+  EXPECT_EQ(3, op->ValueInputCount());
+  EXPECT_EQ(1, op->EffectInputCount());
+  EXPECT_EQ(1, op->ControlInputCount());
+  EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(1, op->ValueOutputCount());
+  EXPECT_EQ(1, op->EffectOutputCount());
+  EXPECT_EQ(0, op->ControlOutputCount());
+}
+
+
+TEST_P(SimplifiedBufferAccessOperatorTest, StoreBuffer) {
+  SimplifiedOperatorBuilder simplified(zone());
+  BufferAccess const access(GetParam());
+  const Operator* op = simplified.StoreBuffer(access);
+
+  EXPECT_EQ(IrOpcode::kStoreBuffer, op->opcode());
+  EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
+  EXPECT_EQ(access, BufferAccessOf(op));
+
+  EXPECT_EQ(4, op->ValueInputCount());
+  EXPECT_EQ(1, op->EffectInputCount());
+  EXPECT_EQ(1, op->ControlInputCount());
+  EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(op));
+
+  EXPECT_EQ(0, op->ValueOutputCount());
+  EXPECT_EQ(1, op->EffectOutputCount());
+  EXPECT_EQ(0, op->ControlOutputCount());
+}
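
The GetTotalInputCount expectations here are simply the per-kind sums: 3 value + 1 effect + 1 control = 5 inputs for LoadBuffer, and 4 + 1 + 1 = 6 for StoreBuffer.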
+
+
+INSTANTIATE_TEST_CASE_P(SimplifiedOperatorTest,
+                        SimplifiedBufferAccessOperatorTest,
+                        ::testing::ValuesIn(kExternalArrayTypes));
+
+
+// -----------------------------------------------------------------------------
 // Element access operators.
 
+
 namespace {
 
 const ElementAccess kElementAccesses[] = {
-    {kNoBoundsCheck, kTaggedBase, FixedArray::kHeaderSize, Type::Any(),
-     kMachAnyTagged},
-    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
-     Type::Any(), kMachInt8},
-    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
-     Type::Any(), kMachInt16},
-    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
-     Type::Any(), kMachInt32},
-    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
-     Type::Any(), kMachUint8},
-    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
-     Type::Any(), kMachUint16},
-    {kNoBoundsCheck, kUntaggedBase, kNonHeapObjectHeaderSize - kHeapObjectTag,
-     Type::Any(), kMachUint32},
-    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt8},
-    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
-    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt16},
-    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
-    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Signed32(), kMachInt32},
-    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
-    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Number(), kRepFloat32},
-    {kTypedArrayBoundsCheck, kUntaggedBase, 0, Type::Number(), kRepFloat64},
-    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
-     Type::Signed32(), kMachInt8},
-    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
-     Type::Unsigned32(), kMachUint8},
-    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
-     Type::Signed32(), kMachInt16},
-    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
-     Type::Unsigned32(), kMachUint16},
-    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
-     Type::Signed32(), kMachInt32},
-    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
-     Type::Unsigned32(), kMachUint32},
-    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
-     Type::Number(), kRepFloat32},
-    {kTypedArrayBoundsCheck, kTaggedBase, FixedTypedArrayBase::kDataOffset,
-     Type::Number(), kRepFloat64}};
+    {kTaggedBase, FixedArray::kHeaderSize, Type::Any(), kMachAnyTagged},
+    {kUntaggedBase, 0, Type::Any(), kMachInt8},
+    {kUntaggedBase, 0, Type::Any(), kMachInt16},
+    {kUntaggedBase, 0, Type::Any(), kMachInt32},
+    {kUntaggedBase, 0, Type::Any(), kMachUint8},
+    {kUntaggedBase, 0, Type::Any(), kMachUint16},
+    {kUntaggedBase, 0, Type::Any(), kMachUint32},
+    {kUntaggedBase, 0, Type::Signed32(), kMachInt8},
+    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint8},
+    {kUntaggedBase, 0, Type::Signed32(), kMachInt16},
+    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint16},
+    {kUntaggedBase, 0, Type::Signed32(), kMachInt32},
+    {kUntaggedBase, 0, Type::Unsigned32(), kMachUint32},
+    {kUntaggedBase, 0, Type::Number(), kRepFloat32},
+    {kUntaggedBase, 0, Type::Number(), kRepFloat64},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+     kMachInt8},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+     kMachUint8},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+     kMachInt16},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+     kMachUint16},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Signed32(),
+     kMachInt32},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Unsigned32(),
+     kMachUint32},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+     kRepFloat32},
+    {kTaggedBase, FixedTypedArrayBase::kDataOffset, Type::Number(),
+     kRepFloat64}};
 
 }  // namespace
 
@@ -175,9 +247,9 @@ TEST_P(SimplifiedElementAccessOperatorTest, LoadElement) {
   EXPECT_EQ(Operator::kNoThrow | Operator::kNoWrite, op->properties());
   EXPECT_EQ(access, ElementAccessOf(op));
 
-  EXPECT_EQ(3, op->ValueInputCount());
+  EXPECT_EQ(2, op->ValueInputCount());
   EXPECT_EQ(1, op->EffectInputCount());
-  EXPECT_EQ(0, op->ControlInputCount());
+  EXPECT_EQ(1, op->ControlInputCount());
   EXPECT_EQ(4, OperatorProperties::GetTotalInputCount(op));
 
   EXPECT_EQ(1, op->ValueOutputCount());
@@ -195,10 +267,10 @@ TEST_P(SimplifiedElementAccessOperatorTest, StoreElement) {
   EXPECT_EQ(Operator::kNoRead | Operator::kNoThrow, op->properties());
   EXPECT_EQ(access, ElementAccessOf(op));
 
-  EXPECT_EQ(4, op->ValueInputCount());
+  EXPECT_EQ(3, op->ValueInputCount());
   EXPECT_EQ(1, op->EffectInputCount());
   EXPECT_EQ(1, op->ControlInputCount());
-  EXPECT_EQ(6, OperatorProperties::GetTotalInputCount(op));
+  EXPECT_EQ(5, OperatorProperties::GetTotalInputCount(op));
 
   EXPECT_EQ(0, op->ValueOutputCount());
   EXPECT_EQ(1, op->EffectOutputCount());
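
The count changes in these two hunks mirror the bounds-check removal above: with the explicit length operand gone, LoadElement drops from 3 value inputs to 2 and gains a control input (2 + 1 + 1 = 4 total, unchanged), while StoreElement goes from 4 value inputs to 3 (3 + 1 + 1 = 5). The checked accesses now go through the separate LoadBuffer/StoreBuffer operators tested earlier.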
index 48c074e..9ef0fa5 100644 (file)
@@ -244,18 +244,540 @@ TEST_F(InstructionSelectorTest, TruncateInt64ToInt32WithWord64Shr) {
 // Addition.
 
 
-TEST_F(InstructionSelectorTest, Int32AddWithInt32AddWithParameters) {
+TEST_F(InstructionSelectorTest, Int32AddWithInt32ParametersLea) {
   StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
   Node* const p0 = m.Parameter(0);
   Node* const p1 = m.Parameter(1);
   Node* const a0 = m.Int32Add(p0, p1);
-  m.Return(m.Int32Add(a0, p0));
+  // Additional uses of the add's inputs choose lea.
+  Node* const a1 = m.Int32Div(p0, p1);
+  m.Return(m.Int32Div(a0, a1));
+  Stream s = m.Build();
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaSingle) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c0 = m.Int32Constant(15);
+  // If one of the add's operands is only used once, use an "leal", even though
+  // an "addl" could be used. The "leal" has proven faster--out best guess is
+  // that it gives the register allocation more freedom and it doesn't set
+  // flags, reducing pressure in the CPU's pipeline. If we're lucky with
+  // register allocation, then code generation will select an "addl" later for
+  // the cases that have been measured to be faster.
+  Node* const v0 = m.Int32Add(p0, c0);
+  m.Return(v0);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
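
The "leal" versus "addl" trade-off described in the comment comes down to what a single lea computes: a full base + index*scale + displacement sum, with no effect on the flags register. A sketch of that arithmetic in plain C++:

    #include <cstdint>

    // What one x64 "lea" evaluates; scale is restricted to 1, 2, 4 or 8.
    uint64_t LeaResult(uint64_t base, uint64_t index, int scale, int32_t disp) {
      return base + index * static_cast<uint64_t>(scale) + disp;
    }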
+
+
+TEST_F(InstructionSelectorTest, Int32AddConstantAsAdd) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c0 = m.Int32Constant(1);
+  // If there is only a single use of an add's input and the immediate constant
+  // for the add is 1, don't use an inc. It is much slower on modern Intel
+  // architectures.
+  m.Return(m.Int32Add(p0, c0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddConstantAsLeaDouble) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c0 = m.Int32Constant(15);
+  // A second use of the add's input uses lea.
+  Node* const a0 = m.Int32Add(p0, c0);
+  m.Return(m.Int32Div(a0, p0));
   Stream s = m.Build();
   ASSERT_EQ(2U, s.size());
-  EXPECT_EQ(kX64Add32, s[0]->arch_opcode());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
   ASSERT_EQ(2U, s[0]->InputCount());
-  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(0)));
-  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaSingle) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c0 = m.Int32Constant(15);
+  // If one of the add's operands is only used once, use an "leal", even though
+  // an "addl" could be used. The "leal" has proven faster--out best guess is
+  // that it gives the register allocation more freedom and it doesn't set
+  // flags, reducing pressure in the CPU's pipeline. If we're lucky with
+  // register allocation, then code generation will select an "addl" later for
+  // the cases that have been measured to be faster.
+  m.Return(m.Int32Add(c0, p0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedConstantAsLeaDouble) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c0 = m.Int32Constant(15);
+  // A second use of the add's input uses lea.
+  Node* const a0 = m.Int32Add(c0, p0);
+  USE(a0);
+  m.Return(m.Int32Div(a0, p0));
+  Stream s = m.Build();
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddSimpleAsAdd) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  // If one of the add's operands is only used once, use an "leal", even though
+  // an "addl" could be used. The "leal" has proven faster--out best guess is
+  // that it gives the register allocation more freedom and it doesn't set
+  // flags, reducing pressure in the CPU's pipeline. If we're lucky with
+  // register allocation, then code generation will select an "addl" later for
+  // the cases that have been measured to be faster.
+  m.Return(m.Int32Add(p0, p1));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddSimpleAsLea) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  // If all of the add's operands are used multiple times, use an "leal".
+  Node* const v1 = m.Int32Add(p0, p1);
+  m.Return(m.Int32Add(m.Int32Add(v1, p1), p0));
+  Stream s = m.Build();
+  ASSERT_EQ(3U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  m.Return(m.Int32Add(s0, p0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2Shl) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddCommutedScaled2Shl) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
+  m.Return(m.Int32Add(s0, p0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR4, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4Shl) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR4, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8Mul) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR8, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8Shl) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
+  m.Return(m.Int32Add(p0, s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR8, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle1) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(p0, m.Int32Add(s0, c0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle2) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(s0, m.Int32Add(c0, p0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle3) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(m.Int32Add(s0, c0), p0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle4) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(m.Int32Add(c0, p0), s0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2MulWithConstantShuffle5) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(m.Int32Add(p0, s0), c0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2ShlWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(1));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4MulWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(4));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR4I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled4ShlWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(2));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR4I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8MulWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(8));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR8I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled8ShlWithConstant) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const s0 = m.Word32Shl(p1, m.Int32Constant(3));
+  Node* const c0 = m.Int32Constant(15);
+  m.Return(m.Int32Add(c0, m.Int32Add(p0, s0)));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR8I, s[0]->addressing_mode());
+  ASSERT_EQ(3U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_TRUE(s[0]->InputAt(2)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubConstantAsSub) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c0 = m.Int32Constant(-1);
+  // If there is only a single use of one of the sub's non-constant inputs,
+  // use a "subl" instruction.
+  m.Return(m.Int32Sub(p0, c0));
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32SubConstantAsLea) {
+  StreamBuilder m(this, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c0 = m.Int32Constant(-1);
+  // If there are multiple uses of one of the sub's non-constant inputs, use
+  // a "leal" instruction.
+  Node* const v0 = m.Int32Sub(p0, c0);
+  m.Return(m.Int32Div(p0, v0));
+  Stream s = m.Build();
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MRI, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_TRUE(s[0]->InputAt(1)->IsImmediate());
+}
+
+
+TEST_F(InstructionSelectorTest, Int32AddScaled2Other) {
+  StreamBuilder m(this, kMachInt32, kMachInt32, kMachInt32, kMachInt32);
+  Node* const p0 = m.Parameter(0);
+  Node* const p1 = m.Parameter(1);
+  Node* const p2 = m.Parameter(2);
+  Node* const s0 = m.Int32Mul(p1, m.Int32Constant(2));
+  Node* const a0 = m.Int32Add(s0, p2);
+  Node* const a1 = m.Int32Add(p0, a0);
+  m.Return(a1);
+  Stream s = m.Build();
+  ASSERT_EQ(2U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p2), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p1), s.ToVreg(s[0]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[0]->OutputAt(0)));
+  ASSERT_EQ(2U, s[1]->InputCount());
+  EXPECT_EQ(kX64Lea32, s[1]->arch_opcode());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[1]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(a0), s.ToVreg(s[1]->InputAt(1)));
+  EXPECT_EQ(s.ToVreg(a1), s.ToVreg(s[1]->OutputAt(0)));
 }
 
 
@@ -324,6 +846,150 @@ TEST_F(InstructionSelectorTest, Uint32MulHigh) {
 }
 
 
+TEST_F(InstructionSelectorTest, Int32Mul2BecomesLea) {
+  StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c1 = m.Int32Constant(2);
+  Node* const n = m.Int32Mul(p0, c1);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32Mul3BecomesLea) {
+  StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c1 = m.Int32Constant(3);
+  Node* const n = m.Int32Mul(p0, c1);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR2, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32Mul4BecomesLea) {
+  StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c1 = m.Int32Constant(4);
+  Node* const n = m.Int32Mul(p0, c1);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_M4, s[0]->addressing_mode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32Mul5BecomesLea) {
+  StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c1 = m.Int32Constant(5);
+  Node* const n = m.Int32Mul(p0, c1);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR4, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32Mul8BecomesLea) {
+  StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c1 = m.Int32Constant(8);
+  Node* const n = m.Int32Mul(p0, c1);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_M8, s[0]->addressing_mode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32Mul9BecomesLea) {
+  StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c1 = m.Int32Constant(9);
+  Node* const n = m.Int32Mul(p0, c1);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR8, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+}
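
Taken together, the six tests above pin down the strength reduction: a multiply by a small constant becomes a single lea, with the addressing mode encoding the scale and optional base. A sketch of the mapping the expectations imply:

    #include <cstdint>

    // Multiply-by-constant via lea, mirroring the modes expected above; any
    // other constant would need an actual imul.
    uint32_t MulByConstViaLea(uint32_t n, int k) {
      switch (k) {
        case 2: return n + n * 1;  // kMode_MR1: lea [n + n*1]
        case 3: return n + n * 2;  // kMode_MR2: lea [n + n*2]
        case 4: return n * 4;      // kMode_M4:  lea [n*4]
        case 5: return n + n * 4;  // kMode_MR4: lea [n + n*4]
        case 8: return n * 8;      // kMode_M8:  lea [n*8]
        case 9: return n + n * 8;  // kMode_MR8: lea [n + n*8]
        default: return n * static_cast<uint32_t>(k);
      }
    }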
+
+
+// -----------------------------------------------------------------------------
+// Word32Shl.
+
+
+TEST_F(InstructionSelectorTest, Int32Shl1BecomesLea) {
+  StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c1 = m.Int32Constant(1);
+  Node* const n = m.Word32Shl(p0, c1);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_MR1, s[0]->addressing_mode());
+  ASSERT_EQ(2U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(1)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32Shl2BecomesLea) {
+  StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c1 = m.Int32Constant(2);
+  Node* const n = m.Word32Shl(p0, c1);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_M4, s[0]->addressing_mode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
+TEST_F(InstructionSelectorTest, Int32Shl4BecomesLea) {
+  StreamBuilder m(this, kMachUint32, kMachUint32, kMachUint32);
+  Node* const p0 = m.Parameter(0);
+  Node* const c1 = m.Int32Constant(3);
+  Node* const n = m.Word32Shl(p0, c1);
+  m.Return(n);
+  Stream s = m.Build();
+  ASSERT_EQ(1U, s.size());
+  EXPECT_EQ(kX64Lea32, s[0]->arch_opcode());
+  EXPECT_EQ(kMode_M8, s[0]->addressing_mode());
+  ASSERT_EQ(1U, s[0]->InputCount());
+  EXPECT_EQ(s.ToVreg(p0), s.ToVreg(s[0]->InputAt(0)));
+}
+
+
 // -----------------------------------------------------------------------------
 // Word64Shl.
 
@@ -365,6 +1031,38 @@ TEST_F(InstructionSelectorTest, Word64ShlWithChangeUint32ToUint64) {
   }
 }
 
+
+TEST_F(InstructionSelectorTest, Float64BinopArithmetic) {
+  {
+    StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+    Node* mul = m.Float64Mul(add, m.Parameter(1));
+    Node* sub = m.Float64Sub(mul, add);
+    Node* ret = m.Float64Div(mul, sub);
+    m.Return(ret);
+    Stream s = m.Build(AVX);
+    ASSERT_EQ(4U, s.size());
+    EXPECT_EQ(kAVXFloat64Add, s[0]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Mul, s[1]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Sub, s[2]->arch_opcode());
+    EXPECT_EQ(kAVXFloat64Div, s[3]->arch_opcode());
+  }
+  {
+    StreamBuilder m(this, kMachFloat64, kMachFloat64, kMachFloat64);
+    Node* add = m.Float64Add(m.Parameter(0), m.Parameter(1));
+    Node* mul = m.Float64Mul(add, m.Parameter(1));
+    Node* sub = m.Float64Sub(mul, add);
+    Node* ret = m.Float64Div(mul, sub);
+    m.Return(ret);
+    Stream s = m.Build();
+    ASSERT_EQ(4U, s.size());
+    EXPECT_EQ(kSSEFloat64Add, s[0]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Mul, s[1]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Sub, s[2]->arch_opcode());
+    EXPECT_EQ(kSSEFloat64Div, s[3]->arch_opcode());
+  }
+}
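
Both halves of this test build an identical graph; only the CPU features passed to Build() differ. A minimal sketch of the dispatch, reusing the opcode names from the test (the selector's actual feature plumbing is assumed):

    // Pick the AVX form only when the AVX feature is enabled.
    ArchOpcode SelectFloat64Add(bool avx_enabled) {
      return avx_enabled ? kAVXFloat64Add : kSSEFloat64Add;
    }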
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
index 55dd6c6..2076e60 100644 (file)
@@ -22,6 +22,7 @@ class GCIdleTimeHandlerTest : public ::testing::Test {
   GCIdleTimeHandler::HeapState DefaultHeapState() {
     GCIdleTimeHandler::HeapState result;
     result.contexts_disposed = 0;
+    result.contexts_disposal_rate = GCIdleTimeHandler::kHighContextDisposalRate;
     result.size_of_objects = kSizeOfObjects;
     result.incremental_marking_stopped = false;
     result.can_start_incremental_marking = true;
@@ -179,13 +180,51 @@ TEST_F(GCIdleTimeHandlerTest, DontDoMarkCompact) {
 }
 
 
+TEST_F(GCIdleTimeHandlerTest, ShouldDoFinalIncrementalMarkCompact) {
+  size_t idle_time_in_ms = 16;
+  EXPECT_TRUE(GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
+      idle_time_in_ms, 0, 0));
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, DontDoFinalIncrementalMarkCompact) {
+  size_t idle_time_in_ms = 1;
+  EXPECT_FALSE(GCIdleTimeHandler::ShouldDoFinalIncrementalMarkCompact(
+      idle_time_in_ms, kSizeOfObjects, kMarkingSpeed));
+}
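
These two tests bound the decision from both sides: with no size or speed data the handler finalizes optimistically, while a large heap and a real marking speed make a 1 ms slice insufficient. A sketch of a predicate consistent with both expectations (the handler's actual estimate is assumed):

    #include <cstddef>

    // Finalize incremental marking only if the projected mark-compact pause,
    // size / speed, fits into the idle time; with no speed sample yet, be
    // optimistic.
    bool ShouldDoFinalIncrementalMarkCompact(std::size_t idle_time_in_ms,
                                             std::size_t size_of_objects,
                                             std::size_t speed_in_bytes_per_ms) {
      if (speed_in_bytes_per_ms == 0) return true;
      return idle_time_in_ms >= size_of_objects / speed_in_bytes_per_ms;
    }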
+
+
+TEST_F(GCIdleTimeHandlerTest, ContextDisposeLowRate) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.contexts_disposed = 1;
+  heap_state.incremental_marking_stopped = true;
+  double idle_time_ms = 0;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_NOTHING, action.type);
+}
+
+
+TEST_F(GCIdleTimeHandlerTest, ContextDisposeHighRate) {
+  GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
+  heap_state.contexts_disposed = 1;
+  heap_state.contexts_disposal_rate =
+      GCIdleTimeHandler::kHighContextDisposalRate - 1;
+  heap_state.incremental_marking_stopped = true;
+  double idle_time_ms = 0;
+  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  EXPECT_EQ(DO_FULL_GC, action.type);
+}
+
+
 TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeIdleTime) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   heap_state.contexts_disposed = 1;
+  heap_state.contexts_disposal_rate = 1.0;
   heap_state.incremental_marking_stopped = true;
+  heap_state.can_start_incremental_marking = false;
   size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
-  int idle_time_ms =
-      static_cast<int>((heap_state.size_of_objects + speed - 1) / speed);
+  double idle_time_ms =
+      static_cast<double>((heap_state.size_of_objects + speed - 1) / speed);
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_FULL_GC, action.type);
 }
@@ -194,9 +233,9 @@ TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeLargeIdleTime) {
 TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeZeroIdleTime) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   heap_state.contexts_disposed = 1;
+  heap_state.contexts_disposal_rate = 1.0;
   heap_state.incremental_marking_stopped = true;
-  heap_state.size_of_objects = GCIdleTimeHandler::kSmallHeapSize / 2;
-  int idle_time_ms = 0;
+  double idle_time_ms = 0;
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_FULL_GC, action.type);
 }
@@ -205,9 +244,11 @@ TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeZeroIdleTime) {
 TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   heap_state.contexts_disposed = 1;
+  heap_state.contexts_disposal_rate = 1.0;
   heap_state.incremental_marking_stopped = true;
   size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
-  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+  double idle_time_ms =
+      static_cast<double>(heap_state.size_of_objects / speed - 1);
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
 }
@@ -216,8 +257,10 @@ TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime1) {
 TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   heap_state.contexts_disposed = 1;
+  heap_state.contexts_disposal_rate = 1.0;
   size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
-  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+  double idle_time_ms =
+      static_cast<double>(heap_state.size_of_objects / speed - 1);
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
 }
@@ -226,7 +269,7 @@ TEST_F(GCIdleTimeHandlerTest, AfterContextDisposeSmallIdleTime2) {
 TEST_F(GCIdleTimeHandlerTest, IncrementalMarking1) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
-  int idle_time_ms = 10;
+  double idle_time_ms = 10;
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
   EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
@@ -239,7 +282,7 @@ TEST_F(GCIdleTimeHandlerTest, IncrementalMarking2) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
   heap_state.incremental_marking_stopped = true;
   size_t speed = heap_state.incremental_marking_speed_in_bytes_per_ms;
-  int idle_time_ms = 10;
+  double idle_time_ms = 10;
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
   EXPECT_GT(speed * static_cast<size_t>(idle_time_ms),
@@ -253,7 +296,8 @@ TEST_F(GCIdleTimeHandlerTest, NotEnoughTime) {
   heap_state.incremental_marking_stopped = true;
   heap_state.can_start_incremental_marking = false;
   size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
-  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed - 1);
+  double idle_time_ms =
+      static_cast<double>(heap_state.size_of_objects / speed - 1);
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_NOTHING, action.type);
 }
@@ -264,7 +308,8 @@ TEST_F(GCIdleTimeHandlerTest, StopEventually1) {
   heap_state.incremental_marking_stopped = true;
   heap_state.can_start_incremental_marking = false;
   size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
-  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed + 1);
+  double idle_time_ms =
+      static_cast<double>(heap_state.size_of_objects / speed + 1);
   for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
     GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
     EXPECT_EQ(DO_FULL_GC, action.type);
@@ -277,7 +322,7 @@ TEST_F(GCIdleTimeHandlerTest, StopEventually1) {
 
 TEST_F(GCIdleTimeHandlerTest, StopEventually2) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
-  int idle_time_ms = 10;
+  double idle_time_ms = 10;
   for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
     GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
     EXPECT_EQ(DO_INCREMENTAL_MARKING, action.type);
@@ -296,7 +341,8 @@ TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop1) {
   heap_state.incremental_marking_stopped = true;
   heap_state.can_start_incremental_marking = false;
   size_t speed = heap_state.mark_compact_speed_in_bytes_per_ms;
-  int idle_time_ms = static_cast<int>(heap_state.size_of_objects / speed + 1);
+  double idle_time_ms =
+      static_cast<double>(heap_state.size_of_objects / speed + 1);
   for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
     GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
     EXPECT_EQ(DO_FULL_GC, action.type);
@@ -315,7 +361,7 @@ TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop1) {
 
 TEST_F(GCIdleTimeHandlerTest, ContinueAfterStop2) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
-  int idle_time_ms = 10;
+  double idle_time_ms = 10;
   for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
     GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
     if (action.type == DONE) break;
@@ -343,7 +389,8 @@ TEST_F(GCIdleTimeHandlerTest, Scavenge) {
   heap_state.used_new_space_size =
       heap_state.new_space_capacity -
       (kNewSpaceAllocationThroughput * idle_time_ms);
-  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  GCIdleTimeAction action =
+      handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
   EXPECT_EQ(DO_SCAVENGE, action.type);
 }
 
@@ -356,17 +403,18 @@ TEST_F(GCIdleTimeHandlerTest, ScavengeAndDone) {
   heap_state.used_new_space_size =
       heap_state.new_space_capacity -
       (kNewSpaceAllocationThroughput * idle_time_ms);
-  GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
+  GCIdleTimeAction action =
+      handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
   EXPECT_EQ(DO_SCAVENGE, action.type);
   heap_state.used_new_space_size = 0;
-  action = handler()->Compute(idle_time_ms, heap_state);
+  action = handler()->Compute(static_cast<double>(idle_time_ms), heap_state);
   EXPECT_EQ(DO_NOTHING, action.type);
 }
 
 
 TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeNothingToDo) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
-  int idle_time_ms = 0;
+  double idle_time_ms = 0;
   GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
   EXPECT_EQ(DO_NOTHING, action.type);
 }
@@ -374,7 +422,7 @@ TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeNothingToDo) {
 
 TEST_F(GCIdleTimeHandlerTest, ZeroIdleTimeDoNothingButStartIdleRound) {
   GCIdleTimeHandler::HeapState heap_state = DefaultHeapState();
-  int idle_time_ms = 10;
+  double idle_time_ms = 10;
   for (int i = 0; i < GCIdleTimeHandler::kMaxMarkCompactsInIdleRound; i++) {
     GCIdleTimeAction action = handler()->Compute(idle_time_ms, heap_state);
     if (action.type == DONE) break;
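
The contexts_disposal_rate tests above encode the new heuristic: with no idle-time budget, disposing a context only justifies an immediate full GC when contexts are being thrown away quickly, i.e. when the measured interval per disposal is below kHighContextDisposalRate (with a non-zero budget, a full GC additionally requires enough idle time for a mark-compact, as the AfterContextDispose tests show). A rough Python sketch of just the zero-idle-time branch, with names taken from the tests and the threshold value assumed; the real Compute() also weighs heap size and collector speeds:

    # Sketch only: the zero-idle-time branch these unit tests exercise.
    K_HIGH_CONTEXT_DISPOSAL_RATE = 100.0  # assumed threshold, ms per disposal

    def zero_idle_action(contexts_disposed, disposal_rate_ms,
                         incremental_marking_stopped):
        """What Compute() decides when the idle time budget is 0 ms."""
        if (contexts_disposed > 0 and incremental_marking_stopped and
                disposal_rate_ms < K_HIGH_CONTEXT_DISPOSAL_RATE):
            return "DO_FULL_GC"   # contexts die fast: collect them right away
        return "DO_NOTHING"       # otherwise a zero budget means do nothing

    assert zero_idle_action(1, K_HIGH_CONTEXT_DISPOSAL_RATE - 1, True) == "DO_FULL_GC"
    assert zero_idle_action(1, K_HIGH_CONTEXT_DISPOSAL_RATE, True) == "DO_NOTHING"
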
index b228499..31d724a 100644 (file)
@@ -98,6 +98,11 @@ TestWithIsolate::~TestWithIsolate() {}
 Factory* TestWithIsolate::factory() const { return isolate()->factory(); }
 
 
+base::RandomNumberGenerator* TestWithIsolate::random_number_generator() const {
+  return isolate()->random_number_generator();
+}
+
+
 TestWithZone::~TestWithZone() {}
 
 }  // namespace internal
index 2025b8a..511e357 100644 (file)
@@ -83,6 +83,7 @@ class TestWithIsolate : public virtual ::v8::TestWithIsolate {
   Isolate* isolate() const {
     return reinterpret_cast<Isolate*>(::v8::TestWithIsolate::isolate());
   }
+  base::RandomNumberGenerator* random_number_generator() const;
 
  private:
   DISALLOW_COPY_AND_ASSIGN(TestWithIsolate);
index a881e46..2ead44f 100644 (file)
@@ -28,6 +28,7 @@
         'base/division-by-constant-unittest.cc',
         'base/flags-unittest.cc',
         'base/functional-unittest.cc',
+        'base/iterator-unittest.cc',
         'base/platform/condition-variable-unittest.cc',
         'base/platform/mutex-unittest.cc',
         'base/platform/platform-unittest.cc',
         'base/utils/random-number-generator-unittest.cc',
         'char-predicates-unittest.cc',
         'compiler/change-lowering-unittest.cc',
+        'compiler/common-operator-reducer-unittest.cc',
         'compiler/common-operator-unittest.cc',
         'compiler/compiler-test-utils.h',
+        'compiler/control-equivalence-unittest.cc',
         'compiler/diamond-unittest.cc',
         'compiler/graph-reducer-unittest.cc',
         'compiler/graph-unittest.cc',
         'compiler/graph-unittest.h',
         'compiler/instruction-selector-unittest.cc',
         'compiler/instruction-selector-unittest.h',
+        'compiler/instruction-sequence-unittest.cc',
+        'compiler/instruction-sequence-unittest.h',
         'compiler/js-builtin-reducer-unittest.cc',
         'compiler/js-operator-unittest.cc',
         'compiler/js-typed-lowering-unittest.cc',
+        'compiler/load-elimination-unittest.cc',
         'compiler/machine-operator-reducer-unittest.cc',
         'compiler/machine-operator-unittest.cc',
+        'compiler/move-optimizer-unittest.cc',
+        'compiler/node-matchers-unittest.cc',
         'compiler/node-test-utils.cc',
         'compiler/node-test-utils.h',
         'compiler/register-allocator-unittest.cc',
             'compiler/mips/instruction-selector-mips-unittest.cc',
           ],
         }],
+        ['v8_target_arch=="mips64el"', {
+          'sources': [  ### gcmole(arch:mips64el) ###
+            'compiler/mips64/instruction-selector-mips64-unittest.cc',
+          ],
+        }],
         ['v8_target_arch=="x64"', {
           'sources': [  ### gcmole(arch:x64) ###
             'compiler/x64/instruction-selector-x64-unittest.cc',
           # compiler-unittests can't be built against a shared library, so we
           # need to depend on the underlying static target in that case.
           'conditions': [
-            ['v8_use_snapshot=="true"', {
+            ['v8_use_snapshot=="true" and v8_use_external_startup_data==0', {
               'dependencies': ['../../tools/gyp/v8.gyp:v8_snapshot'],
-            },
-            {
-              'dependencies': [
-                '../../tools/gyp/v8.gyp:v8_nosnapshot',
-              ],
+            }],
+            ['v8_use_snapshot=="true" and v8_use_external_startup_data==1', {
+              'dependencies': ['../../tools/gyp/v8.gyp:v8_external_snapshot'],
+            }],
+            ['v8_use_snapshot!="true"', {
+              'dependencies': ['../../tools/gyp/v8.gyp:v8_nosnapshot'],
             }],
           ],
         }, {
index 030d7f9..c96c836 100644 (file)
@@ -1,25 +1,6 @@
-# Copyright 2013 the V8 project authors. All rights reserved.
-# Copyright (C) 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1.  Redistributions of source code must retain the above copyright
-#     notice, this list of conditions and the following disclaimer.
-# 2.  Redistributions in binary form must reproduce the above copyright
-#     notice, this list of conditions and the following disclaimer in the
-#     documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY
-# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-# DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS BE LIABLE FOR ANY
-# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
-# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
 
 Test to ensure correct behaviour of Object.getOwnPropertyNames
 
@@ -66,11 +47,11 @@ PASS getSortedOwnPropertyNames(encodeURIComponent) is ['arguments', 'caller', 'l
 PASS getSortedOwnPropertyNames(Object) is ['arguments', 'caller', 'create', 'defineProperties', 'defineProperty', 'deliverChangeRecords', 'freeze', 'getNotifier', 'getOwnPropertyDescriptor', 'getOwnPropertyNames', 'getOwnPropertySymbols', 'getPrototypeOf', 'is', 'isExtensible', 'isFrozen', 'isSealed', 'keys', 'length', 'name', 'observe', 'preventExtensions', 'prototype', 'seal', 'setPrototypeOf', 'unobserve']
 PASS getSortedOwnPropertyNames(Object.prototype) is ['__defineGetter__', '__defineSetter__', '__lookupGetter__', '__lookupSetter__', '__proto__', 'constructor', 'hasOwnProperty', 'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString', 'toString', 'valueOf']
 PASS getSortedOwnPropertyNames(Function) is ['arguments', 'caller', 'length', 'name', 'prototype']
-PASS getSortedOwnPropertyNames(Function.prototype) is ['apply', 'arguments', 'bind', 'call', 'caller', 'constructor', 'length', 'name', 'toString']
+PASS getSortedOwnPropertyNames(Function.prototype) is ['apply', 'arguments', 'bind', 'call', 'caller', 'constructor', 'length', 'name', 'toMethod', 'toString']
 PASS getSortedOwnPropertyNames(Array) is ['arguments', 'caller', 'isArray', 'length', 'name', 'observe', 'prototype', 'unobserve']
 PASS getSortedOwnPropertyNames(Array.prototype) is ['concat', 'constructor', 'entries', 'every', 'filter', 'forEach', 'indexOf', 'join', 'keys', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']
-PASS getSortedOwnPropertyNames(String) is ['arguments', 'caller', 'fromCharCode', 'length', 'name', 'prototype']
-PASS getSortedOwnPropertyNames(String.prototype) is ['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'concat', 'constructor', 'fixed', 'fontcolor', 'fontsize', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'replace', 'search', 'slice', 'small', 'split', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']
+PASS getSortedOwnPropertyNames(String) is ['arguments', 'caller', 'fromCharCode', 'fromCodePoint', 'length', 'name', 'prototype', 'raw']
+PASS getSortedOwnPropertyNames(String.prototype) is ['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'codePointAt', 'concat', 'constructor', 'endsWith', 'fixed', 'fontcolor', 'fontsize', 'includes', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'repeat', 'replace', 'search', 'slice', 'small', 'split', 'startsWith', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']
 PASS getSortedOwnPropertyNames(Boolean) is ['arguments', 'caller', 'length', 'name', 'prototype']
 PASS getSortedOwnPropertyNames(Boolean.prototype) is ['constructor', 'toString', 'valueOf']
 PASS getSortedOwnPropertyNames(Number) is ['EPSILON', 'MAX_SAFE_INTEGER', 'MAX_VALUE', 'MIN_SAFE_INTEGER', 'MIN_VALUE', 'NEGATIVE_INFINITY', 'NaN', 'POSITIVE_INFINITY', 'arguments', 'caller', 'isFinite', 'isInteger', 'isNaN', 'isSafeInteger', 'length', 'name', 'parseFloat', 'parseInt', 'prototype']
index caa0111..46d7966 100644 (file)
@@ -74,11 +74,11 @@ var expectedPropertyNamesSet = {
     "Object": "['arguments', 'caller', 'create', 'defineProperties', 'defineProperty', 'deliverChangeRecords', 'freeze', 'getNotifier', 'getOwnPropertyDescriptor', 'getOwnPropertyNames', 'getOwnPropertySymbols', 'getPrototypeOf', 'is', 'isExtensible', 'isFrozen', 'isSealed', 'keys', 'length', 'name', 'observe', 'preventExtensions', 'prototype', 'seal', 'setPrototypeOf', 'unobserve']",
     "Object.prototype": "['__defineGetter__', '__defineSetter__', '__lookupGetter__', '__lookupSetter__', '__proto__', 'constructor', 'hasOwnProperty', 'isPrototypeOf', 'propertyIsEnumerable', 'toLocaleString', 'toString', 'valueOf']",
     "Function": "['arguments', 'caller', 'length', 'name', 'prototype']",
-    "Function.prototype": "['apply', 'arguments', 'bind', 'call', 'caller', 'constructor', 'length', 'name', 'toString']",
+    "Function.prototype": "['apply', 'arguments', 'bind', 'call', 'caller', 'constructor', 'length', 'name', 'toMethod', 'toString']",
     "Array": "['arguments', 'caller', 'isArray', 'length', 'name', 'observe', 'prototype', 'unobserve']",
     "Array.prototype": "['concat', 'constructor', 'entries', 'every', 'filter', 'forEach', 'indexOf', 'join', 'keys', 'lastIndexOf', 'length', 'map', 'pop', 'push', 'reduce', 'reduceRight', 'reverse', 'shift', 'slice', 'some', 'sort', 'splice', 'toLocaleString', 'toString', 'unshift']",
-    "String": "['arguments', 'caller', 'fromCharCode', 'length', 'name', 'prototype']",
-    "String.prototype": "['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'concat', 'constructor', 'fixed', 'fontcolor', 'fontsize', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'replace', 'search', 'slice', 'small', 'split', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']",
+    "String": "['arguments', 'caller', 'fromCharCode', 'fromCodePoint', 'length', 'name', 'prototype', 'raw']",
+    "String.prototype": "['anchor', 'big', 'blink', 'bold', 'charAt', 'charCodeAt', 'codePointAt', 'concat', 'constructor', 'endsWith', 'fixed', 'fontcolor', 'fontsize', 'includes', 'indexOf', 'italics', 'lastIndexOf', 'length', 'link', 'localeCompare', 'match', 'normalize', 'repeat', 'replace', 'search', 'slice', 'small', 'split', 'startsWith', 'strike', 'sub', 'substr', 'substring', 'sup', 'toLocaleLowerCase', 'toLocaleUpperCase', 'toLowerCase', 'toString', 'toUpperCase', 'trim', 'trimLeft', 'trimRight', 'valueOf']",
     "Boolean": "['arguments', 'caller', 'length', 'name', 'prototype']",
     "Boolean.prototype": "['constructor', 'toString', 'valueOf']",
     "Number": "['EPSILON', 'MAX_SAFE_INTEGER', 'MAX_VALUE', 'MIN_SAFE_INTEGER', 'MIN_VALUE', 'NEGATIVE_INFINITY', 'NaN', 'POSITIVE_INFINITY', 'arguments', 'caller', 'isFinite', 'isInteger', 'isNaN', 'isSafeInteger', 'length', 'name', 'parseFloat', 'parseInt', 'prototype']",
index 45f71bf..0e6228e 100644 (file)
@@ -129,7 +129,7 @@ PASS (function (){ 'use strict'; delete someDeclaredGlobal;}) threw exception Sy
 PASS (function(){(function (){ 'use strict'; delete someDeclaredGlobal;})}) threw exception SyntaxError: Delete of an unqualified identifier in strict mode..
 PASS 'use strict'; if (0) { someGlobal = 'Shouldn\'t be able to assign this.'; }; true; is true
 PASS 'use strict'; someGlobal = 'Shouldn\'t be able to assign this.';  threw exception ReferenceError: someGlobal is not defined.
-FAIL 'use strict'; (function f(){ f = 'shouldn\'t be able to assign to function expression name'; })() should throw an exception. Was undefined.
+PASS 'use strict'; (function f(){ f = 'shouldn\'t be able to assign to function expression name'; })() threw exception TypeError: Assignment to constant variable..
 PASS 'use strict'; eval('var introducedVariable = "FAIL: variable introduced into containing scope";'); introducedVariable threw exception ReferenceError: introducedVariable is not defined.
 PASS 'use strict'; objectWithReadonlyProperty.prop = 'fail' threw exception TypeError: Cannot assign to read only property 'prop' of #<Object>.
 PASS 'use strict'; delete objectWithReadonlyProperty.prop threw exception TypeError: Cannot delete property 'prop' of #<Object>.
index a6eefd9..1024072 100644 (file)
@@ -26,23 +26,23 @@ This page tests toString conversion of RegExp objects, particularly wrt to '/' c
 On success, you will see a series of "PASS" messages, followed by "TEST COMPLETE".
 
 
-FAIL RegExp('/').source should be \/. Was /.
+PASS RegExp('/').source is "\\/"
 PASS RegExp('').source is "(?:)"
 PASS RegExp.prototype.source is "(?:)"
-FAIL RegExp('/').toString() should be /\//. Was ///.
+PASS RegExp('/').toString() is "/\\//"
 PASS RegExp('').toString() is "/(?:)/"
 PASS RegExp.prototype.toString() is "/(?:)/"
-FAIL testForwardSlash("^/$", "/"); should be true. Threw exception SyntaxError: Unexpected end of input
-FAIL testForwardSlash("^/$", "/"); should be true. Threw exception SyntaxError: Unexpected end of input
-FAIL testForwardSlash("^\/$", "/"); should be true. Threw exception SyntaxError: Unexpected end of input
+PASS testForwardSlash("^/$", "/"); is true
+PASS testForwardSlash("^/$", "/"); is true
+PASS testForwardSlash("^\/$", "/"); is true
 PASS testForwardSlash("^\\/$", "\/"); is true
 PASS testForwardSlash("^\\\/$", "\/"); is true
 FAIL testForwardSlash("^\\\\/$", "\\/"); should be true. Threw exception SyntaxError: Unexpected end of input
 FAIL testForwardSlash("^\\\\\/$", "\\/"); should be true. Threw exception SyntaxError: Unexpected end of input
-FAIL testForwardSlash("x/x/x", "x\/x\/x"); should be true. Threw exception SyntaxError: Unexpected end of input
-FAIL testForwardSlash("x\/x/x", "x\/x\/x"); should be true. Threw exception SyntaxError: Unexpected end of input
-FAIL testForwardSlash("x/x\/x", "x\/x\/x"); should be true. Threw exception SyntaxError: Unexpected end of input
-FAIL testForwardSlash("x\/x\/x", "x\/x\/x"); should be true. Threw exception SyntaxError: Unexpected end of input
+PASS testForwardSlash("x/x/x", "x\/x\/x"); is true
+PASS testForwardSlash("x\/x/x", "x\/x\/x"); is true
+PASS testForwardSlash("x/x\/x", "x\/x\/x"); is true
+PASS testForwardSlash("x\/x\/x", "x\/x\/x"); is true
 FAIL testLineTerminator("\n"); should be false. Was true.
 PASS testLineTerminator("\\n"); is false
 FAIL testLineTerminator("\r"); should be false. Was true.
@@ -51,8 +51,8 @@ FAIL testLineTerminator("\u2028"); should be false. Was true.
 PASS testLineTerminator("\\u2028"); is false
 FAIL testLineTerminator("\u2029"); should be false. Was true.
 PASS testLineTerminator("\\u2029"); is false
-PASS RegExp('[/]').source is '[/]'
-FAIL RegExp('\\[/]').source should be \[\/]. Was \[/].
+FAIL RegExp('[/]').source should be [/]. Was [\/].
+PASS RegExp('\\[/]').source is '\\[\\/]'
 PASS var o = new RegExp(); o.toString() === '/'+o.source+'/' && eval(o.toString()+'.exec(String())') is [""]
 PASS successfullyParsed is true
 
index e4e3f8f..aa81964 100644 (file)
@@ -109,7 +109,11 @@ class WebkitTestSuite(testsuite.TestSuite):
             string.startswith("tools/nacl-run.py") or
             string.find("BYPASSING ALL ACL CHECKS") > 0 or
             string.find("Native Client module will be loaded") > 0 or
-            string.find("NaClHostDescOpen:") > 0)
+            string.find("NaClHostDescOpen:") > 0 or
+            # FIXME(machenbach): The test driver shouldn't try to use slow
+            # asserts if they weren't compiled. This fails in optdebug=2.
+            string == "Warning: unknown flag --enable-slow-asserts." or
+            string == "Try --help for options")
 
   def IsFailureOutput(self, output, testpath):
     if super(WebkitTestSuite, self).IsFailureOutput(output, testpath):
index 89a5cab..c33f1b9 100644 (file)
 ['arch == arm64 and simulator_run == True', {
   'dfg-int-overflow-in-loop': [SKIP],
 }],  # 'arch == arm64 and simulator_run == True'
+['dcheck_always_on == True and arch == arm64', {
+  # Doesn't work with gcc 4.6 on arm64 for some reason.
+  'reentrant-caching': [SKIP],
+}],  # 'dcheck_always_on == True and arch == arm64'
 
 
 ##############################################################################
@@ -66,8 +70,8 @@
 }],  # 'gc_stress == True'
 
 ['gc_stress == True and mode == debug', {
-  # Skip tests that timout with turbofan.
-  'array-iterate-backwards': [PASS, NO_VARIANTS]
+  # Skip tests that timeout.
+  'array-iterate-backwards': [SKIP]
 }],  # 'gc_stress == True and mode == debug'
 
 ##############################################################################
index 460e92d..4acb1cc 100755 (executable)
@@ -88,6 +88,7 @@ function sync_dir {
 echo -n "sync to $ANDROID_V8/$OUTDIR/$ARCH_MODE"
 sync_file "$OUTDIR/$ARCH_MODE/cctest"
 sync_file "$OUTDIR/$ARCH_MODE/d8"
+sync_file "$OUTDIR/$ARCH_MODE/unittests"
 echo ""
 echo -n "sync to $ANDROID_V8/tools"
 sync_file tools/consarray.js
diff --git a/deps/v8/tools/find-commit-for-patch.py b/deps/v8/tools/find-commit-for-patch.py
new file mode 100755 (executable)
index 0000000..657826c
--- /dev/null
+++ b/deps/v8/tools/find-commit-for-patch.py
@@ -0,0 +1,93 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import argparse
+import subprocess
+import sys
+
+
+def GetArgs():
+  parser = argparse.ArgumentParser(
+      description="Finds a commit that a given patch can be applied to. "
+                  "Does not actually apply the patch or modify your checkout "
+                  "in any way.")
+  parser.add_argument("patch_file", help="Patch file to match")
+  parser.add_argument(
+      "--branch", "-b", default="origin/master", type=str,
+      help="Git tree-ish where to start searching for commits, "
+           "default: %(default)s")
+  parser.add_argument(
+      "--limit", "-l", default=500, type=int,
+      help="Maximum number of commits to search, default: %(default)s")
+  parser.add_argument(
+      "--verbose", "-v", default=False, action="store_true",
+      help="Print verbose output for your entertainment")
+  return parser.parse_args()
+
+
+def FindFilesInPatch(patch_file):
+  files = {}
+  next_file = ""
+  with open(patch_file) as patch:
+    for line in patch:
+      if line.startswith("diff --git "):
+        # diff --git a/src/objects.cc b/src/objects.cc
+        words = line.split()
+        assert words[2].startswith("a/") and len(words[2]) > 2
+        next_file = words[2][2:]
+      elif line.startswith("index "):
+        # index add3e61..d1bbf6a 100644
+        hashes = line.split()[1]
+        old_hash = hashes.split("..")[0]
+        if old_hash.startswith("0000000"): continue  # Ignore new files.
+        files[next_file] = old_hash
+  return files
+
+
+def GetGitCommitHash(treeish):
+  cmd = ["git", "log", "-1", "--format=%H", treeish]
+  return subprocess.check_output(cmd).strip()
+
+
+def CountMatchingFiles(commit, files):
+  matched_files = 0
+  # Calling out to git once and parsing the result Python-side is faster
+  # than calling 'git ls-tree' for every file.
+  cmd = ["git", "ls-tree", "-r", commit] + [f for f in files]
+  output = subprocess.check_output(cmd)
+  for line in output.splitlines():
+    # 100644 blob c6d5daaa7d42e49a653f9861224aad0a0244b944      src/objects.cc
+    _, _, actual_hash, filename = line.split()
+    expected_hash = files[filename]
+    if actual_hash.startswith(expected_hash): matched_files += 1
+  return matched_files
+
+
+def FindFirstMatchingCommit(start, files, limit, verbose):
+  commit = GetGitCommitHash(start)
+  num_files = len(files)
+  if verbose: print(">>> Found %d files modified by patch." % num_files)
+  for _ in range(limit):
+    matched_files = CountMatchingFiles(commit, files)
+    if verbose: print("Commit %s matched %d files" % (commit, matched_files))
+    if matched_files == num_files:
+      return commit
+    commit = GetGitCommitHash("%s^" % commit)
+  print("Sorry, no matching commit found. "
+        "Try running 'git fetch', specifying the correct --branch, "
+        "and/or setting a higher --limit.")
+  sys.exit(1)
+
+
+if __name__ == "__main__":
+  args = GetArgs()
+  files = FindFilesInPatch(args.patch_file)
+  commit = FindFirstMatchingCommit(args.branch, files, args.limit, args.verbose)
+  if args.verbose:
+    print(">>> Matching commit: %s" % commit)
+    print(subprocess.check_output(["git", "log", "-1", commit]))
+    print(">>> Kthxbai.")
+  else:
+    print(commit)
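
A hypothetical invocation of the new helper (flag names as defined in GetArgs above); it prints the newest commit on the given branch whose pre-image blob hashes match every file the patch touches, so the patch can be applied there cleanly:

    # Hypothetical driver: find the base commit, then check it out.
    import subprocess

    commit = subprocess.check_output(
        ["tools/find-commit-for-patch.py", "my-fix.patch",  # patch name is illustrative
         "--branch", "origin/master", "--limit", "1000"]).strip()
    subprocess.check_call(["git", "checkout", commit])
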
diff --git a/deps/v8/tools/find_depot_tools.py b/deps/v8/tools/find_depot_tools.py
new file mode 100644 (file)
index 0000000..95ae9e8
--- /dev/null
+++ b/deps/v8/tools/find_depot_tools.py
@@ -0,0 +1,40 @@
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+"""Small utility function to find depot_tools and add it to the python path.
+"""
+
+import os
+import sys
+
+
+def directory_really_is_depot_tools(directory):
+  return os.path.isfile(os.path.join(directory, 'gclient.py'))
+
+
+def add_depot_tools_to_path():
+  """Search for depot_tools and add it to sys.path."""
+  # First look if depot_tools is already in PYTHONPATH.
+  for i in sys.path:
+    if i.rstrip(os.sep).endswith('depot_tools'):
+      if directory_really_is_depot_tools(i):
+        return i
+
+  # Then look if depot_tools is in PATH, common case.
+  for i in os.environ['PATH'].split(os.pathsep):
+    if i.rstrip(os.sep).endswith('depot_tools'):
+      if directory_really_is_depot_tools(i):
+        sys.path.insert(0, i.rstrip(os.sep))
+        return i
+  # Rare case, it's not even in PATH, look upward up to root.
+  root_dir = os.path.dirname(os.path.abspath(__file__))
+  previous_dir = os.path.abspath(__file__)
+  while root_dir and root_dir != previous_dir:
+    if directory_really_is_depot_tools(os.path.join(root_dir, 'depot_tools')):
+      i = os.path.join(root_dir, 'depot_tools')
+      sys.path.insert(0, i)
+      return i
+    previous_dir = root_dir
+    root_dir = os.path.dirname(root_dir)
+  print >> sys.stderr, 'Failed to find depot_tools'
+  return None
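
Intended use is a two-liner at the top of any tool script that needs depot_tools modules; a sketch (the caller is hypothetical):

    # Hypothetical caller: make depot_tools importable before using it.
    import find_depot_tools

    # Searches PYTHONPATH, then PATH, then parent directories up to root,
    # inserts the hit into sys.path and returns it (or None on failure).
    depot_tools_path = find_depot_tools.add_depot_tools_to_path()
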
index 62e103a..04a1ea8 100644 (file)
@@ -70,6 +70,8 @@ consts_misc = [
     { 'name': 'ExternalStringTag',      'value': 'kExternalStringTag' },
     { 'name': 'SlicedStringTag',        'value': 'kSlicedStringTag' },
 
+    { 'name': 'FailureTag',             'value': 'kFailureTag' },
+    { 'name': 'FailureTagMask',         'value': 'kFailureTagMask' },
     { 'name': 'HeapObjectTag',          'value': 'kHeapObjectTag' },
     { 'name': 'HeapObjectTagMask',      'value': 'kHeapObjectTagMask' },
     { 'name': 'SmiTag',                 'value': 'kSmiTag' },
@@ -92,6 +94,8 @@ consts_misc = [
         'value': 'DescriptorArray::kFirstIndex' },
     { 'name': 'prop_type_field',
         'value': 'FIELD' },
+    { 'name': 'prop_type_first_phantom',
+        'value': 'TRANSITION' },
     { 'name': 'prop_type_mask',
         'value': 'PropertyDetails::TypeField::kMask' },
     { 'name': 'prop_index_mask',
@@ -116,9 +120,9 @@ consts_misc = [
         'value': 'DICTIONARY_ELEMENTS' },
 
     { 'name': 'bit_field2_elements_kind_mask',
-        'value': 'Map::ElementsKindBits::kMask' },
+       'value': 'Map::kElementsKindMask' },
     { 'name': 'bit_field2_elements_kind_shift',
-        'value': 'Map::ElementsKindBits::kShift' },
+       'value': 'Map::kElementsKindShift' },
     { 'name': 'bit_field3_dictionary_map_shift',
         'value': 'Map::DictionaryMap::kShift' },
 
@@ -192,9 +196,9 @@ header = '''
  * This file is generated by %s.  Do not edit directly.
  */
 
-#include "src/v8.h"
-#include "src/frames.h"
-#include "src/frames-inl.h" /* for architecture-specific frame constants */
+#include "v8.h"
+#include "frames.h"
+#include "frames-inl.h" /* for architecture-specific frame constants */
 
 using namespace v8::internal;
 
index b675e18..696434d 100644 (file)
         '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
         '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
         '<(INTERMEDIATE_DIR)/snapshot.cc',
-        '../../src/snapshot-common.cc',
       ],
       'actions': [
         {
       'sources': [
         '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
         '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
-        '../../src/snapshot-common.cc',
         '../../src/snapshot-empty.cc',
       ],
       'conditions': [
               'inputs': [
                 '<(PRODUCT_DIR)/<(EXECUTABLE_PREFIX)mksnapshot<(EXECUTABLE_SUFFIX)',
               ],
+              'variables': {
+                'mksnapshot_flags': [
+                  '--log-snapshot-positions',
+                  '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
+                ],
+                'conditions': [
+                  ['v8_random_seed!=0', {
+                    'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
+                  }],
+                ],
+              },
               'conditions': [
                 ['want_separate_host_toolset==1', {
                   'target_conditions': [
                         '<(INTERMEDIATE_DIR)/snapshot.cc',
                         '<(PRODUCT_DIR)/snapshot_blob_host.bin',
                       ],
+                      'action': [
+                        '<@(_inputs)',
+                        '<@(mksnapshot_flags)',
+                        '<@(INTERMEDIATE_DIR)/snapshot.cc',
+                        '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob_host.bin',
+                      ],
                     }, {
                       'outputs': [
                         '<(INTERMEDIATE_DIR)/snapshot.cc',
                         '<(PRODUCT_DIR)/snapshot_blob.bin',
                       ],
+                      'action': [
+                        '<@(_inputs)',
+                        '<@(mksnapshot_flags)',
+                        '<@(INTERMEDIATE_DIR)/snapshot.cc',
+                        '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+                      ],
                     }],
                   ],
                 }, {
                     '<(INTERMEDIATE_DIR)/snapshot.cc',
                     '<(PRODUCT_DIR)/snapshot_blob.bin',
                   ],
+                  'action': [
+                    '<@(_inputs)',
+                    '<@(mksnapshot_flags)',
+                    '<@(INTERMEDIATE_DIR)/snapshot.cc',
+                    '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
+                  ],
                 }],
               ],
-              'variables': {
-                'mksnapshot_flags': [
-                  '--log-snapshot-positions',
-                  '--logfile', '<(INTERMEDIATE_DIR)/snapshot.log',
-                ],
-                'conditions': [
-                  ['v8_random_seed!=0', {
-                    'mksnapshot_flags': ['--random-seed', '<(v8_random_seed)'],
-                  }],
-                ],
-              },
-              'action': [
-                '<@(_inputs)',
-                '<@(mksnapshot_flags)',
-                '<@(INTERMEDIATE_DIR)/snapshot.cc',
-                '--startup_blob', '<(PRODUCT_DIR)/snapshot_blob.bin',
-              ],
             },
           ],
         }],
         '../../src/assembler.h',
         '../../src/assert-scope.h',
         '../../src/assert-scope.cc',
+        '../../src/ast-this-access-visitor.cc',
+        '../../src/ast-this-access-visitor.h',
         '../../src/ast-value-factory.cc',
         '../../src/ast-value-factory.h',
         '../../src/ast-numbering.cc',
         '../../src/compiler/code-generator-impl.h',
         '../../src/compiler/code-generator.cc',
         '../../src/compiler/code-generator.h',
+        '../../src/compiler/common-node-cache.cc',
         '../../src/compiler/common-node-cache.h',
+        '../../src/compiler/common-operator-reducer.cc',
+        '../../src/compiler/common-operator-reducer.h',
         '../../src/compiler/common-operator.cc',
         '../../src/compiler/common-operator.h',
         '../../src/compiler/control-builders.cc',
         '../../src/compiler/control-builders.h',
+        '../../src/compiler/control-equivalence.h',
         '../../src/compiler/control-reducer.cc',
         '../../src/compiler/control-reducer.h',
         '../../src/compiler/diamond.h',
         '../../src/compiler/frame.h',
         '../../src/compiler/gap-resolver.cc',
         '../../src/compiler/gap-resolver.h',
-        '../../src/compiler/generic-algorithm-inl.h',
         '../../src/compiler/generic-algorithm.h',
-        '../../src/compiler/generic-graph.h',
-        '../../src/compiler/generic-node-inl.h',
-        '../../src/compiler/generic-node.h',
         '../../src/compiler/graph-builder.cc',
         '../../src/compiler/graph-builder.h',
         '../../src/compiler/graph-inl.h',
         '../../src/compiler/js-operator.h',
         '../../src/compiler/js-typed-lowering.cc',
         '../../src/compiler/js-typed-lowering.h',
+        '../../src/compiler/jump-threading.cc',
+        '../../src/compiler/jump-threading.h',
         '../../src/compiler/linkage-impl.h',
         '../../src/compiler/linkage.cc',
         '../../src/compiler/linkage.h',
+        '../../src/compiler/load-elimination.cc',
+        '../../src/compiler/load-elimination.h',
+        '../../src/compiler/loop-analysis.cc',
+        '../../src/compiler/loop-analysis.h',
         '../../src/compiler/machine-operator-reducer.cc',
         '../../src/compiler/machine-operator-reducer.h',
         '../../src/compiler/machine-operator.cc',
         '../../src/compiler/machine-operator.h',
         '../../src/compiler/machine-type.cc',
         '../../src/compiler/machine-type.h',
+        '../../src/compiler/move-optimizer.cc',
+        '../../src/compiler/move-optimizer.h',
         '../../src/compiler/node-aux-data-inl.h',
         '../../src/compiler/node-aux-data.h',
         '../../src/compiler/node-cache.cc',
         '../../src/compiler/node-properties.h',
         '../../src/compiler/node.cc',
         '../../src/compiler/node.h',
+        '../../src/compiler/opcodes.cc',
         '../../src/compiler/opcodes.h',
-        '../../src/compiler/operator-properties-inl.h',
+        '../../src/compiler/operator-properties.cc',
         '../../src/compiler/operator-properties.h',
         '../../src/compiler/operator.cc',
         '../../src/compiler/operator.h',
-        '../../src/compiler/phi-reducer.h',
         '../../src/compiler/pipeline.cc',
         '../../src/compiler/pipeline.h',
         '../../src/compiler/pipeline-statistics.cc',
         '../../src/compiler/raw-machine-assembler.h',
         '../../src/compiler/register-allocator.cc',
         '../../src/compiler/register-allocator.h',
+        '../../src/compiler/register-allocator-verifier.cc',
+        '../../src/compiler/register-allocator-verifier.h',
         '../../src/compiler/register-configuration.cc',
         '../../src/compiler/register-configuration.h',
         '../../src/compiler/representation-change.h',
         '../../src/jsregexp-inl.h',
         '../../src/jsregexp.cc',
         '../../src/jsregexp.h',
+        '../../src/layout-descriptor-inl.h',
+        '../../src/layout-descriptor.cc',
+        '../../src/layout-descriptor.h',
         '../../src/list-inl.h',
         '../../src/list.h',
         '../../src/lithium-allocator-inl.h',
         '../../src/runtime/runtime-utils.h',
         '../../src/runtime/runtime.cc',
         '../../src/runtime/runtime.h',
-        '../../src/runtime/string-builder.h',
         '../../src/safepoint-table.cc',
         '../../src/safepoint-table.h',
         '../../src/sampler.cc',
         '../../src/small-pointer-list.h',
         '../../src/smart-pointers.h',
         '../../src/snapshot.h',
+        '../../src/snapshot-common.cc',
         '../../src/snapshot-source-sink.cc',
         '../../src/snapshot-source-sink.h',
+        '../../src/string-builder.cc',
+        '../../src/string-builder.h',
         '../../src/string-search.cc',
         '../../src/string-search.h',
         '../../src/string-stream.cc',
             '../../src/mips64/regexp-macro-assembler-mips64.cc',
             '../../src/mips64/regexp-macro-assembler-mips64.h',
             '../../src/mips64/simulator-mips64.cc',
+            '../../src/compiler/mips64/code-generator-mips64.cc',
+            '../../src/compiler/mips64/instruction-codes-mips64.h',
+            '../../src/compiler/mips64/instruction-selector-mips64.cc',
+            '../../src/compiler/mips64/linkage-mips64.cc',
             '../../src/ic/mips64/access-compiler-mips64.cc',
             '../../src/ic/mips64/handler-compiler-mips64.cc',
             '../../src/ic/mips64/ic-mips64.cc',
             '../../src/compiler/x64/linkage-x64.cc',
           ],
         }],
-        ['OS=="linux"', {
-            'link_settings': {
-              'conditions': [
-                ['v8_compress_startup_data=="bz2"', {
-                  'libraries': [
-                    '-lbz2',
-                  ]
-                }],
-              ],
-            },
-          }
-        ],
         ['OS=="win"', {
           'variables': {
             'gyp_generators': '<!(echo $GYP_GENERATORS)',
         '../../src/base/flags.h',
         '../../src/base/functional.cc',
         '../../src/base/functional.h',
+        '../../src/base/iterator.h',
         '../../src/base/lazy-instance.h',
         '../../src/base/logging.cc',
         '../../src/base/logging.h',
         ['OS=="solaris"', {
             'link_settings': {
               'libraries': [
-                '-lnsl',
+                '-lnsl -lrt',
             ]},
             'sources': [
               '../../src/base/platform/platform-solaris.cc',
           '../../src/generator.js',
           '../../src/harmony-string.js',
           '../../src/harmony-array.js',
+          '../../src/harmony-array-includes.js',
           '../../src/harmony-tostring.js',
           '../../src/harmony-typedarray.js',
           '../../src/harmony-classes.js',
+          '../../src/harmony-templates.js',
+          '../../src/harmony-regexp.js'
         ],
         'libraries_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries.bin',
         'libraries_experimental_bin_file': '<(SHARED_INTERMEDIATE_DIR)/libraries-experimental.bin',
             '../../tools/js2c.py',
             '<(SHARED_INTERMEDIATE_DIR)/libraries.cc',
             'CORE',
-            '<(v8_compress_startup_data)',
             '<@(library_files)',
             '<@(i18n_library_files)',
           ],
             '../../tools/js2c.py',
             '<(SHARED_INTERMEDIATE_DIR)/experimental-libraries.cc',
             'EXPERIMENTAL',
-            '<(v8_compress_startup_data)',
             '<@(experimental_library_files)'
           ],
           'conditions': [
         }, {
           'toolsets': ['target'],
         }],
-        ['v8_compress_startup_data=="bz2"', {
-          'libraries': [
-            '-lbz2',
-          ]
-        }],
       ],
     },
   ],
index 0d85faf..621ed5a 100755 (executable)
@@ -255,8 +255,6 @@ namespace internal {
 
 %(sources_declaration)s\
 
-%(raw_sources_declaration)s\
-
   template <>
   int NativesCollection<%(type)s>::GetBuiltinsCount() {
     return %(builtin_count)i;
@@ -274,13 +272,8 @@ namespace internal {
   }
 
   template <>
-  int NativesCollection<%(type)s>::GetRawScriptsSize() {
-    return %(raw_total_length)i;
-  }
-
-  template <>
-  Vector<const char> NativesCollection<%(type)s>::GetRawScriptSource(int index) {
-%(get_raw_script_source_cases)s\
+  Vector<const char> NativesCollection<%(type)s>::GetScriptSource(int index) {
+%(get_script_source_cases)s\
     return Vector<const char>("", 0);
   }
 
@@ -291,32 +284,15 @@ namespace internal {
   }
 
   template <>
-  Vector<const byte> NativesCollection<%(type)s>::GetScriptsSource() {
-    return Vector<const byte>(sources, %(total_length)i);
-  }
-
-  template <>
-  void NativesCollection<%(type)s>::SetRawScriptsSource(Vector<const char> raw_source) {
-    DCHECK(%(raw_total_length)i == raw_source.length());
-    raw_sources = raw_source.start();
+  Vector<const char> NativesCollection<%(type)s>::GetScriptsSource() {
+    return Vector<const char>(sources, %(total_length)i);
   }
-
 }  // internal
 }  // v8
 """
 
 SOURCES_DECLARATION = """\
-  static const byte sources[] = { %s };
-"""
-
-
-RAW_SOURCES_COMPRESSION_DECLARATION = """\
-  static const char* raw_sources = NULL;
-"""
-
-
-RAW_SOURCES_DECLARATION = """\
-  static const char* raw_sources = reinterpret_cast<const char*>(sources);
+  static const char sources[] = { %s };
 """
 
 
@@ -325,8 +301,8 @@ GET_INDEX_CASE = """\
 """
 
 
-GET_RAW_SCRIPT_SOURCE_CASE = """\
-    if (index == %(i)i) return Vector<const char>(raw_sources + %(offset)i, %(raw_length)i);
+GET_SCRIPT_SOURCE_CASE = """\
+    if (index == %(i)i) return Vector<const char>(sources + %(offset)i, %(source_length)i);
 """
 
 
@@ -440,7 +416,7 @@ def BuildMetadata(sources, source_bytes, native_type):
   # Loop over modules and build up indices into the source blob:
   get_index_cases = []
   get_script_name_cases = []
-  get_raw_script_source_cases = []
+  get_script_source_cases = []
   offset = 0
   for i in xrange(len(sources.modules)):
     native_name = "native %s.js" % sources.names[i]
@@ -450,53 +426,27 @@ def BuildMetadata(sources, source_bytes, native_type):
         "name": native_name,
         "length": len(native_name),
         "offset": offset,
-        "raw_length": len(sources.modules[i]),
+        "source_length": len(sources.modules[i]),
     }
     get_index_cases.append(GET_INDEX_CASE % d)
     get_script_name_cases.append(GET_SCRIPT_NAME_CASE % d)
-    get_raw_script_source_cases.append(GET_RAW_SCRIPT_SOURCE_CASE % d)
+    get_script_source_cases.append(GET_SCRIPT_SOURCE_CASE % d)
     offset += len(sources.modules[i])
   assert offset == len(raw_sources)
 
-  # If we have the raw sources we can declare them accordingly.
-  have_raw_sources = source_bytes == raw_sources
-  raw_sources_declaration = (RAW_SOURCES_DECLARATION
-      if have_raw_sources else RAW_SOURCES_COMPRESSION_DECLARATION)
-
   metadata = {
     "builtin_count": len(sources.modules),
     "debugger_count": sum(sources.is_debugger_id),
     "sources_declaration": SOURCES_DECLARATION % ToCArray(source_bytes),
-    "raw_sources_declaration": raw_sources_declaration,
-    "raw_total_length": sum(map(len, sources.modules)),
     "total_length": total_length,
     "get_index_cases": "".join(get_index_cases),
-    "get_raw_script_source_cases": "".join(get_raw_script_source_cases),
+    "get_script_source_cases": "".join(get_script_source_cases),
     "get_script_name_cases": "".join(get_script_name_cases),
     "type": native_type,
   }
   return metadata
 
 
-def CompressMaybe(sources, compression_type):
-  """Take the prepared sources and generate a sequence of bytes.
-
-  Args:
-    sources: A Sources instance with the prepared sourced.
-    compression_type: string, describing the desired compression.
-
-  Returns:
-    A sequence of bytes.
-  """
-  sources_bytes = "".join(sources.modules)
-  if compression_type == "off":
-    return sources_bytes
-  elif compression_type == "bz2":
-    return bz2.compress(sources_bytes)
-  else:
-    raise Error("Unknown compression type %s." % compression_type)
-
-
 def PutInt(blob_file, value):
   assert(value >= 0 and value < (1 << 28))
   if (value < 1 << 6):
@@ -545,9 +495,9 @@ def WriteStartupBlob(sources, startup_blob):
   output.close()
 
 
-def JS2C(source, target, native_type, compression_type, raw_file, startup_blob):
+def JS2C(source, target, native_type, raw_file, startup_blob):
   sources = PrepareSources(source)
-  sources_bytes = CompressMaybe(sources, compression_type)
+  sources_bytes = "".join(sources.modules)
   metadata = BuildMetadata(sources, sources_bytes, native_type)
 
   # Optionally emit raw file.
@@ -571,14 +521,13 @@ def main():
                     help="file to write the processed sources array to.")
   parser.add_option("--startup_blob", action="store",
                     help="file to write the startup blob to.")
-  parser.set_usage("""js2c out.cc type compression sources.js ...
+  parser.set_usage("""js2c out.cc type sources.js ...
       out.cc: C code to be generated.
       type: type parameter for NativesCollection template.
-      compression: type of compression used. [off|bz2]
       sources.js: JS internal sources or macros.py.""")
   (options, args) = parser.parse_args()
 
-  JS2C(args[3:], args[0], args[1], args[2], options.raw, options.startup_blob)
+  JS2C(args[2:], args[0], args[1], options.raw, options.startup_blob)
 
 
 if __name__ == "__main__":
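
With startup-data compression gone, js2c keeps a single uncompressed char array, and the JS2C entry point loses its compression argument, shifting the positional command-line arguments down by one. A sketch of the new call shape (file names here are illustrative):

    # Illustrative only: the post-change signature is
    # JS2C(source, target, native_type, raw_file, startup_blob).
    JS2C(["macros.py", "runtime.js"],  # sources.js ...          (args[2:])
         "libraries.cc",               # out.cc                  (args[0])
         "CORE",                       # NativesCollection type  (args[1])
         None,                         # optional --raw output file
         None)                         # optional --startup_blob output file
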
index 3b58084..321d291 100755 (executable)
@@ -327,16 +327,25 @@ class SourceProcessor(SourceFileProcessor):
     return (super(SourceProcessor, self).IgnoreDir(name) or
             name in ('third_party', 'gyp', 'out', 'obj', 'DerivedSources'))
 
-  IGNORE_COPYRIGHTS = ['cpplint.py',
+  IGNORE_COPYRIGHTS = ['box2d.js',
+                       'cpplint.py',
+                       'copy.js',
+                       'corrections.js',
+                       'crypto.js',
                        'daemon.py',
                        'earley-boyer.js',
-                       'raytrace.js',
-                       'crypto.js',
+                       'fannkuch.js',
+                       'fasta.js',
+                       'jsmin.py',
                        'libraries.cc',
                        'libraries-empty.cc',
-                       'jsmin.py',
+                       'lua_binarytrees.js',
+                       'memops.js',
+                       'primes.js',
+                       'raytrace.js',
                        'regexp-pcre.js',
-                       'gnuplot-4.6.3-emscripten.js']
+                       'gnuplot-4.6.3-emscripten.js',
+                       'zlib.js']
   IGNORE_TABS = IGNORE_COPYRIGHTS + ['unicode-test.js', 'html-comments.js']
 
   def EndOfDeclaration(self, line):
index b0f1b26..34afa4a 100755 (executable)
@@ -70,13 +70,12 @@ class CheckTreeStatus(Step):
                % self["tree_message"])
 
 
-class FetchLKGR(Step):
-  MESSAGE = "Fetching V8 LKGR."
+class FetchCandidate(Step):
+  MESSAGE = "Fetching V8 roll candidate ref."
 
   def RunStep(self):
-    lkgr_url = "https://v8-status.appspot.com/lkgr"
-    # Retry several times since app engine might have issues.
-    self["lkgr"] = self.ReadURL(lkgr_url, wait_plan=[5, 20, 300, 300])
+    self.Git("fetch origin +refs/heads/candidate:refs/heads/candidate")
+    self["candidate"] = self.Git("show-ref -s refs/heads/candidate").strip()
 
 
 class CheckLastPush(Step):
@@ -94,8 +93,8 @@ class CheckLastPush(Step):
       self.Die("Could not retrieve bleeding edge revision for trunk push %s"
                % last_push)
 
-    if self["lkgr"] == last_push_be:
-      print "Already pushed current lkgr %s" % last_push_be
+    if self["candidate"] == last_push_be:
+      print "Already pushed current candidate %s" % last_push_be
       return True
 
 
@@ -103,21 +102,15 @@ class PushToCandidates(Step):
   MESSAGE = "Pushing to candidates if specified."
 
   def RunStep(self):
-    print "Pushing lkgr %s to candidates." % self["lkgr"]
+    print "Pushing candidate %s to candidates." % self["candidate"]
 
     args = [
       "--author", self._options.author,
       "--reviewer", self._options.reviewer,
-      "--revision", self["lkgr"],
+      "--revision", self["candidate"],
       "--force",
     ]
 
-    if self._options.svn:
-      args.extend(["--svn", self._options.svn])
-    if self._options.svn_config:
-      args.extend(["--svn-config", self._options.svn_config])
-    if self._options.vc_interface:
-      args.extend(["--vc-interface", self._options.vc_interface])
     if self._options.work_dir:
       args.extend(["--work-dir", self._options.work_dir])
 
@@ -150,7 +143,7 @@ class AutoPush(ScriptsBase):
       Preparation,
       CheckAutoPushSettings,
       CheckTreeStatus,
-      FetchLKGR,
+      FetchCandidate,
       CheckLastPush,
       PushToCandidates,
     ]
index 4a10b86..647708c 100755 (executable)
@@ -198,13 +198,10 @@ class ChangeVersion(Step):
       msg = "[Auto-roll] Bump up version to %s" % self["new_version"]
       self.GitCommit("%s\n\nTBR=%s" % (msg, self._options.author),
                      author=self._options.author)
-      if self._options.svn:
-        self.SVNCommit("branches/bleeding_edge", msg)
-      else:
-        self.GitUpload(author=self._options.author,
-                       force=self._options.force_upload,
-                       bypass_hooks=True)
-        self.GitDCommit()
+      self.GitUpload(author=self._options.author,
+                     force=self._options.force_upload,
+                     bypass_hooks=True)
+      self.GitCLLand()
       print "Successfully changed the version."
     finally:
       # Clean up.
diff --git a/deps/v8/tools/push-to-trunk/check_clusterfuzz.py b/deps/v8/tools/push-to-trunk/check_clusterfuzz.py
new file mode 100755 (executable)
index 0000000..d4ba90b
--- /dev/null
+++ b/deps/v8/tools/push-to-trunk/check_clusterfuzz.py
@@ -0,0 +1,174 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+"""
+Script to check for new clusterfuzz issues since the last rolled v8 revision.
+
+Returns a json list with test case IDs if any.
+
+Security considerations: The security key and request data must never be
+written to public logs. Public automated callers of this script should
+suppress stdout and stderr and only process contents of the results_file.
+"""
+
+
+import argparse
+import httplib
+import json
+import os
+import re
+import sys
+import urllib
+import urllib2
+
+
+# Constants to git repos.
+BASE_URL = "https://chromium.googlesource.com"
+DEPS_LOG = BASE_URL + "/chromium/src/+log/master/DEPS?format=JSON"
+
+# Constants for retrieving v8 rolls.
+CRREV = "https://cr-rev.appspot.com/_ah/api/crrev/v1/commit/%s"
+V8_COMMIT_RE = re.compile(
+    r"^Update V8 to version \d+\.\d+\.\d+ \(based on ([a-fA-F0-9]+)\)\..*")
+
+# Constants for the clusterfuzz backend.
+HOSTNAME = "backend-dot-cluster-fuzz.appspot.com"
+
+# Crash patterns.
+V8_INTERNAL_RE = re.compile(r"^v8::internal.*")
+ANY_RE = re.compile(r".*")
+
+# List of all api requests.
+BUG_SPECS = [
+  {
+    "args": {
+      "job_type": "linux_asan_chrome_v8",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": V8_INTERNAL_RE,
+  },
+  {
+    "args": {
+      "job_type": "linux_asan_d8_dbg",
+      "reproducible": "True",
+      "open": "True",
+      "bug_information": "",
+    },
+    "crash_state": ANY_RE,
+  },
+]
+
+
+def GetRequest(url):
+  url_fh = urllib2.urlopen(url, None, 60)
+  try:
+    return url_fh.read()
+  finally:
+    url_fh.close()
+
+
+def GetLatestV8InChromium():
+  """Returns the commit position number of the latest v8 roll in chromium."""
+
+  # Check currently rolled v8 revision.
+  result = GetRequest(DEPS_LOG)
+  if not result:
+    return None
+
+  # Strip security header and load json.
+  commits = json.loads(result[5:])
+
+  git_revision = None
+  for commit in commits["log"]:
+    # Get latest commit that matches the v8 roll pattern. Ignore cherry-picks.
+    match = re.match(V8_COMMIT_RE, commit["message"])
+    if match:
+      git_revision = match.group(1)
+      break
+  else:
+    return None
+
+  # Get commit position number for v8 revision.
+  result = GetRequest(CRREV % git_revision)
+  if not result:
+    return None
+
+  commit = json.loads(result)
+  assert commit["repo"] == "v8/v8"
+  return commit["number"]
+
+
+def APIRequest(key, **params):
+  """Send a request to the clusterfuzz api.
+
+  Returns a json dict of the response.
+  """
+
+  params["api_key"] = key
+  params = urllib.urlencode(params)
+
+  headers = {"Content-type": "application/x-www-form-urlencoded"}
+
+  try:
+    conn = httplib.HTTPSConnection(HOSTNAME)
+    conn.request("POST", "/_api/", params, headers)
+
+    response = conn.getresponse()
+
+    # Never leak "data" into public logs.
+    data = response.read()
+  except:
+    raise Exception("ERROR: Connection problem.")
+
+  try:
+    return json.loads(data)
+  except:
+    raise Exception("ERROR: Could not read response. Is your key valid?")
+
+  return None
+
+
+def Main():
+  parser = argparse.ArgumentParser()
+  parser.add_argument("-k", "--key-file", required=True,
+                      help="A file with the clusterfuzz api key.")
+  parser.add_argument("-r", "--results-file",
+                      help="A file to write the results to.")
+  options = parser.parse_args()
+
+  # Get api key. The key's content must never be logged.
+  assert options.key_file
+  with open(options.key_file) as f:
+    key = f.read().strip()
+  assert key
+
+  revision_number = GetLatestV8InChromium()
+
+  results = []
+  for spec in BUG_SPECS:
+    args = dict(spec["args"])
+    # Use the incremented revision, as we're interested in all revisions
+    # greater than what's currently rolled into chromium.
+    if revision_number:
+      args["revision_greater_or_equal"] = str(int(revision_number) + 1)
+
+    # Never print issue details in public logs.
+    issues = APIRequest(key, **args)
+    assert issues is not None
+    for issue in issues:
+      if re.match(spec["crash_state"], issue["crash_state"]):
+        results.append(issue["id"])
+
+  if options.results_file:
+    with open(options.results_file, "w") as f:
+      f.write(json.dumps(results))
+  else:
+    print results
+
+
+if __name__ == "__main__":
+  sys.exit(Main())
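
One subtlety in GetLatestV8InChromium above: gitiles JSON endpoints such as DEPS_LOG prepend the anti-XSSI prefix ")]}'" plus a newline, which is why the code slices off the first five bytes before parsing. A self-contained sketch with a fabricated response body:

    import json
    import re

    V8_COMMIT_RE = re.compile(
        r"^Update V8 to version \d+\.\d+\.\d+ \(based on ([a-fA-F0-9]+)\)\..*")

    # Fabricated response; real payloads come from the DEPS_LOG URL.
    raw = ")]}'\n" + json.dumps(
        {"log": [{"message":
                  "Update V8 to version 3.31.74 (based on abc123)."}]})
    commits = json.loads(raw[5:])  # drop the 5-byte anti-XSSI prefix
    print V8_COMMIT_RE.match(commits["log"][0]["message"]).group(1)  # abc123
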
index bb040f5..ac78ef8 100644 (file)
@@ -45,6 +45,7 @@ import urllib2
 from git_recipes import GitRecipesMixin
 from git_recipes import GitFailedException
 
+CHANGELOG_FILE = "ChangeLog"
 VERSION_FILE = os.path.join("src", "version.cc")
 
 # V8 base directory.
@@ -271,12 +272,6 @@ class VCInterface(object):
   def GetBranches(self):
     raise NotImplementedError()
 
-  def GitSvn(self, hsh, branch=""):
-    raise NotImplementedError()
-
-  def SvnGit(self, rev, branch=""):
-    raise NotImplementedError()
-
   def MasterBranch(self):
     raise NotImplementedError()
 
@@ -292,15 +287,9 @@ class VCInterface(object):
   def RemoteBranch(self, name):
     raise NotImplementedError()
 
-  def Land(self):
-    raise NotImplementedError()
-
   def CLLand(self):
     raise NotImplementedError()
 
-  # TODO(machenbach): There is some svn knowledge in this interface. In svn,
-  # tag and commit are different remote commands, while in git we would commit
-  # and tag locally and then push/land in one unique step.
   def Tag(self, tag, remote, message):
     """Sets a tag for the current commit.
 
@@ -309,68 +298,12 @@ class VCInterface(object):
     raise NotImplementedError()
 
 
-class GitSvnInterface(VCInterface):
-  def Pull(self):
-    self.step.GitSVNRebase()
-
-  def Fetch(self):
-    self.step.GitSVNFetch()
-
-  def GetTags(self):
-    # Get remote tags.
-    tags = filter(lambda s: re.match(r"^svn/tags/[\d+\.]+$", s),
-                  self.step.GitRemotes())
-
-    # Remove 'svn/tags/' prefix.
-    return map(lambda s: s[9:], tags)
-
-  def GetBranches(self):
-    # Get relevant remote branches, e.g. "svn/3.25".
-    branches = filter(lambda s: re.match(r"^svn/\d+\.\d+$", s),
-                      self.step.GitRemotes())
-    # Remove 'svn/' prefix.
-    return map(lambda s: s[4:], branches)
-
-  def GitSvn(self, hsh, branch=""):
-    return self.step.GitSVNFindSVNRev(hsh, branch)
-
-  def SvnGit(self, rev, branch=""):
-    return self.step.GitSVNFindGitHash(rev, branch)
-
-  def MasterBranch(self):
-    return "bleeding_edge"
-
-  def CandidateBranch(self):
-    return "trunk"
-
-  def RemoteMasterBranch(self):
-    return "svn/bleeding_edge"
-
-  def RemoteCandidateBranch(self):
-    return "svn/trunk"
-
-  def RemoteBranch(self, name):
-    return "svn/%s" % name
-
-  def Land(self):
-    self.step.GitSVNDCommit()
-
-  def CLLand(self):
-    self.step.GitDCommit()
-
-  def Tag(self, tag, remote, _):
-    self.step.GitSVNFetch()
-    self.step.Git("rebase %s" % remote)
-    self.step.GitSVNTag(tag)
-
-
-class GitTagsOnlyMixin(VCInterface):
+class GitInterface(VCInterface):
   def Pull(self):
     self.step.GitPull()
 
   def Fetch(self):
     self.step.Git("fetch")
-    self.step.GitSVNFetch()
 
   def GetTags(self):
      return self.step.Git("tag").strip().splitlines()
@@ -400,9 +333,6 @@ class GitTagsOnlyMixin(VCInterface):
       return "origin/%s" % name
     return "branch-heads/%s" % name
 
-  def PushRef(self, ref):
-    self.step.Git("push origin %s" % ref)
-
   def Tag(self, tag, remote, message):
     # Wait for the commit to appear. Assumes unique commit message titles (this
     # is the case for all automated merge and push commits - also no title is
@@ -421,42 +351,11 @@ class GitTagsOnlyMixin(VCInterface):
                     "git updater is lagging behind?")
 
     self.step.Git("tag %s %s" % (tag, commit))
-    self.PushRef(tag)
-
-
-class GitReadSvnWriteInterface(GitTagsOnlyMixin, GitSvnInterface):
-  pass
-
-
-class GitInterface(GitTagsOnlyMixin):
-  def Fetch(self):
-    self.step.Git("fetch")
-
-  def GitSvn(self, hsh, branch=""):
-    return ""
-
-  def SvnGit(self, rev, branch=""):
-    raise NotImplementedError()
-
-  def Land(self):
-    # FIXME(machenbach): This will not work with checkouts from bot_update
-    # after flag day because it will push to the cache. Investigate if it
-    # will work with "cl land".
-    self.step.Git("push origin")
+    self.step.Git("push origin %s" % tag)
 
   def CLLand(self):
     self.step.GitCLLand()
 
-  def PushRef(self, ref):
-    self.step.Git("push https://chromium.googlesource.com/v8/v8 %s" % ref)
-
-
-VC_INTERFACES = {
-  "git_svn": GitSvnInterface,
-  "git_read_svn_write": GitReadSvnWriteInterface,
-  "git": GitInterface,
-}
-
 
 class Step(GitRecipesMixin):
   def __init__(self, text, number, config, state, options, handler):
@@ -466,7 +365,7 @@ class Step(GitRecipesMixin):
     self._state = state
     self._options = options
     self._side_effect_handler = handler
-    self.vc = VC_INTERFACES[options.vc_interface]()
+    self.vc = GitInterface()
     self.vc.InjectStep(self)
 
     # The testing configuration might set a different default cwd.
@@ -560,11 +459,6 @@ class Step(GitRecipesMixin):
       raise GitFailedException("'git %s' failed." % args)
     return result
 
-  def SVN(self, args="", prefix="", pipe=True, retry_on=None, cwd=None):
-    cmd = lambda: self._side_effect_handler.Command(
-        "svn", args, prefix, pipe, cwd=cwd or self.default_cwd)
-    return self.Retry(cmd, retry_on, [5, 30])
-
   def Editor(self, args):
     if self._options.requires_editor:
       return self._side_effect_handler.Command(
@@ -726,34 +620,6 @@ class Step(GitRecipesMixin):
       output += "%s\n" % line
     TextToFile(output, version_file)
 
-  def SVNCommit(self, root, commit_message):
-    patch = self.GitDiff("HEAD^", "HEAD")
-    TextToFile(patch, self._config["PATCH_FILE"])
-    self.Command("svn", "update", cwd=self._options.svn)
-    if self.Command("svn", "status", cwd=self._options.svn) != "":
-      self.Die("SVN checkout not clean.")
-    if not self.Command("patch", "-d %s -p1 -i %s" %
-                        (root, self._config["PATCH_FILE"]),
-                        cwd=self._options.svn):
-      self.Die("Could not apply patch.")
-    for line in self.Command(
-        "svn", "status", cwd=self._options.svn).splitlines():
-      # Check for added and removed items. Svn status has seven status columns.
-      # The first contains ? for unknown and ! for missing.
-      match = re.match(r"^(.)...... (.*)$", line)
-      if match and match.group(1) == "?":
-        self.Command("svn", "add --force %s" % match.group(2),
-                     cwd=self._options.svn)
-      if match and match.group(1) == "!":
-        self.Command("svn", "delete --force %s" % match.group(2),
-                     cwd=self._options.svn)
-
-    self.Command(
-        "svn",
-        "commit --non-interactive --username=%s --config-dir=%s -m \"%s\"" %
-            (self._options.author, self._options.svn_config, commit_message),
-        cwd=self._options.svn)
-
 
 class BootstrapStep(Step):
   MESSAGE = "Bootstapping v8 checkout."
@@ -872,17 +738,9 @@ class ScriptsBase(object):
                         help=("Determine current sheriff to review CLs. On "
                               "success, this will overwrite the reviewer "
                               "option."))
-    parser.add_argument("--svn",
-                        help=("Optional full svn checkout for the commit."
-                              "The folder needs to be the svn root."))
-    parser.add_argument("--svn-config",
-                        help=("Optional folder used as svn --config-dir."))
     parser.add_argument("-s", "--step",
         help="Specify the step where to start work. Default: 0.",
         default=0, type=int)
-    parser.add_argument("--vc-interface",
-                        help=("Choose VC interface out of git_svn|"
-                              "git_read_svn_write."))
     parser.add_argument("--work-dir",
                         help=("Location where to bootstrap a working v8 "
                               "checkout."))
@@ -902,10 +760,6 @@ class ScriptsBase(object):
       print "To determine the current sheriff, requires the googler mapping"
       parser.print_help()
       return None
-    if options.svn and not options.svn_config:
-      print "Using pure svn for committing requires also --svn-config"
-      parser.print_help()
-      return None
 
     # Defaults for options, common to all scripts.
     options.manual = getattr(options, "manual", True)
@@ -923,8 +777,6 @@ class ScriptsBase(object):
       parser.print_help()
       return None
 
-    if not options.vc_interface:
-      options.vc_interface = "git_read_svn_write"
     if not options.work_dir:
       options.work_dir = "/tmp/v8-release-scripts-work-dir"
     return options
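
The surviving GitInterface.Tag has to cope with replication lag: after landing, the commit only becomes visible once the git mirror updates, so the method polls before tagging. A simplified stand-in for that wait loop (attempt count and delay are assumptions, not the class's actual values):

    import time

    def WaitForLandedCommit(lookup, attempts=5, wait_seconds=10):
      """lookup() returns the landed commit's hash, or "" if not yet
      visible -- an assumed interface for this sketch."""
      for _ in range(attempts):
        commit = lookup()
        if commit:
          return commit
        time.sleep(wait_seconds)
      raise Exception("Couldn't determine commit for tagging. Maybe the "
                      "git updater is lagging behind?")
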
index 1b1887b..3d2a9ef 100644 (file)
@@ -45,7 +45,7 @@ GIT_SVN_ID_FOOTER_KEY = 'git-svn-id'
 
 # e.g., git-svn-id: https://v8.googlecode.com/svn/trunk@23117
 #     ce2b1a6d-e550-0410-aec6-3dcde31c8c00
-GIT_SVN_ID_RE = re.compile(r'((?:\w+)://[^@]+)@(\d+)\s+(?:[a-zA-Z0-9\-]+)')
+GIT_SVN_ID_RE = re.compile(r'[^@]+@(\d+)\s+(?:[a-zA-Z0-9\-]+)')
 
 
 # Copied from bot_update.py.
@@ -231,10 +231,6 @@ class GitRecipesMixin(object):
   def GitPresubmit(self, **kwargs):
     self.Git("cl presubmit", "PRESUBMIT_TREE_CHECK=\"skip\"", **kwargs)
 
-  def GitDCommit(self, **kwargs):
-    self.Git(
-        "cl dcommit -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs)
-
   def GitCLLand(self, **kwargs):
     self.Git(
         "cl land -f --bypass-hooks", retry_on=lambda x: x is None, **kwargs)
@@ -248,17 +244,6 @@ class GitRecipesMixin(object):
   def GitFetchOrigin(self, **kwargs):
     self.Git("fetch origin", **kwargs)
 
-  def GitConvertToSVNRevision(self, git_hash, **kwargs):
-    result = self.Git(MakeArgs(["rev-list", "-n", "1", git_hash]), **kwargs)
-    if not result or not SHA1_RE.match(result):
-      raise GitFailedException("Git hash %s is unknown." % git_hash)
-    log = self.GitLog(n=1, format="%B", git_hash=git_hash, **kwargs)
-    for line in reversed(log.splitlines()):
-      match = ROLL_DEPS_GIT_SVN_ID_RE.match(line.strip())
-      if match:
-        return match.group(1)
-    raise GitFailedException("Couldn't convert %s to SVN." % git_hash)
-
   @Strip
   # Copied from bot_update.py and modified for svn-like numbers only.
   def GetCommitPositionNumber(self, git_hash, **kwargs):
@@ -285,39 +270,6 @@ class GitRecipesMixin(object):
     if value:
       match = GIT_SVN_ID_RE.match(value)
       if match:
-        return match.group(2)
-    return None
-
-  ### Git svn stuff
-
-  def GitSVNFetch(self, **kwargs):
-    self.Git("svn fetch", **kwargs)
-
-  def GitSVNRebase(self, **kwargs):
-    self.Git("svn rebase", **kwargs)
-
-  # TODO(machenbach): Unused? Remove.
-  @Strip
-  def GitSVNLog(self, **kwargs):
-    return self.Git("svn log -1 --oneline", **kwargs)
-
-  @Strip
-  def GitSVNFindGitHash(self, revision, branch="", **kwargs):
-    assert revision
-    args = MakeArgs(["svn find-rev", "r%s" % revision, branch])
-
-    # Pick the last line if multiple lines are available. The first lines might
-    # print information about rebuilding the svn-git mapping.
-    return self.Git(args, **kwargs).splitlines()[-1]
-
-  @Strip
-  def GitSVNFindSVNRev(self, git_hash, branch="", **kwargs):
-    return self.Git(MakeArgs(["svn find-rev", git_hash, branch]), **kwargs)
-
-  def GitSVNDCommit(self, **kwargs):
-    return self.Git("svn dcommit 2>&1", retry_on=lambda x: x is None, **kwargs)
-
-  def GitSVNTag(self, version, **kwargs):
-    self.Git(("svn tag %s -m \"Tagging version %s\"" % (version, version)),
-             retry_on=lambda x: x is None,
-             **kwargs)
+        return match.group(1)
+    raise GitFailedException("Couldn't determine commit position for %s" %
+                             git_hash)
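
The tightened GIT_SVN_ID_RE keeps only the revision number in group(1), and GetCommitPositionNumber now raises instead of silently returning None when neither a Cr-Commit-Position nor a git-svn-id footer yields one. Checking the new pattern against the example footer quoted in the file's own comment:

    import re

    GIT_SVN_ID_RE = re.compile(r'[^@]+@(\d+)\s+(?:[a-zA-Z0-9\-]+)')
    footer = ("https://v8.googlecode.com/svn/trunk@23117 "
              "ce2b1a6d-e550-0410-aec6-3dcde31c8c00")
    print GIT_SVN_ID_RE.match(footer).group(1)  # "23117"
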
index da9d310..9e7f1fb 100755 (executable)
@@ -276,9 +276,6 @@ class MergeToBranch(ScriptsBase):
     # CC ulan to make sure that fixes are merged to Google3.
     options.cc = "ulan@chromium.org"
 
-    # Thd old git-svn workflow is deprecated for this script.
-    assert options.vc_interface != "git_svn"
-
     # Make sure to use git hashes in the new workflows.
     for revision in options.revisions:
       if (IsSvnNumber(revision) or
index 0df548b..6e821f2 100755 (executable)
@@ -34,7 +34,6 @@ import urllib2
 
 from common_includes import *
 
-PUSH_MSG_SVN_RE = re.compile(r".* \(based on bleeding_edge revision r(\d+)\)$")
 PUSH_MSG_GIT_SUFFIX = " (based on %s)"
 PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
 
@@ -93,18 +92,8 @@ class DetectLastPush(Step):
       # Retrieve the bleeding edge revision of the last push from the text in
       # the push commit message.
       last_push_title = self.GitLog(n=1, format="%s", git_hash=last_push)
-      # TODO(machenbach): This is only needed for the git transition. Can be
-      # removed after one successful trunk push.
-      match = PUSH_MSG_SVN_RE.match(last_push_title)
-      if match:
-        last_push_be_svn = match.group(1)
-        if not last_push_be_svn:  # pragma: no cover
-          self.Die("Could not retrieve bleeding edge rev for trunk push %s"
-                   % last_push)
-        last_push_bleeding_edge = self.vc.SvnGit(last_push_be_svn)
-      else:
-        last_push_bleeding_edge = PUSH_MSG_GIT_RE.match(
-            last_push_title).group("git_rev")
+      last_push_bleeding_edge = PUSH_MSG_GIT_RE.match(
+          last_push_title).group("git_rev")
 
       if not last_push_bleeding_edge:  # pragma: no cover
         self.Die("Could not retrieve bleeding edge git hash for trunk push %s"
@@ -319,12 +308,11 @@ class AddChangeLog(Step):
     # The change log has been modified by the patch. Reset it to the version
     # on trunk and apply the exact changes determined by this PrepareChangeLog
     # step above.
-    self.GitCheckoutFile(self.Config("CHANGELOG_FILE"),
-                         self.vc.RemoteCandidateBranch())
+    self.GitCheckoutFile(CHANGELOG_FILE, self.vc.RemoteCandidateBranch())
     changelog_entry = FileToText(self.Config("CHANGELOG_ENTRY_FILE"))
-    old_change_log = FileToText(self.Config("CHANGELOG_FILE"))
+    old_change_log = FileToText(os.path.join(self.default_cwd, CHANGELOG_FILE))
     new_change_log = "%s\n\n\n%s" % (changelog_entry, old_change_log)
-    TextToFile(new_change_log, self.Config("CHANGELOG_FILE"))
+    TextToFile(new_change_log, os.path.join(self.default_cwd, CHANGELOG_FILE))
     os.remove(self.Config("CHANGELOG_ENTRY_FILE"))
 
 
@@ -358,14 +346,11 @@ class SanityCheck(Step):
       self.Die("Execution canceled.")  # pragma: no cover
 
 
-class CommitSVN(Step):
-  MESSAGE = "Commit to SVN."
+class Land(Step):
+  MESSAGE = "Land the patch."
 
   def RunStep(self):
-    if self._options.svn:
-      self.SVNCommit("trunk", self["commit_title"])
-    else:
-      self.vc.Land()
+    self.vc.CLLand()
 
 
 class TagRevision(Step):
@@ -423,7 +408,6 @@ class PushToTrunk(ScriptsBase):
       "BRANCHNAME": "prepare-push",
       "TRUNKBRANCH": "trunk-push",
       "PERSISTFILE_BASENAME": "/tmp/v8-push-to-trunk-tempfile",
-      "CHANGELOG_FILE": "ChangeLog",
       "CHANGELOG_ENTRY_FILE": "/tmp/v8-push-to-trunk-tempfile-changelog-entry",
       "PATCH_FILE": "/tmp/v8-push-to-trunk-tempfile-patch-file",
       "COMMITMSG_FILE": "/tmp/v8-push-to-trunk-tempfile-commitmsg",
@@ -447,7 +431,7 @@ class PushToTrunk(ScriptsBase):
       SetVersion,
       CommitTrunk,
       SanityCheck,
-      CommitSVN,
+      Land,
       TagRevision,
       CleanUp,
     ]
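
With the SVN-era fallback gone, DetectLastPush relies entirely on PUSH_MSG_GIT_RE. Its behaviour on a push title of the shape used throughout the tests:

    import re

    PUSH_MSG_GIT_RE = re.compile(r".* \(based on (?P<git_rev>[a-fA-F0-9]+)\)$")
    title = "Version 3.4.5 (based on abc123)"  # title shape from the tests
    print PUSH_MSG_GIT_RE.match(title).group("git_rev")  # "abc123"
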
index 2090c00..1a5b15c 100755 (executable)
@@ -136,9 +136,6 @@ class RetrieveV8Releases(Step):
     return (self._options.max_releases > 0
             and len(releases) > self._options.max_releases)
 
-  def GetBleedingEdgeFromPush(self, title):
-    return MatchSafe(PUSH_MSG_SVN_RE.match(title))
-
   def GetBleedingEdgeGitFromPush(self, title):
     return MatchSafe(PUSH_MSG_GIT_RE.match(title))
 
@@ -166,13 +163,13 @@ class RetrieveV8Releases(Step):
   def GetReleaseDict(
       self, git_hash, bleeding_edge_rev, bleeding_edge_git, branch, version,
       patches, cl_body):
-    revision = self.vc.GitSvn(git_hash)
+    revision = self.GetCommitPositionNumber(git_hash)
     return {
-      # The SVN revision on the branch.
+      # The cr commit position number on the branch.
       "revision": revision,
       # The git revision on the branch.
       "revision_git": git_hash,
-      # The SVN revision on bleeding edge (only for newer trunk pushes).
+      # The cr commit position number on master.
       "bleeding_edge": bleeding_edge_rev,
       # The same for git.
       "bleeding_edge_git": bleeding_edge_git,
@@ -211,28 +208,29 @@ class RetrieveV8Releases(Step):
         patches = self.GetMergedPatches(body)
 
     title = self.GitLog(n=1, format="%s", git_hash=git_hash)
-    bleeding_edge_revision = self.GetBleedingEdgeFromPush(title)
-    bleeding_edge_git = ""
-    if bleeding_edge_revision:
-      bleeding_edge_git = self.vc.SvnGit(bleeding_edge_revision,
-                                         self.vc.RemoteMasterBranch())
-    else:
-      bleeding_edge_git = self.GetBleedingEdgeGitFromPush(title)
+    bleeding_edge_git = self.GetBleedingEdgeGitFromPush(title)
+    bleeding_edge_position = ""
+    if bleeding_edge_git:
+      bleeding_edge_position = self.GetCommitPositionNumber(bleeding_edge_git)
+    # TODO(machenbach): Add the commit position number.
     return self.GetReleaseDict(
-        git_hash, bleeding_edge_revision, bleeding_edge_git, branch, version,
+        git_hash, bleeding_edge_position, bleeding_edge_git, branch, version,
         patches, body), self["patch"]
 
   def GetReleasesFromMaster(self):
-    tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v --limit 20")
-    releases = []
-    for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
-      git_hash = self.vc.SvnGit(revision)
+    # TODO(machenbach): Implement this in git as soon as we tag again on
+    # master.
+    # tag_text = self.SVN("log https://v8.googlecode.com/svn/tags -v
+    # --limit 20")
+    # releases = []
+    # for (tag, revision) in re.findall(BLEEDING_EDGE_TAGS_RE, tag_text):
+    #   git_hash = self.vc.SvnGit(revision)
 
       # Add bleeding edge release. It does not contain patches or a code
       # review link, as tags are not uploaded.
-      releases.append(self.GetReleaseDict(
-        git_hash, revision, git_hash, self.vc.MasterBranch(), tag, "", ""))
-    return releases
+      releases.append(self.GetReleaseDict(
+        git_hash, revision, git_hash, self.vc.MasterBranch(), tag, "", ""))
+    return []
 
   def GetReleasesFromBranch(self, branch):
     self.GitReset(self.vc.RemoteBranch(branch))
@@ -324,7 +322,7 @@ def ConvertToCommitNumber(step, revision):
   # Simple check for git hashes.
   if revision.isdigit() and len(revision) < 8:
     return revision
-  return step.GitConvertToSVNRevision(
+  return step.GetCommitPositionNumber(
       revision, cwd=os.path.join(step._options.chromium, "v8"))
 
 
index 41cc97f..db702a3 100644 (file)
@@ -57,7 +57,6 @@ TEST_CONFIG = {
   "BRANCHNAME": "test-prepare-push",
   "TRUNKBRANCH": "test-trunk-push",
   "PERSISTFILE_BASENAME": "/tmp/test-v8-push-to-trunk-tempfile",
-  "CHANGELOG_FILE": None,
   "CHANGELOG_ENTRY_FILE": "/tmp/test-v8-push-to-trunk-tempfile-changelog-entry",
   "PATCH_FILE": "/tmp/test-v8-push-to-trunk-tempfile-patch",
   "COMMITMSG_FILE": "/tmp/test-v8-push-to-trunk-tempfile-commitmsg",
@@ -447,7 +446,6 @@ class ScriptTest(unittest.TestCase):
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch"),
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", ""),
       Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
       RL("Y"),
       Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
@@ -461,7 +459,6 @@ class ScriptTest(unittest.TestCase):
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch"),
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", ""),
       Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
       RL("n"),
     ])
@@ -474,7 +471,6 @@ class ScriptTest(unittest.TestCase):
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch"),
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", ""),
       Cmd("git branch", "  branch1\n* %s" % TEST_CONFIG["BRANCHNAME"]),
       RL("Y"),
       Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], None),
@@ -502,8 +498,7 @@ class ScriptTest(unittest.TestCase):
       Cmd("git fetch", ""),
       Cmd("git log -1 --format=%H --grep=\"Title\" origin/candidates", ""),
     ])
-    args = ["--branch", "candidates", "--vc-interface", "git_read_svn_write",
-            "ab12345"]
+    args = ["--branch", "candidates", "ab12345"]
     self._state["version"] = "tag_name"
     self._state["commit_title"] = "Title"
     self.assertRaises(Exception,
@@ -717,9 +712,9 @@ Performance and stability improvements on all platforms."""
     self.WriteFakeVersionFile(build=5)
 
     TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
-    TEST_CONFIG["CHANGELOG_FILE"] = self.MakeEmptyTempFile()
     bleeding_edge_change_log = "2014-03-17: Sentinel\n"
-    TextToFile(bleeding_edge_change_log, TEST_CONFIG["CHANGELOG_FILE"])
+    TextToFile(bleeding_edge_change_log,
+               os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
     os.environ["EDITOR"] = "vi"
 
     def ResetChangeLog():
@@ -728,7 +723,8 @@ Performance and stability improvements on all platforms."""
       trunk_change_log = """1999-04-05: Version 3.22.4
 
         Performance and stability improvements on all platforms.\n"""
-      TextToFile(trunk_change_log, TEST_CONFIG["CHANGELOG_FILE"])
+      TextToFile(trunk_change_log,
+                 os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
 
     def ResetToTrunk():
       ResetChangeLog()
@@ -751,7 +747,8 @@ Performance and stability improvements on all platforms.""", commit)
       self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
 
       # Check that the change log on the trunk branch got correctly modified.
-      change_log = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
+      change_log = FileToText(
+          os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
       self.assertEquals(
 """1999-07-31: Version 3.22.5
 
@@ -773,7 +770,6 @@ Performance and stability improvements on all platforms.""", commit)
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch\n"),
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", ""),
       Cmd("git branch", "  branch1\n* branch2\n"),
       Cmd("git branch", "  branch1\n* branch2\n"),
       Cmd(("git new-branch %s --upstream origin/master" %
@@ -809,14 +805,12 @@ Performance and stability improvements on all platforms.""", commit)
           Cmd("vi %s" % TEST_CONFIG["CHANGELOG_ENTRY_FILE"], ""))
     expectations += [
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", "fetch result\n"),
       Cmd("git checkout -f origin/master", ""),
       Cmd("git diff origin/candidates push_hash", "patch content\n"),
       Cmd(("git new-branch %s --upstream origin/candidates" %
            TEST_CONFIG["TRUNKBRANCH"]), "", cb=ResetToTrunk),
       Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
-      Cmd(("git checkout -f origin/candidates -- %s" %
-           TEST_CONFIG["CHANGELOG_FILE"]), "",
+      Cmd("git checkout -f origin/candidates -- ChangeLog", "",
           cb=ResetChangeLog),
       Cmd("git checkout -f origin/candidates -- src/version.cc", "",
           cb=self.WriteFakeVersionFile),
@@ -826,7 +820,7 @@ Performance and stability improvements on all platforms.""", commit)
     if manual:
       expectations.append(RL("Y"))  # Sanity check.
     expectations += [
-      Cmd("git svn dcommit 2>&1", ""),
+      Cmd("git cl land -f --bypass-hooks", ""),
       Cmd("git fetch", ""),
       Cmd("git log -1 --format=%H --grep="
           "\"Version 3.22.5 (based on push_hash)\""
@@ -839,14 +833,13 @@ Performance and stability improvements on all platforms.""", commit)
     ]
     self.Expect(expectations)
 
-    args = ["-a", "author@chromium.org", "--revision", "push_hash",
-            "--vc-interface", "git_read_svn_write",]
+    args = ["-a", "author@chromium.org", "--revision", "push_hash"]
     if force: args.append("-f")
     if manual: args.append("-m")
     else: args += ["-r", "reviewer@chromium.org"]
     PushToTrunk(TEST_CONFIG, self).Run(args)
 
-    cl = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
+    cl = FileToText(os.path.join(TEST_CONFIG["DEFAULT_CWD"], CHANGELOG_FILE))
     self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
     self.assertTrue(re.search(r"        Log text 1 \(issue 321\).", cl))
     self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
@@ -864,140 +857,6 @@ Performance and stability improvements on all platforms.""", commit)
   def testPushToTrunkForced(self):
     self._PushToTrunk(force=True)
 
-  def testPushToTrunkGit(self):
-    svn_root = self.MakeEmptyTempDirectory()
-    TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
-
-    # The version file on bleeding edge has build level 5, while the version
-    # file from trunk has build level 4.
-    self.WriteFakeVersionFile(build=5)
-
-    TEST_CONFIG["CHANGELOG_ENTRY_FILE"] = self.MakeEmptyTempFile()
-    TEST_CONFIG["CHANGELOG_FILE"] = self.MakeEmptyTempFile()
-    bleeding_edge_change_log = "2014-03-17: Sentinel\n"
-    TextToFile(bleeding_edge_change_log, TEST_CONFIG["CHANGELOG_FILE"])
-
-    def ResetChangeLog():
-      """On 'git co -b new_branch svn/trunk', and 'git checkout -- ChangeLog',
-      the ChangLog will be reset to its content on trunk."""
-      trunk_change_log = """1999-04-05: Version 3.22.4
-
-        Performance and stability improvements on all platforms.\n"""
-      TextToFile(trunk_change_log, TEST_CONFIG["CHANGELOG_FILE"])
-
-    def ResetToTrunk():
-      ResetChangeLog()
-      self.WriteFakeVersionFile()
-
-    def CheckSVNCommit():
-      commit = FileToText(TEST_CONFIG["COMMITMSG_FILE"])
-      self.assertEquals(
-"""Version 3.22.5 (based on push_hash)
-
-Log text 1 (issue 321).
-
-Performance and stability improvements on all platforms.""", commit)
-      version = FileToText(
-          os.path.join(TEST_CONFIG["DEFAULT_CWD"], VERSION_FILE))
-      self.assertTrue(re.search(r"#define MINOR_VERSION\s+22", version))
-      self.assertTrue(re.search(r"#define BUILD_NUMBER\s+5", version))
-      self.assertFalse(re.search(r"#define BUILD_NUMBER\s+6", version))
-      self.assertTrue(re.search(r"#define PATCH_LEVEL\s+0", version))
-      self.assertTrue(re.search(r"#define IS_CANDIDATE_VERSION\s+0", version))
-
-      # Check that the change log on the trunk branch got correctly modified.
-      change_log = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
-      self.assertEquals(
-"""1999-07-31: Version 3.22.5
-
-        Log text 1 (issue 321).
-
-        Performance and stability improvements on all platforms.
-
-
-1999-04-05: Version 3.22.4
-
-        Performance and stability improvements on all platforms.\n""",
-          change_log)
-
-    expectations = [
-      Cmd("git status -s -uno", ""),
-      Cmd("git status -s -b -uno", "## some_branch\n"),
-      Cmd("git fetch", ""),
-      Cmd("git branch", "  branch1\n* branch2\n"),
-      Cmd("git branch", "  branch1\n* branch2\n"),
-      Cmd(("git new-branch %s --upstream origin/master" %
-           TEST_CONFIG["BRANCHNAME"]),
-          ""),
-      Cmd(("git log -1 --format=%H --grep="
-           "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\" "
-           "origin/candidates"), "hash2\n"),
-      Cmd("git log -1 hash2", "Log message\n"),
-      Cmd("git log -1 --format=%s hash2",
-       "Version 3.4.5 (based on abc3)\n"),
-      Cmd("git checkout -f origin/master -- src/version.cc",
-          "", cb=self.WriteFakeVersionFile),
-      Cmd("git checkout -f hash2 -- src/version.cc", "",
-          cb=self.WriteFakeVersionFile),
-      Cmd("git log --format=%H abc3..push_hash", "rev1\n"),
-      Cmd("git log -1 --format=%s rev1", "Log text 1.\n"),
-      Cmd("git log -1 --format=%B rev1", "Text\nLOG=YES\nBUG=v8:321\nText\n"),
-      Cmd("git log -1 --format=%an rev1", "author1@chromium.org\n"),
-      Cmd("git fetch", ""),
-      Cmd("git checkout -f origin/master", ""),
-      Cmd("git diff origin/candidates push_hash", "patch content\n"),
-      Cmd(("git new-branch %s --upstream origin/candidates" %
-           TEST_CONFIG["TRUNKBRANCH"]), "", cb=ResetToTrunk),
-      Cmd("git apply --index --reject \"%s\"" % TEST_CONFIG["PATCH_FILE"], ""),
-      Cmd(("git checkout -f origin/candidates -- %s" %
-           TEST_CONFIG["CHANGELOG_FILE"]), "",
-          cb=ResetChangeLog),
-      Cmd("git checkout -f origin/candidates -- src/version.cc", "",
-          cb=self.WriteFakeVersionFile),
-      Cmd("git commit -aF \"%s\"" % TEST_CONFIG["COMMITMSG_FILE"], "",
-          cb=CheckSVNCommit),
-      # TODO(machenbach): Change test to pure git after flag day.
-      # Cmd("git push origin", ""),
-      Cmd("git diff HEAD^ HEAD", "patch content"),
-      Cmd("svn update", "", cwd=svn_root),
-      Cmd("svn status", "", cwd=svn_root),
-      Cmd("patch -d trunk -p1 -i %s" %
-          TEST_CONFIG["PATCH_FILE"], "Applied patch...", cwd=svn_root),
-      Cmd("svn status", "M       OWNERS\n?       new_file\n!       AUTHORS",
-          cwd=svn_root),
-      Cmd("svn add --force new_file", "", cwd=svn_root),
-      Cmd("svn delete --force AUTHORS", "", cwd=svn_root),
-      Cmd("svn commit --non-interactive --username=author@chromium.org "
-          "--config-dir=[CONFIG_DIR] "
-          "-m \"Version 3.22.5 (based on push_hash)\"",
-          "", cwd=svn_root),
-      Cmd("git fetch", ""),
-      Cmd("git log -1 --format=%H --grep="
-          "\"Version 3.22.5 (based on push_hash)\""
-          " origin/candidates", "hsh_to_tag"),
-      Cmd("git tag 3.22.5 hsh_to_tag", ""),
-      Cmd("git push https://chromium.googlesource.com/v8/v8 3.22.5", ""),
-      Cmd("git checkout -f some_branch", ""),
-      Cmd("git branch -D %s" % TEST_CONFIG["BRANCHNAME"], ""),
-      Cmd("git branch -D %s" % TEST_CONFIG["TRUNKBRANCH"], ""),
-    ]
-    self.Expect(expectations)
-
-    args = ["-a", "author@chromium.org", "--revision", "push_hash",
-            "--vc-interface", "git", "-f", "-r", "reviewer@chromium.org",
-            "--svn", svn_root, "--svn-config", "[CONFIG_DIR]",
-            "--work-dir", TEST_CONFIG["DEFAULT_CWD"]]
-    PushToTrunk(TEST_CONFIG, self).Run(args)
-
-    cl = FileToText(TEST_CONFIG["CHANGELOG_FILE"])
-    self.assertTrue(re.search(r"^\d\d\d\d\-\d+\-\d+: Version 3\.22\.5", cl))
-    self.assertTrue(re.search(r"        Log text 1 \(issue 321\).", cl))
-    self.assertTrue(re.search(r"1999\-04\-05: Version 3\.22\.4", cl))
-
-    # Note: The version file is on build number 5 again in the end of this test
-    # since the git command that merges to the bleeding edge branch is mocked
-    # out.
-
   C_V8_22624_LOG = """V8 CL.
 
 git-svn-id: https://v8.googlecode.com/svn/branches/bleeding_edge@22624 123
@@ -1082,7 +941,7 @@ def get_list():
           "Version 3.4.5 (based on abc123)\n"),
     ])
 
-    self._state["lkgr"] = "abc123"
+    self._state["candidate"] = "abc123"
     self.assertEquals(0, self.RunStep(
         auto_push.AutoPush, CheckLastPush, AUTO_PUSH_ARGS))
 
@@ -1096,8 +955,8 @@ def get_list():
       Cmd("git fetch", ""),
       URL("https://v8-status.appspot.com/current?format=json",
           "{\"message\": \"Tree is throttled\"}"),
-      URL("https://v8-status.appspot.com/lkgr", Exception("Network problem")),
-      URL("https://v8-status.appspot.com/lkgr", "abc123"),
+      Cmd("git fetch origin +refs/heads/candidate:refs/heads/candidate", ""),
+      Cmd("git show-ref -s refs/heads/candidate", "abc123\n"),
       Cmd(("git log -1 --format=%H --grep=\""
            "^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]* (based\""
            " origin/candidates"), "push_hash\n"),
@@ -1105,13 +964,12 @@ def get_list():
           "Version 3.4.5 (based on abc101)\n"),
     ])
 
-    auto_push.AutoPush(TEST_CONFIG, self).Run(
-        AUTO_PUSH_ARGS + ["--push", "--vc-interface", "git"])
+    auto_push.AutoPush(TEST_CONFIG, self).Run(AUTO_PUSH_ARGS + ["--push"])
 
     state = json.loads(FileToText("%s-state.json"
                                   % TEST_CONFIG["PERSISTFILE_BASENAME"]))
 
-    self.assertEquals("abc123", state["lkgr"])
+    self.assertEquals("abc123", state["candidate"])
 
   def testAutoPushStoppedBySettings(self):
     TextToFile("", os.path.join(TEST_CONFIG["DEFAULT_CWD"], ".git"))
@@ -1123,7 +981,6 @@ def get_list():
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch\n"),
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", ""),
     ])
 
     def RunAutoPush():
@@ -1138,7 +995,6 @@ def get_list():
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch\n"),
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", ""),
       URL("https://v8-status.appspot.com/current?format=json",
           "{\"message\": \"Tree is throttled (no push)\"}"),
     ])
@@ -1179,7 +1035,6 @@ deps = {
           "owner=author%40chromium.org&limit=30&closed=3&format=json",
           ("{\"results\": [{\"subject\": \"different\"}]}")),
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", ""),
       Cmd(("git log -1 --format=%H --grep="
            "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
            "origin/candidates"), "push_hash\n"),
@@ -1202,7 +1057,6 @@ deps = {
           "owner=author%40chromium.org&limit=30&closed=3&format=json",
           ("{\"results\": [{\"subject\": \"different\"}]}")),
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", ""),
       Cmd(("git log -1 --format=%H --grep="
            "\"^Version [[:digit:]]*\.[[:digit:]]*\.[[:digit:]]*\" "
            "origin/candidates"), "push_hash\n"),
@@ -1261,7 +1115,6 @@ LOG=N
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch\n"),
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", ""),
       Cmd("git branch", "  branch1\n* branch2\n"),
       Cmd("git new-branch %s --upstream origin/candidates" %
           TEST_CONFIG["BRANCHNAME"], ""),
@@ -1320,7 +1173,7 @@ LOG=N
       Cmd("git checkout -f %s" % TEST_CONFIG["BRANCHNAME"], ""),
       RL("LGTM"),  # Enter LGTM for V8 CL.
       Cmd("git cl presubmit", "Presubmit successfull\n"),
-      Cmd("git cl dcommit -f --bypass-hooks", "Closing issue\n",
+      Cmd("git cl land -f --bypass-hooks", "Closing issue\n",
           cb=VerifySVNCommit),
       Cmd("git fetch", ""),
       Cmd("git log -1 --format=%H --grep=\""
@@ -1352,33 +1205,6 @@ LOG=N
     MergeToBranch(TEST_CONFIG, self).Run(args)
 
   def testReleases(self):
-    tag_response_text = """
-------------------------------------------------------------------------
-r22631 | author1@chromium.org | 2014-07-28 02:05:29 +0200 (Mon, 28 Jul 2014)
-Changed paths:
-   A /tags/3.28.43 (from /trunk:22630)
-
-Tagging version 3.28.43
-------------------------------------------------------------------------
-r22629 | author2@chromium.org | 2014-07-26 05:09:29 +0200 (Sat, 26 Jul 2014)
-Changed paths:
-   A /tags/3.28.41 (from /branches/bleeding_edge:22626)
-
-Tagging version 3.28.41
-------------------------------------------------------------------------
-r22556 | author3@chromium.org | 2014-07-23 13:31:59 +0200 (Wed, 23 Jul 2014)
-Changed paths:
-   A /tags/3.27.34.7 (from /branches/3.27:22555)
-
-Tagging version 3.27.34.7
-------------------------------------------------------------------------
-r22627 | author4@chromium.org | 2014-07-26 01:39:15 +0200 (Sat, 26 Jul 2014)
-Changed paths:
-   A /tags/3.28.40 (from /branches/bleeding_edge:22624)
-
-Tagging version 3.28.40
-------------------------------------------------------------------------
-"""
     c_hash2_commit_log = """Revert something.
 
 BUG=12345
@@ -1399,6 +1225,23 @@ git-svn-id: svn://svn.chromium.org/chrome/trunk/src@4567 0039-1c4b
 git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
 
 """
+    c_hash_234_commit_log = """Version 3.3.1.1 (cherry-pick).
+
+Merged abc12.
+
+Review URL: fake.com
+
+Cr-Commit-Position: refs/heads/candidates@{#234}
+"""
+    c_hash_123_commit_log = """Version 3.3.1.0
+
+git-svn-id: googlecode@123 0039-1c4b
+"""
+    c_hash_345_commit_log = """Version 3.4.0.
+
+Cr-Commit-Position: refs/heads/candidates@{#345}
+"""
+
     json_output = self.MakeEmptyTempFile()
     csv_output = self.MakeEmptyTempFile()
     self.WriteFakeVersionFile()
@@ -1424,7 +1267,6 @@ git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
       Cmd("git status -s -uno", ""),
       Cmd("git status -s -b -uno", "## some_branch\n"),
       Cmd("git fetch", ""),
-      Cmd("git svn fetch", ""),
       Cmd("git branch", "  branch1\n* branch2\n"),
       Cmd("git new-branch %s" % TEST_CONFIG["BRANCHNAME"], ""),
       Cmd("git branch -r", "  branch-heads/3.21\n  branch-heads/3.3\n"),
@@ -1434,12 +1276,9 @@ git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
       Cmd("git diff --name-only hash_234 hash_234^", VERSION_FILE),
       Cmd("git checkout -f hash_234 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(3, 1, 1)),
-      Cmd("git log -1 --format=%B hash_234",
-          "Version 3.3.1.1 (cherry-pick).\n\n"
-          "Merged abc12.\n\n"
-          "Review URL: fake.com\n"),
+      Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
       Cmd("git log -1 --format=%s hash_234", ""),
-      Cmd("git svn find-rev hash_234", "234"),
+      Cmd("git log -1 --format=%B hash_234", c_hash_234_commit_log),
       Cmd("git log -1 --format=%ci hash_234", "18:15"),
       Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 5)),
@@ -1448,9 +1287,9 @@ git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
       Cmd("git diff --name-only hash_123 hash_123^", VERSION_FILE),
       Cmd("git checkout -f hash_123 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(21, 2)),
-      Cmd("git log -1 --format=%B hash_123", ""),
+      Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
       Cmd("git log -1 --format=%s hash_123", ""),
-      Cmd("git svn find-rev hash_123", "123"),
+      Cmd("git log -1 --format=%B hash_123", c_hash_123_commit_log),
       Cmd("git log -1 --format=%ci hash_123", "03:15"),
       Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 5)),
@@ -1459,21 +1298,13 @@ git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
       Cmd("git diff --name-only hash_345 hash_345^", VERSION_FILE),
       Cmd("git checkout -f hash_345 -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 3)),
-      Cmd("git log -1 --format=%B hash_345", ""),
+      Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
       Cmd("git log -1 --format=%s hash_345", ""),
-      Cmd("git svn find-rev hash_345", "345"),
+      Cmd("git log -1 --format=%B hash_345", c_hash_345_commit_log),
       Cmd("git log -1 --format=%ci hash_345", ""),
       Cmd("git checkout -f HEAD -- %s" % VERSION_FILE, "",
           cb=ResetVersion(22, 5)),
       Cmd("git reset --hard origin/master", ""),
-      Cmd("svn log https://v8.googlecode.com/svn/tags -v --limit 20",
-          tag_response_text),
-      Cmd("git svn find-rev r22626", "hash_22626"),
-      Cmd("git svn find-rev hash_22626", "22626"),
-      Cmd("git log -1 --format=%ci hash_22626", "01:23"),
-      Cmd("git svn find-rev r22624", "hash_22624"),
-      Cmd("git svn find-rev hash_22624", "22624"),
-      Cmd("git log -1 --format=%ci hash_22624", "02:34"),
       Cmd("git status -s -uno", "", cwd=chrome_dir),
       Cmd("git checkout -f master", "", cwd=chrome_dir),
       Cmd("git pull", "", cwd=chrome_dir),
@@ -1489,8 +1320,6 @@ git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
           cwd=chrome_dir),
       Cmd("git log -1 --format=%B c_hash2", c_hash2_commit_log,
           cwd=chrome_dir),
-      Cmd("git rev-list -n 1 0123456789012345678901234567890123456789",
-          "0123456789012345678901234567890123456789", cwd=chrome_v8_dir),
       Cmd("git log -1 --format=%B 0123456789012345678901234567890123456789",
           self.C_V8_22624_LOG, cwd=chrome_v8_dir),
       Cmd("git diff --name-only c_hash3 c_hash3^", "DEPS", cwd=chrome_dir),
@@ -1512,50 +1341,19 @@ git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
     ])
 
     args = ["-c", TEST_CONFIG["CHROMIUM"],
-            "--vc-interface", "git_read_svn_write",
             "--json", json_output,
             "--csv", csv_output,
             "--max-releases", "1"]
     Releases(TEST_CONFIG, self).Run(args)
 
     # Check expected output.
-    csv = ("3.28.41,master,22626,,\r\n"
-           "3.28.40,master,22624,4567,\r\n"
-           "3.22.3,candidates,345,3456:4566,\r\n"
+    csv = ("3.22.3,candidates,345,3456:4566,\r\n"
            "3.21.2,3.21,123,,\r\n"
            "3.3.1.1,3.3,234,,abc12\r\n")
     self.assertEquals(csv, FileToText(csv_output))
 
     expected_json = [
       {
-        "revision": "22626",
-        "revision_git": "hash_22626",
-        "bleeding_edge": "22626",
-        "bleeding_edge_git": "hash_22626",
-        "patches_merged": "",
-        "version": "3.28.41",
-        "chromium_revision": "",
-        "branch": "master",
-        "review_link": "",
-        "date": "01:23",
-        "chromium_branch": "",
-        "revision_link": "https://code.google.com/p/v8/source/detail?r=22626",
-      },
-      {
-        "revision": "22624",
-        "revision_git": "hash_22624",
-        "bleeding_edge": "22624",
-        "bleeding_edge_git": "hash_22624",
-        "patches_merged": "",
-        "version": "3.28.40",
-        "chromium_revision": "4567",
-        "branch": "master",
-        "review_link": "",
-        "date": "02:34",
-        "chromium_branch": "",
-        "revision_link": "https://code.google.com/p/v8/source/detail?r=22624",
-      },
-      {
         "revision": "345",
         "revision_git": "hash_345",
         "bleeding_edge": "",
@@ -1643,7 +1441,7 @@ git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
     expectations += [
       Cmd("git cl upload --send-mail --email \"author@chromium.org\" -f "
           "--bypass-hooks", ""),
-      Cmd("git cl dcommit -f --bypass-hooks", ""),
+      Cmd("git cl land -f --bypass-hooks", ""),
       Cmd("git checkout -f master", ""),
       Cmd("git branch", "auto-bump-up-version\n* master"),
       Cmd("git branch -D auto-bump-up-version", ""),
@@ -1652,30 +1450,6 @@ git-svn-id: svn://svn.chromium.org/chrome/trunk/src@3456 0039-1c4b
 
     BumpUpVersion(TEST_CONFIG, self).Run(["-a", "author@chromium.org"])
 
-  def testBumpUpVersionSvn(self):
-    svn_root = self.MakeEmptyTempDirectory()
-    expectations = self._bumpUpVersion()
-    expectations += [
-      Cmd("git diff HEAD^ HEAD", "patch content"),
-      Cmd("svn update", "", cwd=svn_root),
-      Cmd("svn status", "", cwd=svn_root),
-      Cmd("patch -d branches/bleeding_edge -p1 -i %s" %
-          TEST_CONFIG["PATCH_FILE"], "Applied patch...", cwd=svn_root),
-     Cmd("svn status", "M       src/version.cc", cwd=svn_root),
-      Cmd("svn commit --non-interactive --username=author@chromium.org "
-          "--config-dir=[CONFIG_DIR] "
-          "-m \"[Auto-roll] Bump up version to 3.11.6.0\"",
-          "", cwd=svn_root),
-      Cmd("git checkout -f master", ""),
-      Cmd("git branch", "auto-bump-up-version\n* master"),
-      Cmd("git branch -D auto-bump-up-version", ""),
-    ]
-    self.Expect(expectations)
-
-    BumpUpVersion(TEST_CONFIG, self).Run(
-        ["-a", "author@chromium.org",
-         "--svn", svn_root,
-         "--svn-config", "[CONFIG_DIR]"])
 
   # Test that we bail out if the last change was a version change.
   def testBumpUpVersionBailout1(self):
index bd474f9..a6fdf31 100755 (executable)
@@ -160,6 +160,9 @@ def BuildOptions():
   result.add_option("--buildbot",
                     help="Adapt to path structure used on buildbots",
                     default=False, action="store_true")
+  result.add_option("--dcheck-always-on",
+                    help="Indicates that V8 was compiled with DCHECKs enabled",
+                    default=False, action="store_true")
   result.add_option("--command-prefix",
                     help="Prepended to each shell command used to run a test",
                     default="")
@@ -390,6 +393,7 @@ def Execute(arch, mode, args, options, suites, workspace):
     "system": utils.GuessOS(),
     "tsan": False,
     "msan": False,
+    "dcheck_always_on": options.dcheck_always_on,
   }
   all_tests = []
   num_tests = 0
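
The new dcheck_always_on entry extends the context against which test status files are evaluated, so expectations can key on DCHECK-enabled release builds. A simplified stand-in for that evaluation (the real harness has its own expression handling; the condition shown is hypothetical):

    variables = {"dcheck_always_on": True}  # populated from the new flag
    condition = "dcheck_always_on"          # hypothetical .status condition
    print eval(condition, {}, variables)    # True -> the rule would apply
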
index 20f3679..d68d1f8 100755 (executable)
@@ -141,6 +141,9 @@ def BuildOptions():
   result.add_option("--buildbot",
                     help="Adapt to path structure used on buildbots",
                     default=False, action="store_true")
+  result.add_option("--dcheck-always-on",
+                    help="Indicates that V8 was compiled with DCHECKs enabled",
+                    default=False, action="store_true")
   result.add_option("--cat", help="Print the source of the tests",
                     default=False, action="store_true")
   result.add_option("--flaky-tests",
@@ -491,7 +494,8 @@ def Execute(arch, mode, args, options, suites, workspace):
 
   # TODO(all): Combine "simulator" and "simulator_run".
   simulator_run = not options.dont_skip_simulator_slow_tests and \
-      arch in ['arm64', 'arm', 'mips'] and ARCH_GUESS and arch != ARCH_GUESS
+      arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el'] and \
+      ARCH_GUESS and arch != ARCH_GUESS
   # Find available test suites and read test cases from them.
   variables = {
     "arch": arch,
@@ -507,6 +511,7 @@ def Execute(arch, mode, args, options, suites, workspace):
     "system": utils.GuessOS(),
     "tsan": options.tsan,
     "msan": options.msan,
+    "dcheck_always_on": options.dcheck_always_on,
   }
   all_tests = []
   num_tests = 0
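
The widened architecture list means the simulator_run variable is now also set when MIPS variants are tested on a foreign host. A worked example on an x64 machine:

    ARCH_GUESS = "x64"      # auto-detected host architecture
    arch = "mips64el"       # architecture under test
    dont_skip_simulator_slow_tests = False
    simulator_run = not dont_skip_simulator_slow_tests and \
        arch in ['arm64', 'arm', 'mipsel', 'mips', 'mips64el'] and \
        ARCH_GUESS and arch != ARCH_GUESS
    print simulator_run     # True: mips64el runs in the simulator on x64
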
index da139d7..14d67c5 100755 (executable)
@@ -15,9 +15,10 @@ The suite json format is expected to be:
   "archs": [<architecture name for which this suite is run>, ...],
   "binary": <name of binary to run, default "d8">,
   "flags": [<flag to d8>, ...],
+  "test_flags": [<flag to the test file>, ...],
   "run_count": <how often will this suite run (optional)>,
   "run_count_XXX": <how often will this suite run for arch XXX (optional)>,
-  "resources": [<js file to be loaded before main>, ...]
+  "resources": [<js file to be moved to android device>, ...]
   "main": <main js perf runner file>,
   "results_regexp": <optional regexp>,
   "results_processor": <optional python results processor script>,
@@ -54,6 +55,7 @@ Full example (suite with one runner):
 {
   "path": ["."],
   "flags": ["--expose-gc"],
+  "test_flags": ["5"],
   "archs": ["ia32", "x64"],
   "run_count": 5,
   "run_count_ia32": 3,
@@ -89,10 +91,13 @@ Full example (suite with several runners):
 }
 
 Path pieces are concatenated. D8 is always run with the suite's path as cwd.
+
+The test flags are passed to the js test file after '--'.
 """
 
 from collections import OrderedDict
 import json
+import logging
 import math
 import optparse
 import os
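
To make the new test_flags semantics concrete: for the docstring's full example above (a main file name of "run.js" is assumed here, since it is not part of the excerpt), the runner assembles the argument list below, mirroring GetCommandFlags() further down in this diff:

    flags = ["--expose-gc"]
    test_flags = ["5"]      # forwarded to the JS file after "--"
    main = "run.js"         # assumed for illustration
    suffix = ["--"] + test_flags if test_flags else []
    print flags + [main] + suffix
    # ['--expose-gc', 'run.js', '--', '5']  -> d8 --expose-gc run.js -- 5
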
@@ -120,6 +125,21 @@ RESULT_STDDEV_RE = re.compile(r"^\{([^\}]+)\}$")
 RESULT_LIST_RE = re.compile(r"^\[([^\]]+)\]$")
 
 
+def LoadAndroidBuildTools(path):  # pragma: no cover
+  assert os.path.exists(path)
+  sys.path.insert(0, path)
+
+  from pylib.device import device_utils  # pylint: disable=F0401
+  from pylib.device import device_errors  # pylint: disable=F0401
+  from pylib.perf import cache_control  # pylint: disable=F0401
+  from pylib.perf import perf_control  # pylint: disable=F0401
+  import pylib.android_commands  # pylint: disable=F0401
+  global cache_control
+  global device_errors
+  global device_utils
+  global perf_control
+  global pylib
+
 
 def GeometricMean(values):
   """Returns the geometric mean of a list of values.
@@ -171,6 +191,7 @@ class DefaultSentinel(Node):
     self.path = []
     self.graphs = []
     self.flags = []
+    self.test_flags = []
     self.resources = []
     self.results_regexp = None
     self.stddev_regexp = None
@@ -190,19 +211,24 @@ class Graph(Node):
     assert isinstance(suite.get("path", []), list)
     assert isinstance(suite["name"], basestring)
     assert isinstance(suite.get("flags", []), list)
+    assert isinstance(suite.get("test_flags", []), list)
     assert isinstance(suite.get("resources", []), list)
 
     # Accumulated values.
     self.path = parent.path[:] + suite.get("path", [])
     self.graphs = parent.graphs[:] + [suite["name"]]
     self.flags = parent.flags[:] + suite.get("flags", [])
-    self.resources = parent.resources[:] + suite.get("resources", [])
+    self.test_flags = parent.test_flags[:] + suite.get("test_flags", [])
+
+    # Values independent of parent node.
+    self.resources = suite.get("resources", [])
 
     # Discrete values (with parent defaults).
     self.binary = suite.get("binary", parent.binary)
     self.run_count = suite.get("run_count", parent.run_count)
     self.run_count = suite.get("run_count_%s" % arch, self.run_count)
     self.timeout = suite.get("timeout", parent.timeout)
+    self.timeout = suite.get("timeout_%s" % arch, self.timeout)
     self.units = suite.get("units", parent.units)
     self.total = suite.get("total", parent.total)
 
@@ -239,8 +265,11 @@ class Trace(Graph):
 
   def ConsumeOutput(self, stdout):
     try:
-      self.results.append(
-          re.search(self.results_regexp, stdout, re.M).group(1))
+      result = re.search(self.results_regexp, stdout, re.M).group(1)
+      self.results.append(str(float(result)))
+    except ValueError:
+      self.errors.append("Regexp \"%s\" returned a non-numeric for test %s."
+                         % (self.results_regexp, self.graphs[-1]))
     except:
       self.errors.append("Regexp \"%s\" didn't match for test %s."
                          % (self.results_regexp, self.graphs[-1]))
@@ -280,14 +309,13 @@ class Runnable(Graph):
     bench_dir = os.path.normpath(os.path.join(*self.path))
     os.chdir(os.path.join(suite_dir, bench_dir))
 
+  def GetCommandFlags(self):
+    suffix = ["--"] + self.test_flags if self.test_flags else []
+    return self.flags + [self.main] + suffix
+
   def GetCommand(self, shell_dir):
     # TODO(machenbach): This requires +.exe if run on windows.
-    return (
-      [os.path.join(shell_dir, self.binary)] +
-      self.flags +
-      self.resources +
-      [self.main]
-    )
+    return [os.path.join(shell_dir, self.binary)] + self.GetCommandFlags()
 
   def Run(self, runner):
     """Iterates over several runs and handles the output for all traces."""
@@ -349,6 +377,7 @@ class RunnableGeneric(Runnable):
           units = match.group(4)
           match_stddev = RESULT_STDDEV_RE.match(body)
           match_list = RESULT_LIST_RE.match(body)
+          errors = []
           if match_stddev:
             result, stddev = map(str.strip, match_stddev.group(1).split(","))
             results = [result]
@@ -357,12 +386,19 @@ class RunnableGeneric(Runnable):
           else:
             results = [body.strip()]
 
+          try:
+            results = map(lambda r: str(float(r)), results)
+          except ValueError:
+            results = []
+            errors = ["Found non-numeric in %s" %
+                      "/".join(self.graphs + [graph, trace])]
+
           trace_result = traces.setdefault(trace, Results([{
             "graphs": self.graphs + [graph, trace],
             "units": (units or self.units).strip(),
             "results": [],
             "stddev": "",
-          }], []))
+          }], errors))
           trace_result.traces[0]["results"].extend(results)
           trace_result.traces[0]["stddev"] = stddev
 
@@ -400,7 +436,7 @@ def BuildGraphs(suite, arch, parent=None):
   parent = parent or DefaultSentinel()
 
   # TODO(machenbach): Implement notion of cpu type?
-  if arch not in suite.get("archs", ["ia32", "x64"]):
+  if arch not in suite.get("archs", SUPPORTED_ARCHS):
     return None
 
   graph = MakeGraph(suite, arch, parent)
@@ -410,23 +446,152 @@ def BuildGraphs(suite, arch, parent=None):
   return graph
 
 
-def FlattenRunnables(node):
+def FlattenRunnables(node, node_cb):
   """Generator that traverses the tree structure and iterates over all
   runnables.
   """
+  node_cb(node)
   if isinstance(node, Runnable):
     yield node
   elif isinstance(node, Node):
     for child in node._children:
-      for result in FlattenRunnables(child):
+      for result in FlattenRunnables(child, node_cb):
         yield result
   else:  # pragma: no cover
     raise Exception("Invalid suite configuration.")
 
 
+class Platform(object):
+  @staticmethod
+  def GetPlatform(options):
+    if options.arch.startswith("android"):
+      return AndroidPlatform(options)
+    else:
+      return DesktopPlatform(options)
+
+
+class DesktopPlatform(Platform):
+  def __init__(self, options):
+    self.shell_dir = options.shell_dir
+
+  def PreExecution(self):
+    pass
+
+  def PostExecution(self):
+    pass
+
+  def PreTests(self, node, path):
+    if isinstance(node, Runnable):
+      node.ChangeCWD(path)
+
+  def Run(self, runnable, count):
+    output = commands.Execute(runnable.GetCommand(self.shell_dir),
+                              timeout=runnable.timeout)
+    print ">>> Stdout (#%d):" % (count + 1)
+    print output.stdout
+    if output.stderr:  # pragma: no cover
+      # Print stderr for debugging.
+      print ">>> Stderr (#%d):" % (count + 1)
+      print output.stderr
+    if output.timed_out:
+      print ">>> Test timed out after %ss." % runnable.timeout
+    return output.stdout
+
+
+class AndroidPlatform(Platform):  # pragma: no cover
+  DEVICE_DIR = "/data/local/tmp/v8/"
+
+  def __init__(self, options):
+    self.shell_dir = options.shell_dir
+    LoadAndroidBuildTools(options.android_build_tools)
+
+    if not options.device:
+      # Detect attached device if not specified.
+      devices = pylib.android_commands.GetAttachedDevices(
+          hardware=True, emulator=False, offline=False)
+      assert devices and len(devices) == 1, (
+          "None or multiple devices detected. Please specify the device on "
+          "the command-line with --device")
+      options.device = devices[0]
+    adb_wrapper = pylib.android_commands.AndroidCommands(options.device)
+    self.device = device_utils.DeviceUtils(adb_wrapper)
+    self.adb = adb_wrapper.Adb()
+
+  def PreExecution(self):
+    perf = perf_control.PerfControl(self.device)
+    perf.SetHighPerfMode()
+
+    # Remember what we have already pushed to the device.
+    self.pushed = set()
+
+  def PostExecution(self):
+    perf = perf_control.PerfControl(self.device)
+    perf.SetDefaultPerfMode()
+    self.device.RunShellCommand(["rm", "-rf", AndroidPlatform.DEVICE_DIR])
+
+  def _PushFile(self, host_dir, file_name, target_rel="."):
+    file_on_host = os.path.join(host_dir, file_name)
+    file_on_device = os.path.join(
+        AndroidPlatform.DEVICE_DIR, target_rel, file_name)
+
+    # Push each file at most once per execution.
+    if file_on_host in self.pushed:
+      return
+    else:
+      self.pushed.add(file_on_host)
+
+    logging.info("adb push %s %s" % (file_on_host, file_on_device))
+    self.adb.Push(file_on_host, file_on_device)
+
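The dedup key is the host-side path, so repeated PreTests calls across suites transfer each file only once per execution. With hypothetical paths:

    platform._PushFile("out/android_arm.release", "d8")  # adb push runs
    platform._PushFile("out/android_arm.release", "d8")  # skipped, already pushed
    platform._PushFile(bench_abs, "run.js", bench_rel)   # lands in DEVICE_DIR/<bench_rel>/
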
+  def PreTests(self, node, path):
+    suite_dir = os.path.abspath(os.path.dirname(path))
+    if node.path:
+      bench_rel = os.path.normpath(os.path.join(*node.path))
+      bench_abs = os.path.join(suite_dir, bench_rel)
+    else:
+      bench_rel = "."
+      bench_abs = suite_dir
+
+    self._PushFile(self.shell_dir, node.binary)
+    if isinstance(node, Runnable):
+      self._PushFile(bench_abs, node.main, bench_rel)
+    for resource in node.resources:
+      self._PushFile(bench_abs, resource, bench_rel)
+
+  def Run(self, runnable, count):
+    cache = cache_control.CacheControl(self.device)
+    cache.DropRamCaches()
+    binary_on_device = AndroidPlatform.DEVICE_DIR + runnable.binary
+    cmd = [binary_on_device] + runnable.GetCommandFlags()
+
+    # Relative path to benchmark directory.
+    if runnable.path:
+      bench_rel = os.path.normpath(os.path.join(*runnable.path))
+    else:
+      bench_rel = "."
+
+    try:
+      output = self.device.RunShellCommand(
+          cmd,
+          cwd=os.path.join(AndroidPlatform.DEVICE_DIR, bench_rel),
+          timeout=runnable.timeout,
+          retries=0,
+      )
+      stdout = "\n".join(output)
+      print ">>> Stdout (#%d):" % (count + 1)
+      print stdout
+    except device_errors.CommandTimeoutError:
+      print ">>> Test timed out after %ss." % runnable.timeout
+      stdout = ""
+    return stdout
+
+
 # TODO: Implement results_processor.
 def Main(args):
+  logging.getLogger().setLevel(logging.INFO)
   parser = optparse.OptionParser()
+  parser.add_option("--android-build-tools",
+                    help="Path to chromium's build/android.")
   parser.add_option("--arch",
                     help=("The architecture to run tests for, "
                           "'auto' or 'native' for auto-detect"),
@@ -434,6 +599,9 @@ def Main(args):
   parser.add_option("--buildbot",
                     help="Adapt to path structure used on buildbots",
                     default=False, action="store_true")
+  parser.add_option("--device",
+                    help="The device ID to run Android tests on. If not given "
+                         "it will be autodetected.")
   parser.add_option("--json-test-results",
                     help="Path to a file for storing json results.")
   parser.add_option("--outdir", help="Base directory with compile output",
@@ -451,13 +619,26 @@ def Main(args):
     print "Unknown architecture %s" % options.arch
     return 1
 
+  if (bool(options.arch.startswith("android")) !=
+      bool(options.android_build_tools)):  # pragma: no cover
+    print ("Android architectures and --android-build-tools imply each other; "
+           "specify both or neither.")
+    return 1
+
+  if (options.device and not
+      options.arch.startswith("android")):  # pragma: no cover
+    print "Specifying a device requires an Android architecture to be used."
+    return 1
+
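The first of these checks is an exclusive-or spelled bool(x) != bool(y): it trips exactly when one of the two Android-related settings is present without the other. With hypothetical option values:

    bool("android_arm".startswith("android")) != bool(None)              # True  -> error
    bool("x64".startswith("android")) != bool("/path/to/tools")          # True  -> error
    bool("android_arm".startswith("android")) != bool("/path/to/tools")  # False -> OK
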
   workspace = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
 
   if options.buildbot:
-    shell_dir = os.path.join(workspace, options.outdir, "Release")
+    options.shell_dir = os.path.join(workspace, options.outdir, "Release")
   else:
-    shell_dir = os.path.join(workspace, options.outdir,
-                             "%s.release" % options.arch)
+    options.shell_dir = os.path.join(workspace, options.outdir,
+                                     "%s.release" % options.arch)
+
+  platform = Platform.GetPlatform(options)
 
   results = Results()
   for path in args:
@@ -473,30 +654,32 @@ def Main(args):
     # If no name is given, default to the file name without .json.
     suite.setdefault("name", os.path.splitext(os.path.basename(path))[0])
 
-    for runnable in FlattenRunnables(BuildGraphs(suite, options.arch)):
+    # Set up things common to one test suite.
+    platform.PreExecution()
+
+    # Build the graph/trace tree structure.
+    root = BuildGraphs(suite, options.arch)
+
+    # Callback invoked on each node during traversal.
+    def NodeCB(node):
+      platform.PreTests(node, path)
+
+    # Traverse the graph/trace tree and iterate over all runnables.
+    for runnable in FlattenRunnables(root, NodeCB):
       print ">>> Running suite: %s" % "/".join(runnable.graphs)
-      runnable.ChangeCWD(path)
 
       def Runner():
         """Output generator that reruns several times."""
         for i in xrange(0, max(1, runnable.run_count)):
           # TODO(machenbach): Allow timeout per arch like with run_count per
           # arch.
-          output = commands.Execute(runnable.GetCommand(shell_dir),
-                                    timeout=runnable.timeout)
-          print ">>> Stdout (#%d):" % (i + 1)
-          print output.stdout
-          if output.stderr:  # pragma: no cover
-            # Print stderr for debugging.
-            print ">>> Stderr (#%d):" % (i + 1)
-            print output.stderr
-          if output.timed_out:
-            print ">>> Test timed out after %ss." % runnable.timeout
-          yield output.stdout
+          yield platform.Run(runnable, i)
 
       # Let runnable iterate over all runs and handle output.
       results += runnable.Run(Runner)
 
+    platform.PostExecution()
+
   if options.json_test_results:
     results.WriteToFile(options.json_test_results)
   else:  # pragma: no cover
index 36ce7be..5c5fbac 100644 (file)
@@ -33,6 +33,7 @@ import time
 from pool import Pool
 from . import commands
 from . import perfdata
+from . import statusfile
 from . import utils
 
 
@@ -98,6 +99,10 @@ class Runner(object):
         "--stress-opt" in self.context.mode_flags or
         "--stress-opt" in self.context.extra_flags):
       timeout *= 4
+    # FIXME(machenbach): Make this more OO. Don't expose default outcomes or
+    # the like.
+    if statusfile.IsSlow(test.outcomes or [statusfile.PASS]):
+      timeout *= 2
     if test.dependency is not None:
       dep_command = [ c.replace(test.path, test.dependency) for c in command ]
     else:
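The stress and SLOW multipliers above compound. For an illustrative base timeout of 60 seconds:

    timeout = 60   # base value (illustrative)
    timeout *= 4   # --stress-opt is active
    timeout *= 2   # test is marked SLOW in the status file
    # -> 480 seconds for a slow test under stress
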
index 8caa58c..2616958 100644 (file)
@@ -333,6 +333,7 @@ class JsonTestProgressIndicator(ProgressIndicator):
       "stderr": test.output.stderr,
       "exit_code": test.output.exit_code,
       "result": test.suite.GetOutcome(test),
+      "expected": list(test.outcomes or ["PASS"]),
     })
 
 
index 6ff97b3..84f07fe 100644 (file)
@@ -136,6 +136,9 @@ class TestSuite(object):
         t.outcomes = self.rules[testname]
         if statusfile.DoSkip(t.outcomes):
           continue  # Don't add skipped tests to |filtered|.
+        for outcome in t.outcomes:
+          if outcome.startswith('Flags: '):
+            t.flags += outcome[7:].split()
         flaky = statusfile.IsFlaky(t.outcomes)
         slow = statusfile.IsSlow(t.outcomes)
         pass_fail = statusfile.IsPassOrFail(t.outcomes)
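The new loop lets a status-file rule attach extra command-line flags to a test via an outcome entry prefixed with "Flags: "; outcome[7:] drops that seven-character prefix before splitting on whitespace. For example (flag values illustrative):

    outcome = "Flags: --harmony --expose-gc"
    t.flags += outcome[7:].split()  # adds ["--harmony", "--expose-gc"]
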
index ca82606..6c55082 100644 (file)
 from . import output
 
 class TestCase(object):
-  def __init__(self, suite, path, flags=[], dependency=None):
-    self.suite = suite  # TestSuite object
-    self.path = path    # string, e.g. 'div-mod', 'test-api/foo'
-    self.flags = flags  # list of strings, flags specific to this test case
+  def __init__(self, suite, path, flags=None, dependency=None):
+    self.suite = suite        # TestSuite object
+    self.path = path          # string, e.g. 'div-mod', 'test-api/foo'
+    self.flags = flags or []  # list of strings, flags specific to this test
     self.dependency = dependency  # |path| for testcase that must be run first
     self.outcomes = None
     self.output = None
diff --git a/deps/v8/tools/trace-maps-processor.py b/deps/v8/tools/trace-maps-processor.py
new file mode 100755 (executable)
index 0000000..bf8c8a8
--- /dev/null
@@ -0,0 +1,172 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import sys
+
+
+# Check the argument count before indexing into sys.argv so that running
+# the script without arguments prints the usage text instead of raising
+# an IndexError.
+if len(sys.argv) != 3 or sys.argv[1] in ["help", "-h", "--help"]:
+  print("Usage: %s <action> <inputfile>, where action can be:\n"
+        "help    Print this message\n"
+        "plain   Print ASCII tree to stdout\n"
+        "dot     Print dot file to stdout\n"
+        "count   Count most frequent transition reasons\n" % sys.argv[0])
+  sys.exit(0)
+
+action = sys.argv[1]
+
+
+filename = sys.argv[2]
+maps = {}
+root_maps = []
+transitions = {}
+annotations = {}
+
+
+class Map(object):
+
+  def __init__(self, pointer, origin):
+    self.pointer = pointer
+    self.origin = origin
+
+  def __str__(self):
+    return "%s (%s)" % (self.pointer, self.origin)
+
+
+class Transition(object):
+
+  def __init__(self, from_map, to_map, reason):
+    self.from_map = from_map
+    self.to_map = to_map
+    self.reason = reason
+
+
+def RegisterNewMap(raw_map):
+  if raw_map in annotations:
+    annotations[raw_map] += 1
+  else:
+    annotations[raw_map] = 0
+  return AnnotateExistingMap(raw_map)
+
+
+def AnnotateExistingMap(raw_map):
+  return "%s_%d" % (raw_map, annotations[raw_map])
+
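Because the GC can reuse an address, the same raw map pointer may denote different maps over the lifetime of a trace. RegisterNewMap disambiguates by appending an occurrence counter; AnnotateExistingMap resolves a raw pointer to its most recent annotated name. With a hypothetical pointer value:

    RegisterNewMap("0x2bc80f1")       # -> "0x2bc80f1_0"
    RegisterNewMap("0x2bc80f1")       # -> "0x2bc80f1_1" (address reused)
    AnnotateExistingMap("0x2bc80f1")  # -> "0x2bc80f1_1"
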
+
+def AddMap(pointer, origin):
+  pointer = RegisterNewMap(pointer)
+  maps[pointer] = Map(pointer, origin)
+  return pointer
+
+
+def AddTransition(from_map, to_map, reason):
+  from_map = AnnotateExistingMap(from_map)
+  to_map = AnnotateExistingMap(to_map)
+  if from_map not in transitions:
+    transitions[from_map] = {}
+  targets = transitions[from_map]
+  if to_map in targets:
+    # Some events get printed twice; that's OK. In some cases, ignore the
+    # second output...
+    old_reason = targets[to_map].reason
+    if old_reason.startswith("ReplaceDescriptors"):
+      return
+    # ...and in others use it for additional detail.
+    if reason in []:
+      targets[to_map].reason = reason
+      return
+    # Unexpected duplicate events? Warn.
+    print("// warning: already have a transition from %s to %s, reason: %s" %
+            (from_map, to_map, targets[to_map].reason))
+    return
+  targets[to_map] = Transition(from_map, to_map, reason)
+
+
+with open(filename, "r") as f:
+  last_to_map = ""
+  for line in f:
+    if not line.startswith("[TraceMaps: "): continue
+    words = line.split(" ")
+    event = words[1]
+    if event == "InitialMap":
+      assert words[2] == "map="
+      assert words[4] == "SFI="
+      new_map = AddMap(words[3], "SFI#%s" % words[5])
+      root_maps.append(new_map)
+      continue
+    if words[2] == "from=" and words[4] == "to=":
+      from_map = words[3]
+      to_map = words[5]
+      if from_map not in annotations:
+        print("// warning: unknown from_map %s" % from_map)
+        new_map = AddMap(from_map, "<unknown>")
+        root_maps.append(new_map)
+      if to_map != last_to_map:
+        AddMap(to_map, "<transition> (%s)" % event)
+      last_to_map = to_map
+      if event in ["Transition", "NoTransition"]:
+        assert words[6] == "name=", line
+        reason = "%s: %s" % (event, words[7])
+      elif event in ["Normalize", "ReplaceDescriptors", "SlowToFast"]:
+        assert words[6] == "reason=", line
+        reason = "%s: %s" % (event, words[7])
+        if words[8].strip() != "]":
+          reason = "%s_%s" % (reason, words[8])
+      else:
+        reason = event
+      AddTransition(from_map, to_map, reason)
+      continue
+
+
+def PlainPrint(m, indent, label):
+  print("%s%s (%s)" % (indent, m, label))
+  if m in transitions:
+    for t in transitions[m]:
+      PlainPrint(t, indent + "  ", transitions[m][t].reason)
+
+
+def CountTransitions(m):
+  if m not in transitions: return 0
+  return len(transitions[m])
+
+
+def DotPrint(m, label):
+  print("m%s [label=\"%s\"]" % (m[2:], label))
+  if m in transitions:
+    for t in transitions[m]:
+      # GraphViz doesn't like node labels looking like numbers, so use
+      # "m..." instead of "0x...".
+      print("m%s -> m%s" % (m[2:], t[2:]))
+      reason = transitions[m][t].reason
+      reason = reason.replace("\\", "BACKSLASH")
+      reason = reason.replace("\"", "\\\"")
+      DotPrint(t, reason)
+
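The "m" prefix plus the stripped "0x" keeps node IDs from starting with a digit, which GraphViz would otherwise reject as unquoted identifiers, and backslashes and quotes are escaped so they survive inside the label string. A transition from a hypothetical map 0x2bc80f1_0 to 0x2bc80f1_1 with reason "Normalize: NormalizeAsPrototype" would come out roughly as:

    m2bc80f1_0 [label="SFI#123"]
    m2bc80f1_0 -> m2bc80f1_1
    m2bc80f1_1 [label="Normalize: NormalizeAsPrototype"]
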
+
+if action == "plain":
+  root_maps = sorted(root_maps, key=CountTransitions, reverse=True)
+  for m in root_maps:
+    PlainPrint(m, "", maps[m].origin)
+
+elif action == "dot":
+  print("digraph g {")
+  for m in root_maps:
+    DotPrint(m, maps[m].origin)
+  print("}")
+
+elif action == "count":
+  reasons = {}
+  for s in transitions:
+    for t in transitions[s]:
+      reason = transitions[s][t].reason
+      if reason not in reasons:
+        reasons[reason] = 1
+      else:
+        reasons[reason] += 1
+  reasons_list = []
+  for r in reasons:
+    reasons_list.append("%8d %s" % (reasons[r], r))
+  reasons_list.sort(reverse=True)
+  for r in reasons_list[:20]:
+    print(r)
diff --git a/deps/v8/tools/try_perf.py b/deps/v8/tools/try_perf.py
new file mode 100755 (executable)
index 0000000..fcd1ddc
--- /dev/null
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# Copyright 2014 the V8 project authors. All rights reserved.
+# Use of this source code is governed by a BSD-style license that can be
+# found in the LICENSE file.
+
+import find_depot_tools
+import sys
+
+find_depot_tools.add_depot_tools_to_path()
+
+from git_cl import Changelist
+
+BOTS = [
+  'v8_linux32_perf_try',
+  'v8_linux64_perf_try',
+]
+
+def main(tests):
+  cl = Changelist()
+  if not cl.GetIssue():
+    print 'Need to upload first'
+    return 1
+
+  props = cl.GetIssueProperties()
+  if props.get('closed'):
+    print 'Cannot send tryjobs for a closed CL'
+    return 1
+
+  if props.get('private'):
+    print 'Cannot use trybots with private issue'
+    return 1
+
+  if not tests:
+    print 'Please specify the benchmarks to run as arguments.'
+    return 1
+
+  masters = {'internal.client.v8': dict((b, tests) for b in BOTS)}
+  cl.RpcServer().trigger_distributed_try_jobs(
+      cl.GetIssue(), cl.GetMostRecentPatchset(), cl.GetBranch(),
+      False, None, masters)
+  return 0
+
+if __name__ == "__main__":  # pragma: no cover
+  sys.exit(main(sys.argv[1:]))
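To use it, upload the CL first, then pass the benchmark suite names to run as arguments; every bot in BOTS receives the same list. A hypothetical invocation (suite names are placeholders):

    tools/try_perf.py octane kraken
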
index 0c3d6f6..f9ea0c0 100644 (file)
@@ -174,11 +174,25 @@ class PerfTest(unittest.TestCase):
     self.assertEquals(0, self._CallMain())
     self._VerifyResults("test", "score", [
       {"name": "Richards", "results": ["1.234"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
     ])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
 
+  def testOneRunWithTestFlags(self):
+    test_input = dict(V8_JSON)
+    test_input["test_flags"] = ["2", "test_name"]
+    self._WriteTestInput(test_input)
+    self._MockCommand(["."], ["Richards: 1.234\nDeltaBlue: 10657567"])
+    self.assertEquals(0, self._CallMain())
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": ["1.234"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+    ])
+    self._VerifyErrors([])
+    self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js",
+                     "--", "2", "test_name")
+
   def testTwoRuns_Units_SuiteName(self):
     test_input = dict(V8_JSON)
     test_input["run_count"] = 2
@@ -190,8 +204,8 @@ class PerfTest(unittest.TestCase):
                        "Richards: 50\nDeltaBlue: 300\n"])
     self.assertEquals(0, self._CallMain())
     self._VerifyResults("v8", "ms", [
-      {"name": "Richards", "results": ["50", "100"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
+      {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["300.0", "200.0"], "stddev": ""},
     ])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
@@ -208,8 +222,8 @@ class PerfTest(unittest.TestCase):
                        "Richards: 50\nDeltaBlue: 300\n"])
     self.assertEquals(0, self._CallMain())
     self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["50", "100"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["300", "200"], "stddev": ""},
+      {"name": "Richards", "results": ["50.0", "100.0"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["300.0", "200.0"], "stddev": ""},
     ])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
@@ -227,23 +241,21 @@ class PerfTest(unittest.TestCase):
     self.assertEquals([
       {"units": "score",
        "graphs": ["test", "Richards"],
-       "results": ["50", "100"],
+       "results": ["50.0", "100.0"],
        "stddev": ""},
       {"units": "ms",
        "graphs": ["test", "Sub", "Leaf"],
-       "results": ["3", "2", "1"],
+       "results": ["3.0", "2.0", "1.0"],
        "stddev": ""},
       {"units": "score",
        "graphs": ["test", "DeltaBlue"],
-       "results": ["200"],
+       "results": ["200.0"],
        "stddev": ""},
       ], self._LoadResults()["traces"])
     self._VerifyErrors([])
     self._VerifyMockMultiple(
-        (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
-         "file2.js", "run.js"),
-        (path.join("out", "x64.release", "d7"), "--flag", "file1.js",
-         "file2.js", "run.js"),
+        (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
+        (path.join("out", "x64.release", "d7"), "--flag", "run.js"),
         (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
         (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
         (path.join("out", "x64.release", "d8"), "--flag", "run.js"),
@@ -258,7 +270,7 @@ class PerfTest(unittest.TestCase):
     self.assertEquals(0, self._CallMain())
     self._VerifyResults("test", "score", [
       {"name": "Richards", "results": ["1.234"], "stddev": "0.23"},
-      {"name": "DeltaBlue", "results": ["10657567"], "stddev": "106"},
+      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": "106"},
     ])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "x64.release", "d7"), "--flag", "run.js")
@@ -274,8 +286,8 @@ class PerfTest(unittest.TestCase):
                               "DeltaBlue: 5\nDeltaBlue-stddev: 0.8\n"])
     self.assertEquals(1, self._CallMain())
     self._VerifyResults("test", "score", [
-      {"name": "Richards", "results": ["2", "3"], "stddev": "0.7"},
-      {"name": "DeltaBlue", "results": ["5", "6"], "stddev": "0.8"},
+      {"name": "Richards", "results": ["2.0", "3.0"], "stddev": "0.7"},
+      {"name": "DeltaBlue", "results": ["5.0", "6.0"], "stddev": "0.8"},
     ])
     self._VerifyErrors(
         ["Test Richards should only run once since a stddev is provided "
@@ -292,7 +304,7 @@ class PerfTest(unittest.TestCase):
     self.assertEquals(0, self._CallMain("--buildbot"))
     self._VerifyResults("test", "score", [
       {"name": "Richards", "results": ["1.234"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
     ])
     self._VerifyErrors([])
     self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
@@ -305,7 +317,7 @@ class PerfTest(unittest.TestCase):
     self.assertEquals(0, self._CallMain("--buildbot"))
     self._VerifyResults("test", "score", [
       {"name": "Richards", "results": ["1.234"], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
       {"name": "Total", "results": ["3626.49109719"], "stddev": ""},
     ])
     self._VerifyErrors([])
@@ -315,14 +327,15 @@ class PerfTest(unittest.TestCase):
     test_input = dict(V8_JSON)
     test_input["total"] = True
     self._WriteTestInput(test_input)
-    self._MockCommand(["."], ["x\nRichaards: 1.234\nDeltaBlue: 10657567\ny\n"])
+    self._MockCommand(["."], ["x\nRichards: bla\nDeltaBlue: 10657567\ny\n"])
     self.assertEquals(1, self._CallMain("--buildbot"))
     self._VerifyResults("test", "score", [
       {"name": "Richards", "results": [], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
     ])
     self._VerifyErrors(
-        ["Regexp \"^Richards: (.+)$\" didn't match for test Richards.",
+        ["Regexp \"^Richards: (.+)$\" "
+         "returned a non-numeric for test Richards.",
          "Not all traces have the same number of results."])
     self._VerifyMock(path.join("out", "Release", "d7"), "--flag", "run.js")
 
@@ -332,7 +345,7 @@ class PerfTest(unittest.TestCase):
     self.assertEquals(1, self._CallMain())
     self._VerifyResults("test", "score", [
       {"name": "Richards", "results": [], "stddev": ""},
-      {"name": "DeltaBlue", "results": ["10657567"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
     ])
     self._VerifyErrors(
         ["Regexp \"^Richards: (.+)$\" didn't match for test Richards."])
@@ -344,23 +357,28 @@ class PerfTest(unittest.TestCase):
     self._MockCommand(["."], [
       "RESULT Infra: Constant1= 11 count\n"
       "RESULT Infra: Constant2= [10,5,10,15] count\n"
-      "RESULT Infra: Constant3= {12,1.2} count\n"])
-    self.assertEquals(0, self._CallMain())
+      "RESULT Infra: Constant3= {12,1.2} count\n"
+      "RESULT Infra: Constant4= [10,5,error,15] count\n"])
+    self.assertEquals(1, self._CallMain())
     self.assertEquals([
       {"units": "count",
        "graphs": ["test", "Infra", "Constant1"],
-       "results": ["11"],
+       "results": ["11.0"],
        "stddev": ""},
       {"units": "count",
        "graphs": ["test", "Infra", "Constant2"],
-       "results": ["10", "5", "10", "15"],
+       "results": ["10.0", "5.0", "10.0", "15.0"],
        "stddev": ""},
       {"units": "count",
        "graphs": ["test", "Infra", "Constant3"],
-       "results": ["12"],
+       "results": ["12.0"],
        "stddev": "1.2"},
+      {"units": "count",
+       "graphs": ["test", "Infra", "Constant4"],
+       "results": [],
+       "stddev": ""},
       ], self._LoadResults()["traces"])
-    self._VerifyErrors([])
+    self._VerifyErrors(["Found non-numeric in test/Infra/Constant4"])
     self._VerifyMock(path.join("out", "x64.release", "cc"), "--flag", "")
 
   def testOneRunTimingOut(self):
@@ -379,3 +397,22 @@ class PerfTest(unittest.TestCase):
     ])
     self._VerifyMock(
         path.join("out", "x64.release", "d7"), "--flag", "run.js", timeout=70)
+
+  # Simple test that mocks out the Android platform. Testing the platform
+  # itself would require lots of complicated mocks for the Android tools.
+  def testAndroid(self):
+    self._WriteTestInput(V8_JSON)
+    platform = run_perf.Platform
+    platform.PreExecution = MagicMock(return_value=None)
+    platform.PostExecution = MagicMock(return_value=None)
+    platform.PreTests = MagicMock(return_value=None)
+    platform.Run = MagicMock(
+        return_value="Richards: 1.234\nDeltaBlue: 10657567\n")
+    run_perf.AndroidPlatform = MagicMock(return_value=platform)
+    self.assertEquals(
+        0, self._CallMain("--android-build-tools", "/some/dir",
+                          "--arch", "android_arm"))
+    self._VerifyResults("test", "score", [
+      {"name": "Richards", "results": ["1.234"], "stddev": ""},
+      {"name": "DeltaBlue", "results": ["10657567.0"], "stddev": ""},
+    ])
index 4430966..55592a9 100644 (file)
@@ -5,4 +5,4 @@ Try to write something funny. And please don't add trailing whitespace.
 A Smi walks into a bar and says:
 "I'm so deoptimized today!"
 The doubles heard this and started to unbox.
-The Smi looked at them when a crazy v8-autoroll account showed up..
+The Smi looked at them when a crazy v8-autoroll account showed up........